Dataset columns:
query            string, lengths 9 to 9.05k
document         string, lengths 10 to 222k
metadata         dict
negatives        list, length 30
negative_scores  list, length 30
document_score   string, lengths 4 to 10
document_rank    string, 2 classes
Dynamic library stub function
def stubFunc( *args, **keywords ):
    maya.cmds.dynamicLoad( library )
    # call the real function which has replaced us
    return maya.cmds.__dict__[command]( *args, **keywords )
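The stub body refers to `command` and `library` as free variables; in Maya's startup code they come from an enclosing factory, `__makeStubFunc`, which the `processCommandList` document in the next row calls. The factory body is not included in this section, so the following is only a minimal sketch of how such a closure could be built:

def __makeStubFunc( command, library ):
    # Sketch only: close over the command name and its owning library so the
    # stub can load the library lazily and then delegate to the real command.
    def stubFunc( *args, **keywords ):
        maya.cmds.dynamicLoad( library )  # loading replaces the stub in maya.cmds
        return maya.cmds.__dict__[command]( *args, **keywords )
    return stubFunc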
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_stub(self):\n pass", "def generate_ext_stub(cls):\n # Qualified name (C Version)\n qname = '_'.join(symbol_table.prefix+[cls.name])\n self.exts.append(qname)\n\n if self.config.verbose:\n import sys\n mod_name = '.'.join(symbol_table.prefix[1:]+[cls.name])\n sys.stdout.write('\\r'+' '*80)\n sys.stdout.write('\\rgenerating glue code for %s'%mod_name)\n sys.stdout.flush()\n\n # Consolidate all methods, defined and inherited\n cls.scan_methods()\n \n # chpl_defs = ChapelScope(chpl_stub)\n ci = self.ClassInfo(cls)\n\n # if self.server:\n # ci.impl = self.pkg_impl\n\n ci.stub.new_def(babel.externals(cls.get_scoped_id()))\n ci.stub.new_def(babel.builtin_stub_functions(cls.get_scoped_id()))\n \n has_contracts = ior_template.generateContractChecks(cls)\n self.gen_default_methods(cls, has_contracts, ci)\n\n #print qname, map(lambda x: x[2][1]+x[2][2], cls.all_methods)\n for method in cls.all_methods:\n (Method, Type, Name, Attrs, Args, \n Except, From, Requires, Ensures, DocComment) = method\n ci.epv.add_method((method, Type, Name, Attrs, \n babel.drop_rarray_ext_args(Args),\n Except, From, Requires, Ensures, DocComment))\n\n # all the methods for which we would generate a server impl\n impl_methods = babel.builtins+cls.get_methods()\n impl_methods_names = [sidlir.method_method_name(m) for m in impl_methods]\n\n # client\n for method in cls.all_methods:\n has_impl = sidlir.method_method_name(method) in impl_methods_names\n self.generate_client_method(symbol_table, method, ci, has_impl)\n\n if self.server:\n class_methods = filter(sidlir.is_not_static, impl_methods)\n static_methods = filter(sidlir.is_static, impl_methods)\n\n # # Class\n # ci.impl.new_def(gen_doc_comment(cls.doc_comment, chpl_stub)+\n # 'class %s_Impl {'%qname)\n # splicer = '.'.join(cls.qualified_name+['Impl'])\n # ci.impl.new_def('// DO-NOT-DELETE splicer.begin(%s)'%splicer)\n # ci.impl.new_def('// DO-NOT-DELETE splicer.end(%s)'%splicer)\n # for method in class_methods: \n # self.generate_server_method(symbol_table, method, ci)\n\n # ci.impl.new_def('} // class %s_Impl'%qname)\n # ci.impl.new_def('')\n # ci.impl.new_def('')\n\n # # Static\n # if static_methods:\n # ci.impl.new_def('// all static member functions of '+qname)\n # ci.impl.new_def(gen_doc_comment(cls.doc_comment, chpl_stub)+\n # '// FIXME: chpl allows only one module per library //'+\n # ' module %s_static_Impl {'%qname)\n\n # for method in static_methods:\n # self.generate_server_method(symbol_table, method, ci)\n\n # ci.impl.new_def('//} // module %s_static_Impl'%qname)\n # ci.impl.new_def('')\n # ci.impl.new_def('')\n\n\n # # Chapel Stub (client-side Chapel bindings)\n # self.generate_chpl_stub(chpl_stub, qname, ci)\n \n # # Because of Chapel's implicit (filename-based) modules it\n # # is important for the Chapel stub to be one file, but we\n # # generate separate files for the cstubs\n # self.pkg_chpl_stub.new_def(chpl_stub)\n\n # Stub (in C), the order of these definitions is somewhat sensitive\n ci.stub.genh_top(ir.Import(qname+'_IOR'))\n ci.stub.gen(ir.Import(ci.stub._name))\n\n pkg_name = '_'.join(symbol_table.prefix)\n ci.stub.gen(ir.Import(pkg_name))\n ci.stub.write()\n\n # IOR\n ior_template.generate_ior(ci, with_ior_c=self.server, _braid_config=self.config )\n ci.ior.write()\n\n # Skeleton\n if self.server:\n self.generate_skeleton(ci, qname)\n\n # Convenience header\n ext_h = CFile(qname)\n ext_h.genh(ir.Import(qname+'_IOR'))\n ext_h.genh(ir.Import(qname+'_Stub'))\n ext_h.write()\n\n # Makefile\n self.classes.append(qname)", "def 
__def_function__():\n pass", "def __init__(self, libpath):\n self._lib = CDLL(libpath)\n self._functions = {}", "def boost_initialization():\n global Lib_c \n Lib_c = ctypes.CDLL('./integral_function.so')\n Lib_c.set.restype = None\n Lib_c.set.argtypes = (ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)\n Lib_c.set_target.restype = None\n Lib_c.set_target.argtypes = (ctypes.c_int,)\n Lib_c.function.restype = ctypes.c_double\n Lib_c.function.argtypes = (ctypes.c_int,ctypes.c_double)", "def __call__(fun_name):", "def GenerateFixedFunctions(self, out):\n out.Write(\"\"\"\n\nstatic PPB_GetInterface __real_PPBGetInterface;\nstatic PPP_GetInterface_Type __real_PPPGetInterface;\n\nvoid __set_real_%(wrapper_prefix)s_PPBGetInterface(PPB_GetInterface real) {\n __real_PPBGetInterface = real;\n}\n\nvoid __set_real_%(wrapper_prefix)s_PPPGetInterface(PPP_GetInterface_Type real) {\n __real_PPPGetInterface = real;\n}\n\n/* Map interface string -> wrapper metadata */\nstatic struct %(wrapper_struct)s *%(wrapper_prefix)sPPBShimIface(\n const char *name) {\n struct %(wrapper_struct)s **next = s_ppb_wrappers;\n while (*next != NULL) {\n if (mystrcmp(name, (*next)->iface_macro) == 0) return *next;\n ++next;\n }\n return NULL;\n}\n\n/* Map interface string -> wrapper metadata */\nstatic struct %(wrapper_struct)s *%(wrapper_prefix)sPPPShimIface(\n const char *name) {\n struct %(wrapper_struct)s **next = s_ppp_wrappers;\n while (*next != NULL) {\n if (mystrcmp(name, (*next)->iface_macro) == 0) return *next;\n ++next;\n }\n return NULL;\n}\n\nconst void *__%(wrapper_prefix)s_PPBGetInterface(const char *name) {\n struct %(wrapper_struct)s *wrapper = %(wrapper_prefix)sPPBShimIface(name);\n if (wrapper == NULL) {\n /* We did not generate a wrapper for this, so return the real interface. */\n return (*__real_PPBGetInterface)(name);\n }\n\n /* Initialize the real_iface if it hasn't been. The wrapper depends on it. */\n if (wrapper->real_iface == NULL) {\n const void *iface = (*__real_PPBGetInterface)(name);\n if (NULL == iface) return NULL;\n wrapper->real_iface = iface;\n }\n\n return wrapper->wrapped_iface;\n}\n\nconst void *__%(wrapper_prefix)s_PPPGetInterface(const char *name) {\n struct %(wrapper_struct)s *wrapper = %(wrapper_prefix)sPPPShimIface(name);\n if (wrapper == NULL) {\n /* We did not generate a wrapper for this, so return the real interface. */\n return (*__real_PPPGetInterface)(name);\n }\n\n /* Initialize the real_iface if it hasn't been. The wrapper depends on it. 
*/\n if (wrapper->real_iface == NULL) {\n const void *iface = (*__real_PPPGetInterface)(name);\n if (NULL == iface) return NULL;\n wrapper->real_iface = iface;\n }\n\n return wrapper->wrapped_iface;\n}\n\"\"\" % { 'wrapper_struct' : self.GetWrapperMetadataName(),\n 'wrapper_prefix' : self.wrapper_prefix,\n } )", "def _create_impl(self):", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def _build_impl(self):", "def gen_library(self):\n newlibrary = self.newlibrary\n whelpers.add_all_helpers(newlibrary.symtab)\n\n self.function_index = newlibrary.function_index\n self.class_map = newlibrary.class_map\n\n self.instantiate_all_classes(newlibrary.wrap_namespace)\n self.update_templated_typemaps(newlibrary.wrap_namespace)\n self.gen_namespace(newlibrary.wrap_namespace)", "def setup_lib(CLIB):\n # {{ SETUP_LIB }}", "def _cached_create_libspec(\n self,\n libname: str,\n is_builtin: bool,\n target_file: Optional[str],\n args: Optional[str],\n *,\n _internal_force_text=False, # Should only be set from within this function.\n ) -> Optional[str]:\n from robotframework_ls.impl import robot_constants\n\n if not is_builtin:\n if not target_file:\n is_builtin = libname in robot_constants.STDLIBS\n\n import time\n from robocorp_ls_core.subprocess_wrapper import subprocess\n from robocorp_ls_core.robotframework_log import get_log_level\n\n acquire_mutex = _timed_acquire_mutex_for_spec_filename\n\n if _internal_force_text:\n # In this case this is a recursive call and we already have the lock.\n acquire_mutex = NULL\n\n log_exception = log.exception\n if is_builtin and libname == \"Dialogs\" and get_log_level() < 1:\n # Dialogs may have dependencies that are not available, so, don't show\n # it unless verbose mode is enabled.\n log_exception = log.debug\n\n if not libname.replace(\".\", \"\").replace(\"/\", \"\").replace(\"\\\\\", \"\").strip():\n return f\"Unable to generate libspec for: {libname}\"\n\n additional_path = None\n additional_path_exists = False\n\n log_time = True\n cwd = None\n\n if target_file is not None:\n additional_path = os.path.dirname(target_file)\n if os.path.splitext(os.path.basename(target_file))[0] == \"__init__\":\n additional_path = os.path.dirname(additional_path)\n\n additional_path_exists = os.path.exists(additional_path)\n if additional_path and additional_path_exists:\n cwd = 
additional_path\n if libname.endswith((\"/\", \"\\\\\")):\n libname = libname[:-1]\n libname = os.path.basename(libname)\n if libname.lower().endswith((\".py\", \".class\", \".java\")):\n libname = os.path.splitext(libname)[0]\n\n curtime = time.time()\n\n try:\n try:\n call = [sys.executable]\n major_version = self.get_robot_major_version()\n if major_version < 4:\n call.extend(\"-m robot.libdoc --format XML\".split())\n else:\n call.extend(\n \"-m robot.libdoc --format XML --specdocformat RAW\".split()\n )\n\n if additional_path and additional_path_exists:\n call.extend([\"-P\", os.path.normpath(additional_path)])\n\n if _internal_force_text:\n call.append(\"--docformat\")\n call.append(\"text\")\n\n # Note: always set as a whole, so, iterate in generator is thread-safe.\n for entry in self._additional_pythonpath_folder_to_folder_info:\n if os.path.exists(entry):\n call.extend([\"-P\", os.path.normpath(entry)])\n\n if not args:\n call.append(libname)\n else:\n call.append(\"::\".join([libname, args]))\n\n libspec_filename = self._compute_libspec_filename(\n libname, is_builtin, target_file, args\n )\n\n log.debug(f\"Obtaining mutex to generate libspec: {libspec_filename}.\")\n with acquire_mutex(libspec_filename): # Could fail.\n log.debug(\n f\"Obtained mutex to generate libspec: {libspec_filename}.\"\n )\n call.append(libspec_filename)\n\n mtime: float = -1\n try:\n mtime = os.path.getmtime(libspec_filename)\n except:\n pass\n\n log.debug(\n \"Generating libspec for: %s.\\nCwd:%s\\nCommand line:\\n%s\",\n libname,\n cwd,\n \" \".join(call),\n )\n try:\n try:\n # Note: stdout is always subprocess.PIPE in this call.\n # Note: the env is always inherited (the process which has\n # the LibspecManager must be the target env already).\n self._subprocess_check_output(\n call,\n stderr=subprocess.STDOUT,\n stdin=subprocess.PIPE,\n cwd=cwd,\n )\n except OSError as e:\n log.exception(\"Error calling: %s\", call)\n # We may have something as: Ignore OSError: [WinError 6] The handle is invalid,\n # give the result based on whether the file changed on disk.\n try:\n if mtime != os.path.getmtime(libspec_filename):\n _dump_spec_filename_additional_info(\n self,\n libspec_filename,\n is_builtin=is_builtin,\n obtain_mutex=False,\n )\n return None\n except:\n pass\n\n log.debug(\"Not retrying after OSError failure.\")\n return str(e)\n\n except subprocess.CalledProcessError as e:\n if not _internal_force_text:\n if (\n b\"reST format requires 'docutils' module to be installed\"\n in e.output\n ):\n return self._cached_create_libspec(\n libname,\n is_builtin,\n target_file,\n args,\n _internal_force_text=True,\n )\n\n log_exception(\n \"Error creating libspec: %s.\\nReturn code: %s\\nOutput:\\n%s\",\n libname,\n e.returncode,\n e.output,\n )\n bytes_output = e.output\n output = bytes_output.decode(\"utf-8\", \"replace\")\n\n # Remove things we don't want to show.\n for s in (\"Try --help\", \"--help\", \"Traceback\"):\n index = output.find(s)\n if index >= 0:\n output = output[:index].strip()\n\n if output:\n return output\n return f\"Error creating libspec: {output}\"\n\n _dump_spec_filename_additional_info(\n self,\n libspec_filename,\n is_builtin=is_builtin,\n obtain_mutex=False,\n )\n return None\n except Exception as e:\n log_exception(\"Error creating libspec: %s\", libname)\n return str(e)\n finally:\n if log_time:\n delta = time.time() - curtime\n log.debug(\"Took: %.2fs to generate info for: %s\" % (delta, libname))", "def fake(ctx, clean=False):\n work_dir = join(PROJ_ROOT, \"func\", 
\"dynlink\")\n build_dir = join(PROJ_ROOT, \"build\", \"libfake\")\n\n clean_dir(build_dir, clean)\n\n build_cmd = [\n \"cmake\",\n \"-GNinja\",\n \"-DFAASM_BUILD_SHARED=ON\",\n \"-DFAASM_BUILD_TYPE=wasm\",\n \"-DCMAKE_TOOLCHAIN_FILE={}\".format(CMAKE_TOOLCHAIN_FILE),\n \"-DCMAKE_BUILD_TYPE=Release\",\n \"-DCMAKE_INSTALL_PREFIX={}\".format(WASM_SYSROOT),\n work_dir,\n ]\n\n run(\" \".join(build_cmd), shell=True, cwd=build_dir, check=True)\n run(\"ninja\", shell=True, cwd=build_dir, check=True)\n run(\"ninja install\", shell=True, cwd=build_dir, check=True)\n\n # Copy shared object into place\n sysroot_files = join(WASM_SYSROOT, \"lib\", \"wasm32-wasi\", \"libfake*.so\")\n\n runtime_lib_dir = join(FAASM_RUNTIME_ROOT, \"lib\")\n if not exists(runtime_lib_dir):\n makedirs(runtime_lib_dir)\n\n run(\n \"cp {} {}\".format(sysroot_files, runtime_lib_dir),\n shell=True,\n check=True,\n )\n\n # Update env\n shell_env = copy(environ)\n shell_env.update(\n {\n \"LD_LIBRARY_PATH\": \"/usr/local/lib/\",\n }\n )\n\n # Run codegen\n shared_objs = [\n join(FAASM_RUNTIME_ROOT, \"lib\", \"libfakeLibA.so\"),\n join(FAASM_RUNTIME_ROOT, \"lib\", \"libfakeLibB.so\"),\n ]\n\n binary = find_codegen_shared_lib()\n\n for so in shared_objs:\n print(\"Running codegen for {}\".format(so))\n run(\"{} {}\".format(binary, so), env=shell_env, shell=True, check=True)", "def dlopen(ffi, *names):\r\n for name in names:", "def cpp_function(self):", "def dl():\n raise NotImplementedError()", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def process_library(self):\n self.process_namespace(self.newlibrary.wrap_namespace)", "def make_module_hook(self):\n res = \\\n\"\"\"{fname} = shared_object.{fname}\n {fname}.restype = POINTER({structname})\n {varname} = {fname}()\n\n\"\"\"\n fragments ={\n \"varname\": self._namespace_mangle(self.namespace) + \"_plugin\",\n \"fname\": \"___madz_LANG_python_get_out_struct\" if self.namespace == \"\" else \"___madz_LANG_python_get_\"+self._namespace_mangle(self.namespace) + \"_struct\",\n \"structname\": self.python_madz_types + (\"OUTSTRUCT\" if self.namespace == \"\" else self._namespace_mangle(self.namespace))\n }\n\n return res.format(**fragments)", "def stub_init(self, *args, **kwargs):\n self.data = ReturnValueStub()\n self.data.name = self.__class__.__name__\n self.name = self.data.name\n self.set_stub_resources()", "def generate(module_name, module_path, target_dir):\n if not (Path(module_path) / 'builtins.stub.py').exists():\n copy(Path(__file__).parent.parent / 'stubs/builtins.stub.py', module_path)\n build_swift_wrappers_module(module_name, module_path, target_dir)", "def fake_get_version(dec_type, *args, **kwargs):\n return None", "def on_libType(self):\n self.rf_libTree()\n self.rf_libPath()\n self.rf_libFileName()\n self.rf_libTab()\n self.rf_delInfo()" ]
[ "0.65117", "0.6033104", "0.59740293", "0.59045887", "0.58740264", "0.5853005", "0.58475363", "0.58138716", "0.5795929", "0.5775147", "0.5775147", "0.5775147", "0.5775147", "0.5775147", "0.57486016", "0.5738319", "0.57080203", "0.56999254", "0.56829923", "0.5643429", "0.5590015", "0.5578605", "0.5561192", "0.5561192", "0.55543274", "0.554199", "0.5541452", "0.55286324", "0.55196774", "0.5507782" ]
0.72174203
0
Process the "commandList" file that contains the mappings between command names and the libraries in which they are found. This function will install stub functions in maya.cmds for all commands that are not yet loaded. The stub functions will load the required library and then execute the command.
def processCommandList():
    try:
        # Assume that maya.cmds.about and maya.cmds.internalVar are already registered
        #
        commandListPath = os.path.realpath( os.environ[ 'MAYA_LOCATION' ] )
        platform = maya.cmds.about( os=True )
        commandListPath = os.path.join( commandListPath, commandListLocations[platform], 'commandList' )
        file = open( commandListPath, 'r' )
        for line in file:
            commandName, library = line.split()
            if not commandName in maya.cmds.__dict__:
                maya.cmds.__dict__[commandName] = __makeStubFunc( commandName, library )
    except:
        sys.stderr.write("Unable to process commandList %s" % commandListPath)
        raise
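Each line of the commandList file holds a whitespace-separated pair: a command name and the library that provides it. The same lazy-registration pattern can be shown outside Maya; the sketch below is a self-contained illustration in which the registry, loader, and sample data are hypothetical stand-ins rather than Maya APIs:

import io

def process_command_list(registry, loader, command_list_text):
    # Each line maps a command name to the library that implements it.
    for line in io.StringIO(command_list_text):
        command_name, library = line.split()
        if command_name not in registry:
            registry[command_name] = make_stub(registry, loader, command_name, library)

def make_stub(registry, loader, command, library):
    def stub(*args, **kwargs):
        loader(library)                      # loading installs the real function
        return registry[command](*args, **kwargs)
    return stub

# Hypothetical usage: "loading" polyCube's library replaces the stub with the real function.
registry = {}
loader = lambda lib: registry.update(polyCube=lambda size: 'cube(%s) from %s' % (size, lib))
process_command_list(registry, loader, "polyCube modelingLib\n")
print(registry['polyCube'](2))   # -> cube(2) from modelingLib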
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_commands(self, commands: List[str]):", "def __init__(self, command_list: list = None) -> None:\n if command_list is None:\n command_list = implemented_commands\n for command in command_list:\n setattr(self, command.get(\"name\").replace(\" \", \"_\"), self._SingleCommand(command))", "def run(self, commands: list[str]):\n ...", "def setup_commands(bot):\n # Reset the bot's command setup\n bot.reset_commands()\n # Load enabled mods\n for mod in bot.enabled_mods:\n try:\n full = 'mod_%s' % mod\n m = getattr(__import__('mods.%s' % full), full)\n except Exception:\n bot.log(ERROR, 'Importing the %s mod failed!' % mod)\n sys.excepthook(*sys.exc_info())\n continue\n\n try:\n bot.installed_mods[mod] = m\n # Check for a 404 handler, and replace the current one if there is\n p404 = getattr(m, 'handle_404', None)\n if p404:\n bot.cb_404 = p404\n\n # Check for a setup function, and run it if there is\n setup = getattr(m, 'setup', None)\n if setup:\n setup(bot)\n\n # Required command bank\n for cmd in m.command_bank:\n # Get the actual function\n func = getattr(m, cmd)\n # Get the args for the command\n data = m.command_bank[cmd]\n # If data[0] is true, mod_help will recognize this command\n if data[0]:\n bot.help_db[data[1]] = parse_help(func)\n # Get the main name and aliases inserted\n for alias in data[1:]:\n bot.command_db[alias] = func\n\n # Helper function for optional nameless multiples\n def add_optional(olist, name):\n olist.extend(getattr(m, f) for f in getattr(m, name, ()))\n\n # Optional filters are loaded and added to the list\n add_optional(bot.filters, 'filters')\n\n # Ditto for time-cycle callbacks\n add_optional(bot.periodic_cbs, 'periodic')\n\n # Handlers are the same, but structured as a dict with\n # \"type\": \"single function-name\" items\n handlers = getattr(m, 'handlers', None)\n if handlers:\n for cbtype in handlers:\n bot.handlers[cbtype].append(getattr(m, handlers[cbtype]))\n\n # Register any requirements\n # NOTE: By putting this at the end, we avoid the possibility of\n # getting fake requires.\n reqs = getattr(m, 'requires', None)\n if reqs:\n bot.required_mods.update(reqs)\n except Exception:\n bot.log(ERROR, 'Unable to install the %s mod!' % mod)\n del bot.installed_mods[mod]\n sys.excepthook(*sys.exc_info())\n\n missing = bot.required_mods - set(bot.installed_mods)\n if missing:\n raise MissingRequirementsError(missing)\n\n # And now for the post-install triggers.\n for mod, m in bot.installed_mods.items():\n post = getattr(m, 'post_prepare', None)\n if post:\n try:\n post(bot)\n except Exception:\n bot.log(ERROR, 'Unable to post-prepare the %s mod!' 
% mod)\n sys.excepthook(*sys.exc_info())", "def load_commands():\n register_plugin(configure_client_details)\n register_plugin(search_venues)", "async def adding_command_list(self):\n command_aliases=['anime','fun','mod','nekogif'] #This includes the aliases and the cog names\n #NOTE: fun command added\n for i in self.bot.commands:\n self.commands.append(i.name)\n \n for i in command_aliases:\n self.commands.append(i)", "def __init__(self, command_list, ):\n self.command_list = [] # all addition via function below\n self.add_command( command_list )", "def handle_command_line():\n commands = scan_for_commands()\n parser = argparse.ArgumentParser(\n description=\"A set of utilities to ease the installation of Modoboa.\",\n epilog=\"\"\"Available commands:\n%s\n\"\"\" % \"\\n\".join([\"\\t%s\" % c for c in sorted(commands)]))\n parser.add_argument(\"--verbose\", action=\"store_true\",\n help=\"Activate verbose output\")\n parser.add_argument(\"command\", type=str,\n help=\"A valid command name\")\n (args, remaining) = parser.parse_known_args()\n\n if args.command not in commands:\n print(\"Unknown command '%s'\" % args.command, file=sys.stderr)\n sys.exit(1)\n\n commands[args.command](commands, verbose=args.verbose).run(remaining)", "def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The command to get help for. Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). 
Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 'Core.Backup.Filename'. If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. 
Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)", "def _LoadCommandMap(self):\n # Walk gslib/commands and find all commands.\n commands_dir = os.path.join(self.gsutil_bin_dir, 'gslib', 'commands')\n for f in os.listdir(commands_dir):\n # Handles no-extension files, etc.\n (module_name, ext) = os.path.splitext(f)\n if ext == '.py':\n __import__('gslib.commands.%s' % module_name)\n command_map = {}\n # Only include Command subclasses in the dict.\n for command in Command.__subclasses__():\n command_map[command.command_spec[COMMAND_NAME]] = command\n for command_name_aliases in command.command_spec[COMMAND_NAME_ALIASES]:\n command_map[command_name_aliases] = command\n return command_map", "def register_commands(self):\n\n with open(self._full_register_name, 'r') as file_to_read:\n command_register = json.loads(file_to_read.read())\n\n commands = command_register.get(\"commands\")\n if commands is None:\n logging.error(\"Command register is incorrect\")\n return []\n\n command_objects = []\n\n for command in commands:\n module_name = command.get(\"module\")\n class_name = command.get(\"class_name\")\n\n if (module_name is None) or (class_name is None):\n logging.error(\"Commands in the register are described in incorrect way.\")\n raise KeyError()\n\n try:\n command_module = importlib.import_module(module_name)\n command_class = getattr(command_module, class_name)\n command_object = command_class()\n command_objects.append(command_object)\n except ModuleNotFoundError as e:\n logging.error(\"Command modules specified in the register are not found!\")\n raise e\n\n return command_objects", "def load_commands():\n return [AddBook, FindBook, FindBooks, EditBook, RemoveBook, ReviewBook]", "def get_matching_commands(self, command_word, is_no_command, command_list):\n candidates = []\n current_mode = bigsh.run.finder.mode_stack.current_mode()\n command_word_lower = command_word.lower()\n\n try:\n for command in command_list:\n # If this command is tied to a feature then check that the\n # feature is enabled.\n if is_command_feature_active(command, command.get('feature')) == False:\n continue\n \n # Check that the command is enabled for the current mode\n modes = command.get('mode')\n if not modes:\n raise error.CommandDescriptionError(\n 'Command description must specify a mode', command)\n if not _is_list(modes):\n modes = (modes,)\n if not _match_current_modes(command, current_mode, modes):\n continue\n \n if 'rbac-group' in command:\n rbac_group = command['rbac-group']\n if type(rbac_group) == 'str':\n rbac_group = [rbac_group]\n if bigsh.bigdb.enabled():\n # 'rbac-required' raises exceptions for failures.\n try:\n action_invoke('rbac-required', ({}, rbac_group))\n except error.CommandUnAuthorized, e:\n if debug.cli() or 
debug.description():\n print 'Not allowed:', e, command['self']\n continue\n # \n # If a 'no' command was requested, verify this command\n # support 'no' (can we tell from the type?)\n if is_no_command:\n if not is_no_command_supported(command):\n continue\n\n # Check that the name matches the command word\n name = command['name']\n if _is_string(name):\n name = name.lower()\n if name.startswith(command_word_lower):\n prefix_match = len(command_word) < len(name)\n candidates.append((command, prefix_match))\n elif isinstance(name, collections.Mapping):\n # FIXME: Should support dict-based names that aren't\n # patterns. Will be useful when we support lists of names\n # for a command description where the arg_data can be set with\n # different fields based on which name was matched for the command\n if 're' not in name:\n command['name']['re'] = re.compile(name['pattern'])\n if name['re'].match(command_word):\n candidates.append((command, True))\n # FIXME: Probably should get rid of the following pattern code\n # and just use the above pattern compilation mechanism.\n # The following won't work when we require the command\n # descriptions to be pure data, e.g. derived from JSON data\n # or something like that.\n elif type(name) == dict and \\\n name['re'].match(command_word):\n candidates.append((command, False))\n else:\n raise error.CommandDescriptionError('Command description name '\n 'must be either string, dict, or pattern', command)\n \n except Exception, _e:\n if debug.cli():\n print _line(), 'Backtrace'\n traceback.print_exc()\n raise error.CommandDescriptionError('Missing mode or name', command)\n \n return candidates", "def auto_import_commands():\n import re,os\n import topo\n import __main__\n\n # CEBALERT: this kind of thing (topo.__file__) won't work with\n # py2exe and similar tools\n topo_path = os.path.join(os.path.split(topo.__file__)[0],\"command\")\n for f in os.listdir(topo_path):\n if re.match('^[^_.].*\\.py$',f):\n modulename = re.sub('\\.py$','',f)\n exec \"from topo.command.\"+modulename+\" import *\" in __main__.__dict__\n exec \"from topo.command import *\" in __main__.__dict__", "def stubFunc( *args, **keywords ):\n maya.cmds.dynamicLoad( library )\n # call the real function which has replaced us\n return maya.cmds.__dict__[command]( *args, **keywords )", "def _resolve_required_macros(file_content):\n call_commands = \"\"\n for line in file_content.split(\"\\n\"):\n match = re.search(\"^!@require\\s+([^\\s]+).*$\", line)\n if match is not None:\n required_macros = _add_macro_lib_ending(match.group(1))\n required_macros_file = abspath(join(LIB, required_macros))\n call_commands += _resolve_required_macros(_read_input_file(required_macros_file))\n call_commands += \"call, file = \\\"\" + required_macros_file + \"\\\";\\n\"\n return call_commands", "def register_commands(self):\n for module in copy.copy(sys.modules).values():\n for command in module_functionalities(module, 'MARA_CLICK_COMMANDS', click.Command):\n if 'callback' in command.__dict__ and command.__dict__['callback']:\n package = command.__dict__['callback'].__module__.rpartition('.')[0]\n if package != 'flask':\n register_command(self, command, package)", "def receive(self, command_list):\n for cmd in command_list:\n self._handle_command(cmd)", "def build_commands(self) -> list:\r\n commands: list = []\r\n\r\n arguments = CommandArguments()\r\n\r\n compiler_path: str = self.options.compiler_path\r\n flags_path: str = self.options.flags_path\r\n output_path: str = self.options.output_path\r\n\r\n if 
self.options.no_incremental_build:\r\n psc_paths: dict = self.psc_paths\r\n else:\r\n psc_paths = self._try_exclude_unmodified_scripts()\r\n\r\n # add .psc scripts whose .pex counterparts do not exist\r\n for object_name, script_path in self.missing_scripts.items():\r\n if object_name not in psc_paths.keys():\r\n psc_paths[object_name] = script_path\r\n\r\n source_import_paths = deepcopy(self.import_paths)\r\n\r\n # TODO: depth sorting solution is not foolproof! parse psc files for imports to determine command order\r\n for object_name, script_path in psc_paths.items():\r\n import_paths: list = self.import_paths\r\n\r\n if self.options.game_type != GameType.FO4:\r\n object_name = script_path\r\n\r\n # remove unnecessary import paths for script\r\n if self.options.game_type == GameType.FO4:\r\n for import_path in reversed(self.import_paths):\r\n if self._can_remove_folder(import_path, object_name, script_path):\r\n import_paths.remove(import_path)\r\n\r\n arguments.clear()\r\n arguments.append(compiler_path, enquote_value=True)\r\n arguments.append(object_name, enquote_value=True)\r\n arguments.append(flags_path, key='f', enquote_value=True)\r\n arguments.append(';'.join(import_paths), key='i', enquote_value=True)\r\n arguments.append(output_path, key='o', enquote_value=True)\r\n\r\n if self.options.game_type == GameType.FO4:\r\n # noinspection PyUnboundLocalVariable\r\n if self.release:\r\n arguments.append('-release')\r\n\r\n # noinspection PyUnboundLocalVariable\r\n if self.final:\r\n arguments.append('-final')\r\n\r\n if self.optimize:\r\n arguments.append('-op')\r\n\r\n arg_s = arguments.join()\r\n commands.append(arg_s)\r\n\r\n self.import_paths = source_import_paths\r\n\r\n return commands", "def receive(self, command_list):\n for cmd in command_list:\n self._send_cmd_with_mapped_ids(cmd)", "def _run_commands(self, command_list):\n for cmd in command_list:\n print(cmd)\n if not self.dry_run:\n run(cmd)", "def _parse_and_build_commands(self):\n for root in self.roots:\n for commands in root.iter('commands'):\n for command_element in commands.iter('command'):\n try:\n self._collect_command(command_element)\n\n except Exception as exception:\n command_name = GLGenerator.get_command_name(command_element)\n print('Error processing command {}: {}'.format(command_name, str(exception)))\n raise\n\n extension_name_max_len = 0\n for extension in self.extensions:\n extension_name_max_len = max(extension_name_max_len, len(extension))\n\n enum_value = 1\n declarations = []\n map_entries = []\n case_entries = []\n\n for extension in sorted(set(self.extensions)):\n quoted_extension = '\"' + extension + '\"'\n declaration = f' Extension_{extension:{extension_name_max_len}} = {enum_value:>6}'\n map_entry = ' g_extension_map.insert(std::pair<std::string, Extension>({0:{1}}, Extension::Extension_{2:{3}}));'.format(\n quoted_extension, extension_name_max_len + 2, extension, extension_name_max_len\n )\n case_entry = ' case Extension::Extension_{0:{1}}: return \"{0}\";'.format(\n extension, extension_name_max_len\n )\n declarations.append(declaration)\n map_entries.append (map_entry)\n case_entries.append(case_entry)\n enum_value += 1\n\n declarations.append(f' Extension_Count = {enum_value:>6}')\n self.extension_enum_declarations = ',\\n'.join(declarations)\n self.extension_map_entries = '\\n'.join(map_entries)\n self.extension_case_entries = '\\n'.join(case_entries)\n\n commands = set(self.command_list)\n\n commands = sorted(commands)\n\n command_name_max_len = 0\n for command in commands:\n 
command_name_max_len = max(command_name_max_len, len(command))\n\n enum_value = 1\n declarations = []\n map_entries = []\n case_entries = []\n for command in commands:\n declaration = f' Command_{command:{command_name_max_len}} = {enum_value:>6}'\n map_entry = ' g_command_map.insert(std::pair<std::string, Command>({0:{1}}, Command::Command_{2:{1}}));'.format(\n '\"' + command + '\"', command_name_max_len, command\n )\n case_entry = ' case Command::Command_{0:{1}}: return \"{0}\";'.format(\n command, command_name_max_len\n )\n declarations.append(declaration)\n map_entries.append (map_entry)\n case_entries.append(case_entry)\n enum_value += 1\n\n declarations.append(' Command_Count = {:>6}'.format(enum_value))\n self.command_enum_declarations = ',\\n'.join(declarations)\n self.command_map_entries = '\\n'.join(map_entries)\n self.command_case_entries = '\\n'.join(case_entries)", "def parse_commands(self, commands):\n\n for command_str in commands:\n command_parts = command_str.split(' ')\n\n # Check if command string has at least 2 parts: '--cmd' and 'command_type'\n if len(command_parts) <= 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n # Extract command and parameters\n command_type = command_parts[1].lower()\n command_parameters = command_parts[2:len(command_parts)]\n\n # Form a command to be added to the command queue\n command = {}\n if command_type == 'load':\n # Check number of parameters\n if len(command_parameters) != 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n folder_path = command_parameters[0].replace('\"', '').strip()\n\n command['method'] = self.app_instance.image_source.load_images\n command['parameters'] = {\n 'folder_path': folder_path\n }\n\n elif command_type == 'align':\n # Check number of parameters\n if len(command_parameters) != 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n secondary_image_index = int(command_parameters[0])\n\n command['method'] = self.app_instance.align_nth_secondary_image\n command['parameters'] = {\n 'secondary_image_index': secondary_image_index\n }\n\n elif command_type == 'blend':\n # Check number of parameters\n if len(command_parameters) != 5:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n secondary_image_index = int(command_parameters[0])\n x = int(command_parameters[1])\n y = int(command_parameters[2])\n width = int(command_parameters[3])\n height = int(command_parameters[4])\n\n command['method'] = self.app_instance.blend_nth_secondary_image\n command['parameters'] = {\n 'secondary_image_index': secondary_image_index,\n 'x': x,\n 'y': y,\n 'width': width,\n 'height': height\n }\n\n elif command_type == 'save':\n # Check number of parameters\n if len(command_parameters) != 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n filename = command_parameters[0].replace('\"', '').strip()\n\n command['method'] = self.app_instance.save_result\n command['parameters'] = {\n 'filename': filename\n }\n\n else:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n print \"[INFO] Queuing command: \" + command_str\n\n self.command_queue.append(command)", "def 
load(self):\n\n self.commands = {\n # Usual text commands (e.g. \"/echo 123\")\n 'user': {},\n 'owner': {\n 'load': self.load,\n 'modprobe': self.modprobe,\n 'rmmod': self.rmmod\n },\n # Modules for bot's reaction to a different message types\n 'text': {},\n 'photo': {},\n 'audio': {},\n 'video': {},\n 'sticker': {},\n 'voice': {}\n }\n\n for file in os.listdir('modules'):\n if file.endswith('.py'):\n command_type, command = file.split('_', 1)\n self.modprobe(self, command[:-3])", "def _load_commands(self):\n\n entry_points = pkg_resources.iter_entry_points(\n config.PROVIDER_EP_NAMESPACE)\n for entry_point in entry_points:\n self.logger.debug('found provider %r', entry_point.name)\n self._commands[entry_point.name] = entry_point.load()", "def _get_commands(package_name):\n # pylint: disable=line-too-long\n def lookup(cmd, dct):\n if not cmd:\n return dct\n if len(cmd) == 1:\n return dct[cmd[0]]\n return lookup(cmd[1:], dct[cmd[0]])\n\n def walking_import(module, cmd, dct):\n car, cdr = cmd[0], cmd[1:]\n if cdr:\n walking_import(module, cdr, dct[car])\n elif car not in dct:\n __import__(module)\n dct.setdefault(car, {})['__module__'] = sys.modules[module]\n\n __import__(COMMANDS_PACKAGE_NAME)\n command_module = sys.modules[COMMANDS_PACKAGE_NAME]\n for _, module, _ in pkgutil.walk_packages(command_module.__path__, prefix=command_module.__name__+'.'):\n if not (module.endswith('__main__') or '.tests' in module):\n try:\n lookup(_command_as_list(module), _COMMANDS)\n except KeyError:\n walking_import(module, _command_as_list(module), _COMMANDS)\n return lookup(_command_as_list(package_name), _COMMANDS)", "def _load_command_dict(self, path=None):", "def commands(self, commands):\n\n self._commands = commands", "def loadCommands(self, objectName, fileName, encoding=\"utf-8\"):\n\n counter = 0\n commands = self.__commands\n\n # Method for being used as a decorator to share methods to the outside\n def share(func):\n name = \"%s.%s\" % (objectName, func.__name__)\n if name in commands:\n raise Exception(\"Command %s already exists!\" % name)\n\n commands[name] = func\n\n nonlocal counter\n counter += 1\n\n return func\n\n # Execute given file. Using clean new global environment\n # but add additional decorator for allowing to define shared methods\n # and the session object (self).\n code = open(fileName, \"r\", encoding=encoding).read()\n exec(compile(code, os.path.abspath(fileName), \"exec\"), {\"share\" : share, \"session\" : self})\n\n # Export destination name as global\n Console.info(\"Imported %s.\", Console.colorize(\"%s commands\" % counter, \"magenta\"))\n\n return counter", "def commands():" ]
[ "0.7030482", "0.61618656", "0.6115203", "0.5982748", "0.5949485", "0.58595926", "0.5814018", "0.5806937", "0.5794264", "0.5758835", "0.570823", "0.56881213", "0.5687735", "0.5672729", "0.5666066", "0.56065243", "0.5588865", "0.5588816", "0.5586607", "0.55086136", "0.5474133", "0.5464979", "0.5450885", "0.54421896", "0.5431658", "0.5430201", "0.5429099", "0.5427738", "0.54277366", "0.54274654" ]
0.8668142
0
This function creates a rectangle at a random point with random parameters. Returns an exit code.
def add_rand_rect() -> int:
    r = randint(10, 100)
    x = randint(r, WIDTH - r)
    y = randint(r, HEIGHT - r)
    vx = randint(-10, 10)
    vy = randint(-10, 10)
    color = COLORS[randint(0, len(COLORS) - 1)]
    add_rect(x, y, r, vx, vy, color, int(-100 / r))
    return 0
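The function relies on module-level names (randint, WIDTH, HEIGHT, COLORS, add_rect) that are not part of this row; the definitions below are assumed stand-ins, included only so the snippet above could be exercised, not the original module:

from random import randint

# Assumed context: screen size, a small palette, and a container for rectangles.
WIDTH, HEIGHT = 800, 600
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
rects = []

def add_rect(x, y, r, vx, vy, color, score):
    # Sketch: record the rectangle's state; a real implementation would also draw it.
    rects.append({'x': x, 'y': y, 'r': r, 'vx': vx, 'vy': vy,
                  'color': color, 'score': score})

add_rand_rect()   # appends one random rectangle to `rects`, assuming the function above is in scope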
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RandomCoordinate(): \r\n return ReturnRounded(np.random.uniform(-10,10))", "def phantom_rectangles(n_points,R):\n \n \n #Rescaling according to image size \n R[:,0] = R[:,0]*n_points/2\n R[:,1] = R[:,1]*n_points/2\n R[:,2] = R[:,2]*n_points/2\n R[:,3] = R[:,3]*n_points/2\n R[:,4] = R[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = R.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sui rettangoli\n x_new = x - R[k,0]\n y_new = y - R[k,1]\n\n u = abs(x_new*math.cos(R[k,4])+y_new*math.sin(R[k,4]))\n v = abs(-x_new*math.sin(R[k,4])+y_new*math.cos(R[k,4]))\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (u[i,j] < R[k,2]/2 and v[i,j] < R[k,3]/2):\n phantom1[i,j,k] = R[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def generateBoxPoints(frame_resolution, min_dim_rect = 80, max_dim_rect = 160, limit_x = (-1, -1), limit_y = (-1, -1)):\n \n randint = np.random.randint\n \n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n # Generate point 1 (pt1)\n \n if(limit_x != (-1, -1)): x1 = randint(limit_x[0], limit_x[1])\n else: x1 = randint(0, frame_resolution[0])\n \n if(limit_y != (-1, -1)): y1 = randint(limit_y[0], limit_y[1])\n else: y1 = randint(0, frame_resolution[1])\n \n pt1 = (x1, y1)\n \n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Generate point 2 (pt2)\n \n bad_point = True\n \n # Since the random generation pt2 can have invalid coordinate. So the script continue to generat point until a valid point is generated\n while(bad_point):\n x2 = x1 + random.choice((-1, 1)) * randint(min_dim_rect, max_dim_rect)\n y2 = y1 + random.choice((-1, 1)) * randint(min_dim_rect, max_dim_rect)\n \n if not (x2 > frame_resolution[0] or x2 < 0 or y2 > frame_resolution[1] or y2 < 0): bad_point = False\n \n if(limit_x != (-1, -1) and (x2 < limit_x[0] or x2 > limit_x[1])): bad_point = True\n if(limit_y != (-1, -1) and (y2 < limit_y[0] or y2 > limit_y[1])): bad_point = True\n \n pt2 = (x2, y2)\n \n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \n return pt1, pt2", "def mutate_point_rect(mutated_genome):\n seed = random.randint(0,1)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_rect(mutated_genome,index)\n else: #seed == 1:\n shift_point_rect(mutated_genome,index)", "def radon_rectangles(N,theta_vec,R, circle = False):\n \n #Rescaling according to image size \n R[:,0] = R[:,0]*N/2\n R[:,1] = (R[:,1])*N/2\n R[:,2] = (R[:,2])*N/2\n R[:,3] = (R[:,3])*N/2\n R[:,4] = R[:,4]*math.pi/180\n \n [t_vec, grid_t, grid_theta] = build_t_theta_pixel(N,theta_vec, circle=circle);\n (nrow, ncol) = R.shape;\n tmp = np.zeros((nrow,len(grid_theta)))\n \n for i in range(nrow):\n m = R[i,2]/2;\n n = R[i,3]/2;\n grid_theta_new = grid_theta - R[i,4];\n grid_t_new = (grid_t - R[i,0]*np.cos(grid_theta)- R[i,1]*np.sin(grid_theta));\n for j in range(len(grid_theta)):\n theta_new = grid_theta_new[j];\n t_new = grid_t_new[j];\n if theta_new== 0:\n if abs(t_new)< m:\n v1 = -n;\n v2 = n;\n else:\n v1 = 0;\n v2 = 0;\n #endif\n else:\n v1 = (t_new*np.cos(theta_new)- m)/np.sin(theta_new);\n v2 = (t_new*np.cos(theta_new)+ m)/np.sin(theta_new);\n #endif\n if theta_new == np.pi/2:\n if abs(t_new)< n:\n h1 = - m;\n h2 = m;\n else:\n h1 = 0;\n h2 = 0;\n #endif\n else:\n h1 = (n - 
t_new*np.sin(theta_new))/np.cos(theta_new);\n h2 = (- n - t_new*np.sin(theta_new))/np.cos(theta_new);\n #endif\n vmax = np.maximum(v1,v2);\n vmin = np.minimum(v1,v2);\n hmax = np.maximum(h1,h2);\n hmin = np.minimum(h1,h2);\n entryval = np.maximum(vmin,hmin);\n exitval = np.minimum(vmax,hmax);\n\n if (exitval - entryval) > 0:\n tmp[i,j] = R[i,5]*(exitval-entryval);\n else:\n tmp[i,j] = 0;\n #endif\n #endfor\n #endfor\n radvec = np.sum(tmp,axis = 0);\n analytical_sinogram = np.transpose(np.reshape(radvec,(len(theta_vec),len(t_vec))));\n\n return analytical_sinogram", "def create_asteroid_r(self):\n self.create_asteroid_common(8, self.colors[0], self.colors[0], enemy=True)", "def drawRectangle(x,y,width,height,rounding=0,ucoords=1):\n if ucoords:\n dislin.rlrnd(x,y,width,height,rounding)\n else:\n dislin.rndrec(x,y,width,height,rounding)", "def solid(t, coord, ii, n_pixels, random_values):\n\n\n return (100,100,100)", "def __init__(self, x, y):\n self.height = x\n self.width = y\n self.grid = self.initialize(self.height, self.width)\n self.randx = random.randint(0, self.height-1)\n self.randy = random.randint(0, self.width-1)\n #self.make()\n #self.show()", "def shift_point_rect(mutated_genome,index):\n Xval = random.randint(-int(imagewidth*0.1),int(imagewidth*0.1))\n Yval = random.randint(-int(imageheight*0.1),int(imageheight*0.1))\n rectangle = mutated_genome[index][2]\n seed = random.randint(0,1)\n oldpoint = rectangle[seed]\n newpoint = (oldpoint+Xval, newpoint+Yval)\n newrectangle = list(rectangle)\n newrectangle[seed] = newpoint\n mutated_genome[index][2] = tuple(newrectangle)", "def gen_rdm_points_square(polygon, size):\n minx, miny, maxx, maxy = polygon.bounds\n box_points = list(box(minx, miny, maxx, maxy, ccw=True).exterior.coords)\n x = np.random.uniform(low=box_points[0][0], high=box_points[2][0], size=size)\n y = np.random.uniform(low=box_points[0][1], high=box_points[2][1], size=size)\n return np.array(list(zip(x, y)))", "def circle(r=0):\n\tteta = 2*pi*random()\n\tx = (r+1)*cos(teta) + L//2\n\ty = (r+1)*sin(teta) + L//2\n\t\n\ti = int(x) + 1\n\tj = int(y) + 1\n\tprint(r)\n\treturn i,j", "def _test_rectangle(self):\n x = self.emulator.x\n y = self.emulator.y\n dxy = 1000\n logger.debug(\"Rectangle is set\")\n self.emulator.set_rectangle(\n self.emulator.vertex_pool.x_to_lon(x - dxy),\n self.emulator.vertex_pool.y_to_lat(y - dxy),\n self.emulator.vertex_pool.x_to_lon(x + dxy),\n self.emulator.vertex_pool.y_to_lat(y + dxy)\n )\n self.response(200)", "def create_block():\n global BLOCK\n posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)\n posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)\n BLOCK = c.create_oval(posx, posy,\n posx+SEG_SIZE, posy+SEG_SIZE,\n fill=\"red\")\n # print(posx, posy)\n return posx, posy", "def rect(rng, lines, columns):\n\n w = rng.randint(1, max(1, lines // 2))\n h = rng.randint(1, max(1, columns // 2))\n\n i = rng.randint(0, lines - h)\n j = rng.randint(0, columns - w)\n \n return i, j, w, h", "def rectangular(m, n, len1=1.0, len2=1.0, origin = (0.0, 0.0)):\n\n from anuga.config import epsilon\n\n delta1 = float(len1)/m\n delta2 = float(len2)/n\n\n #Calculate number of points\n Np = (m+1)*(n+1)\n\n class Index(object):\n\n def __init__(self, n,m):\n self.n = n\n self.m = m\n\n def __call__(self, i,j):\n return j+i*(self.n+1)\n\n\n index = Index(n,m)\n\n points = num.zeros((Np, 2), float)\n\n for i in range(m+1):\n for j in range(n+1):\n\n points[index(i,j),:] = [i*delta1 + origin[0], j*delta2 + origin[1]]\n\n #Construct 2 
triangles per rectangular element and assign tags to boundary\n #Calculate number of triangles\n Nt = 2*m*n\n\n\n elements = num.zeros((Nt, 3), int)\n boundary = {}\n nt = -1\n for i in range(m):\n for j in range(n):\n nt = nt + 1\n i1 = index(i,j+1)\n i2 = index(i,j)\n i3 = index(i+1,j+1)\n i4 = index(i+1,j)\n\n\n #Update boundary dictionary and create elements\n if i == m-1:\n boundary[nt, 2] = 'right'\n if j == 0:\n boundary[nt, 1] = 'bottom'\n elements[nt,:] = [i4,i3,i2] #Lower element\n nt = nt + 1\n\n if i == 0:\n boundary[nt, 2] = 'left'\n if j == n-1:\n boundary[nt, 1] = 'top'\n elements[nt,:] = [i1,i2,i3] #Upper element\n\n return points, elements, boundary", "def r(w,rangestart,rangeend):\r\n if w == 'r':\r\n print(random.random(rangestart , rangeend))\r\n if w == 'ri':\r\n print(random.randint(rangestart,rangeend))", "def asteroidCreator(numCorner,win):\n\n xCoor = []\n yCoor = []\n\n # Creating coordinates of the points\n coorRange = [i for i in range(-10,10) if i not in [0]] # to avoid 0\n\n for i in range(numCorner):\n xCoor.append(round(random.choice(coorRange)*random.uniform(0.01,1),2))\n yCoor.append(round(random.choice(coorRange)*random.uniform(0.01,1),2))\n\n # Sorting the coordinates\n bubbleSort(xCoor,len(xCoor))\n bubbleSort(yCoor,len(yCoor))\n\n\n # Isolating the extreme points\n xSmallest = xCoor.pop(0)\n xLargest = xCoor.pop()\n\n ySmallest = yCoor.pop(0)\n yLargest = yCoor.pop()\n\n # Shuffle the coordinates\n random.shuffle(xCoor)\n random.shuffle(yCoor)\n\n # Divide them into two sets\n xCoorLower = xCoor[:len(xCoor)//2]\n xCoorUpper = xCoor[len(xCoor)//2:]\n\n yCoorLower = yCoor[:len(yCoor)//2]\n yCoorUpper = yCoor[len(yCoor)//2:]\n\n # Append back the extreme points, and sort them again\n xCoorLower.append(xSmallest)\n xCoorLower.append(xLargest)\n xCoorUpper.append(xSmallest)\n xCoorUpper.append(xLargest)\n\n yCoorLower.append(ySmallest)\n yCoorLower.append(yLargest)\n yCoorUpper.append(ySmallest)\n yCoorUpper.append(yLargest)\n\n bubbleSort(xCoorLower,len(xCoorLower))\n bubbleSort(xCoorUpper,len(xCoorUpper))\n bubbleSort(yCoorLower,len(yCoorLower))\n bubbleSort(yCoorUpper,len(yCoorUpper))\n\n # Getting the vector lengths out of the points\n # We will get vectors in 4 directions from 4 lists\n xVectorLengths = []\n yVectorLengths = []\n\n for i in range(len(xCoorLower)-1):\n xVectorLengths.append(xCoorLower[i]-xCoorLower[i+1])\n for i in range(len(xCoorUpper)-1):\n xVectorLengths.append(xCoorUpper[i+1]-xCoorUpper[i])\n for i in range(len(yCoorLower)-1):\n yVectorLengths.append(yCoorLower[i]-yCoorLower[i+1])\n for i in range(len(yCoorUpper)-1):\n yVectorLengths.append(yCoorUpper[i+1]-yCoorUpper[i])\n\n random.shuffle(xVectorLengths)\n random.shuffle(yVectorLengths)\n\n # Creating the vectors\n vectors = []\n defaultVector = [0,0]\n\n for i in range(len(xVectorLengths)):\n defaultVector[0] = round(xVectorLengths[i],2)\n defaultVector[1] = round(yVectorLengths[i],2)\n vectors.append(defaultVector.copy())\n\n # Sorting vectors by their angle\n sortedVectors = []\n quadrant1 = []\n quadrant2 = []\n quadrant3 = []\n quadrant4 = []\n\n ### Dividing them by quadrants\n for vector in vectors:\n if vector[0] >= 0 and vector[1] >= 0:\n quadrant1.append(vector)\n elif vector[0] <= 0 and vector[1] >= 0:\n quadrant2.append(vector)\n elif vector[0] <= 0 and vector[1] <= 0:\n quadrant3.append(vector)\n elif vector[0] >= 0 and vector[1] <= 0:\n quadrant4.append(vector)\n\n ### Sorting them inside the quadrants\n quadrant1 = angleSort(quadrant1,1,len(quadrant1))\n quadrant2 = 
angleSort(quadrant2,2,len(quadrant2))\n quadrant3 = angleSort(quadrant3,3,len(quadrant3))\n quadrant4 = angleSort(quadrant4,4,len(quadrant4))\n\n ### Adding them up in order\n for vector in quadrant1:\n sortedVectors.append(vector)\n for vector in quadrant2:\n sortedVectors.append(vector)\n for vector in quadrant3:\n sortedVectors.append(vector)\n for vector in quadrant4:\n sortedVectors.append(vector)\n\n # Creating the points for the polygon\n points = []\n points = vectorsToPoints(sortedVectors,points)\n\n rightEdge = 0\n leftEdge = 0\n upperEdge = 0\n lowerEdge = 0\n\n # getting the boundaries for the asteroid\n for point in points:\n if point[0] > rightEdge:\n rightEdge = point[0]\n elif point[0] < leftEdge:\n leftEdge = point[0]\n if point[1] > upperEdge:\n upperEdge = point[1]\n elif point[1] < lowerEdge:\n lowerEdge = point[1]\n\n # Width and height are only required since it is a child of rotating_block class\n width = rightEdge - leftEdge\n height = upperEdge - lowerEdge\n\n centerPoint = [(rightEdge + leftEdge) / 2 , (upperEdge + lowerEdge) / 2]\n\n asteroid = pho.Asteroid(win,width,height,points,centerPoint[0],centerPoint[1])\n\n return asteroid", "def get_random_coords(width, height):\n return randrange(1, width-2), randrange(1, height-2)", "def regenerate(self, random_state):\n self._walls_body.geom.clear()\n corridor_width = variation.evaluate(self._corridor_width,\n random_state=random_state)\n corridor_length = variation.evaluate(self._corridor_length,\n random_state=random_state)\n self._current_corridor_length = corridor_length\n self._current_corridor_width = corridor_width\n\n self._ground_plane.pos = [corridor_length / 2, 0, 0]\n self._ground_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, corridor_width / 2, 1]\n\n self._left_plane.pos = [\n corridor_length / 2, corridor_width / 2, _SIDE_WALL_HEIGHT / 2]\n self._left_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._right_plane.pos = [\n corridor_length / 2, -corridor_width / 2, _SIDE_WALL_HEIGHT / 2]\n self._right_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._near_plane.pos = [\n -_CORRIDOR_X_PADDING, 0, _SIDE_WALL_HEIGHT / 2]\n self._near_plane.size = [corridor_width / 2, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._far_plane.pos = [\n corridor_length + _CORRIDOR_X_PADDING, 0, _SIDE_WALL_HEIGHT / 2]\n self._far_plane.size = [corridor_width / 2, _SIDE_WALL_HEIGHT / 2, 1]", "def generate_random_point(xmin,xmax,ymin,ymax):\n\tnp.random.seed()\n\tx_rand = np.random.uniform(xmin,xmax)\n\ty_rand = np.random.uniform(ymin,ymax)\n\treturn(x_rand,y_rand)", "def add_rand_circ() -> int:\n r = randint(10, 100)\n x = randint(r, WIDTH - r)\n y = randint(r, HEIGHT - r)\n vx = randint(-10, 10)\n vy = randint(-10, 10)\n color = COLORS[randint(0, len(COLORS) - 1)]\n add_circ(x, y, r, vx, vy, color, int(100 / r))\n return 0", "def trial(length, height):\n screen.refresh()\n global stimList\n global oddLength\n global oddHeight\n currentLength = int(maxLength / 4)\n currentHeight = int(maxHeight / 4)\n for i in range(stimAmt):\n if i == oddLocation:\n oddLength = currentLength\n oddHeight = currentHeight\n stimList.append(\n pg.draw.rect(\n screen.fg,\n PgTools.rand_color(),\n (currentLength, currentHeight, length, height,),\n )\n )\n PgTools.rand_pattern(\n screen.fg,\n (\n currentLength,\n currentHeight,\n ),\n (length, height),\n i=(randint(0, 2), randint(0, 1)),\n )\n if randShapes:\n PgTools.rand_shape(screen.fg, (currentLength, 
currentHeight),(length, height), oddSeed)\n else:\n stimList.append(\n pg.draw.rect(\n screen.fg,\n color,\n (currentLength, currentHeight, length, height,),\n )\n )\n PgTools.rand_pattern(\n screen.fg,\n (\n currentLength,\n currentHeight,\n ),\n (length, height),\n patColor,\n randNums,\n )\n if randShapes:\n PgTools.rand_shape(screen.fg, (currentLength, currentHeight),(length, height), regSeed)\n currentLength += maxLength / 4\n currentLength = int(currentLength)\n if (i + 1) % 3 == 0:\n currentLength = maxLength / 4\n currentLength = int(currentLength)\n currentHeight += maxHeight / 4\n currentHeight= int(currentHeight)", "def rect(r, theta):\n x = r * math.cos(theta)\n y = r * math.sin(theta)\n return x,y", "def test_rocket():\n ring = [(0,0), (10, 0), (15,5), (10,9), (1,7), (6,4), (0,0)]\n conv = ToPointsAndSegments()\n conv.add_polygon([ring])\n skel = calc_skel(conv, output=True, pause=True)\n print \"DONE\"", "def generate_point(width, height):\n x = random.randrange(0 - OFFSET, width + OFFSET, 1)\n y = random.randrange(0 - OFFSET, height + OFFSET, 1)\n return (x, y)", "def rand_inside(x1, y1, x2, y2):\n\n rx = map_between(random.random(), x1, x2)\n ry = map_between(random.random(), y1, y2)\n\n return rx, ry", "def boundaries_and_initialize():\n greenLower = (29, 86, 6) # define the lower and upper boundaries of the \"green\"\n greenUpper = (64, 255, 255)\n pts = [((200,300),(255,255,255), 0)]\n blanks = []\n linecolor = (0,0,0)\n counter = 1\n radius = 11\n return greenLower, greenUpper, pts, linecolor, counter, blanks, radius", "def spawn(self, y, x, h, w):\n self.pos = (np.random.randint(y, y + h), np.random.randint(x, x + w))", "def _rectangles(m, n):\n return m * (m+1) * n * (n+1) // 4" ]
[ "0.6723634", "0.6521465", "0.61302173", "0.6013927", "0.5988419", "0.5915515", "0.5871867", "0.5845545", "0.58222014", "0.58034813", "0.57898873", "0.5784966", "0.57652074", "0.5758353", "0.57454664", "0.5727684", "0.5677776", "0.5668007", "0.5667086", "0.56670827", "0.5663948", "0.5649943", "0.56276745", "0.56231594", "0.56195146", "0.56111985", "0.56073093", "0.56028074", "0.5590326", "0.5547702" ]
0.6561995
1
Multiplatform dependency resolution for PEX files. Given a pants configuration and a set of requirements, return a list of distributions that must be included in order to satisfy them. That may involve distributions for multiple platforms.
def resolve_multi(config, requirements, interpreter=None, platforms=None, conn_timeout=None, ttl=3600): distributions = dict() interpreter = interpreter or PythonInterpreter.get() if not isinstance(interpreter, PythonInterpreter): raise TypeError('Expected interpreter to be a PythonInterpreter, got %s' % type(interpreter)) install_cache = PythonSetup(config).scratch_dir('install_cache', default_name='eggs') platforms = get_platforms(platforms or config.getlist('python-setup', 'platforms', ['current'])) for platform in platforms: translator = Translator.default( install_cache=install_cache, interpreter=interpreter, platform=platform, conn_timeout=conn_timeout) obtainer = PantsObtainer( install_cache=install_cache, crawler=crawler_from_config(config, conn_timeout=conn_timeout), fetchers=fetchers_from_config(config) or [PyPIFetcher()], translators=translator) distributions[platform] = resolve(requirements=requirements, obtainer=obtainer, interpreter=interpreter, platform=platform) return distributions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resolve_multi(config,\r\n requirements,\r\n interpreter=None,\r\n platforms=None,\r\n conn_timeout=None,\r\n ttl=3600):\r\n distributions = dict()\r\n interpreter = interpreter or PythonInterpreter.get()\r\n if not isinstance(interpreter, PythonInterpreter):\r\n raise TypeError('Expected interpreter to be a PythonInterpreter, got %s' % type(interpreter))\r\n\r\n install_cache = PythonSetup(config).scratch_dir('install_cache', default_name='eggs')\r\n platforms = get_platforms(platforms or config.getlist('python-setup', 'platforms', ['current']))\r\n\r\n for platform in platforms:\r\n translator = Translator.default(\r\n install_cache=install_cache,\r\n interpreter=interpreter,\r\n platform=platform,\r\n conn_timeout=conn_timeout)\r\n\r\n obtainer = PantsObtainer(\r\n install_cache=install_cache,\r\n crawler=crawler_from_config(config, conn_timeout=conn_timeout),\r\n fetchers=fetchers_from_config(config) or [PyPIFetcher()],\r\n translators=translator)\r\n\r\n distributions[platform] = resolve(requirements=requirements,\r\n obtainer=obtainer,\r\n interpreter=interpreter,\r\n platform=platform)\r\n\r\n return distributions", "def resolve(requirements, obtainer=None, interpreter=None, platform=None):\r\n cache = _DistributionCache()\r\n interpreter = interpreter or PythonInterpreter.get()\r\n platform = platform or Platform.current()\r\n obtainer = obtainer or Obtainer.default(platform=platform, interpreter=interpreter)\r\n\r\n requirements = maybe_requirement_list(requirements)\r\n distribution_set = defaultdict(list)\r\n requirement_set = defaultdict(list)\r\n processed_requirements = set()\r\n\r\n def packages(requirement, existing=None):\r\n if existing is None:\r\n existing = obtainer.iter(requirement)\r\n return [package for package in existing\r\n if package.satisfies(requirement)\r\n and package.compatible(interpreter.identity, platform)]\r\n\r\n def requires(package, requirement):\r\n if not cache.has(package):\r\n dist = obtainer.obtain(package)\r\n if dist is None:\r\n raise Untranslateable('Package %s is not translateable.' % package)\r\n if not distribution_compatible(dist, interpreter, platform):\r\n raise Untranslateable('Could not get distribution for %s on appropriate platform.' 
%\r\n package)\r\n cache.put(package, dist)\r\n dist = cache.get(package)\r\n return dist.requires(extras=requirement.extras)\r\n\r\n while True:\r\n while requirements:\r\n requirement = requirements.pop(0)\r\n requirement_set[requirement.key].append(requirement)\r\n # TODO(wickman) This is trivially parallelizable\r\n distribution_list = distribution_set[requirement.key] = packages(\r\n requirement,\r\n existing=distribution_set.get(requirement.key))\r\n if not distribution_list:\r\n raise Unsatisfiable('Cannot satisfy requirements: %s' % requirement_set[requirement.key])\r\n\r\n # get their dependencies\r\n for requirement_key, requirement_list in requirement_set.items():\r\n new_requirements = OrderedSet()\r\n highest_package = distribution_set[requirement_key][0]\r\n for requirement in requirement_list:\r\n if requirement in processed_requirements:\r\n continue\r\n new_requirements.update(requires(highest_package, requirement))\r\n processed_requirements.add(requirement)\r\n requirements.extend(list(new_requirements))\r\n\r\n if not requirements:\r\n break\r\n\r\n to_activate = set()\r\n for distributions in distribution_set.values():\r\n to_activate.add(cache.get(distributions[0]))\r\n return to_activate", "def _resolve_multi(self, interpreter, requirements, find_links):\n python_setup = PythonSetup.global_instance()\n python_repos = PythonRepos.global_instance()\n distributions = {}\n fetchers = python_repos.get_fetchers()\n fetchers.extend(Fetcher([path]) for path in find_links)\n\n for platform in python_setup.platforms:\n requirements_cache_dir = os.path.join(python_setup.resolver_cache_dir,\n str(interpreter.identity))\n distributions[platform] = resolve(\n requirements=[req.requirement for req in requirements],\n interpreter=interpreter,\n fetchers=fetchers,\n platform=None if platform == 'current' else platform,\n context=python_repos.get_network_context(),\n cache=requirements_cache_dir,\n cache_ttl=python_setup.resolver_cache_ttl)\n\n return distributions", "def get_setup_requires(dist):\n reqs = dist.command_options.get('metadata', {}).get('setup_requires')\n if reqs:\n return pkg_resources.parse_requirements([i.strip()\n for i in reqs[1].split('\\n')\n if i.strip()])\n return []", "def get_setup_requires(dist):\n reqs = dist.command_options.get('metadata', {}).get('setup_requires')\n if reqs:\n return pkg_resources.parse_requirements([i.strip()\n for i in reqs[1].split('\\n')\n if i.strip()])\n return []", "def check_requirements(config=None):\n for dependency, module_requirements in (\n requirements(config, include_conditional=False).items()):\n for module_requirement in module_requirements:\n if \">=\" in module_requirement:\n module_name, required_version = module_requirement.split(\">=\")\n version_test = \">=\"\n elif \"==\" in module_requirement:\n module_name, required_version = module_requirement.split(\"==\")\n version_test = \"==\"\n else:\n module_name = module_requirement\n version_test = None\n\n try:\n module = __import__(module_name)\n except ImportError:\n logging.exception(\n \"Can't import %r which is part of %r\",\n module_name, dependency\n )\n raise MissingRequirementError(\n \"Can't import %r which is part of %r\"\n % (module_name, dependency), module_name, dependency\n )\n version = getattr(module, \"__version__\", None)\n file_path = getattr(module, \"__file__\", None)\n logger.info(\n \"Using %r version %r from %r to satisfy %r\",\n module_name, version, file_path, dependency\n )\n\n if version_test == \">=\":\n if version is None:\n raise 
MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) < LooseVersion(required_version):\n raise MissingRequirementError(\n \"Version of %r in %r is too old. %r < %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )\n elif version_test == \"==\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) != LooseVersion(required_version):\n raise MissingRequirementError(\n \"Unexpected version of %r in %r. %r != %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )", "def debian_dependencies(self):\n dependencies = set()\n for requirement in self.python_requirements:\n debian_package_name = self.converter.transform_name(requirement.project_name, *requirement.extras)\n if requirement.specs:\n for constraint, version in requirement.specs:\n try:\n version = self.converter.transform_version(self, requirement.project_name, version)\n except:\n version = 'dev'\n if version == 'dev':\n # Requirements like 'pytz > dev' (celery==3.1.16) don't\n # seem to really mean anything to pip (based on my\n # reading of the 1.4.x source code) but Debian will\n # definitely complain because version strings should\n # start with a digit. In this case we'll just fall\n # back to a dependency without a version specification\n # so we don't drop the dependency.\n dependencies.add(debian_package_name)\n elif constraint == '==':\n dependencies.add('%s (= %s)' % (debian_package_name, version))\n elif constraint == '~=':\n dependencies.add('%s (>= %s)' % (debian_package_name, version))\n elif constraint == '!=':\n values = (debian_package_name, version, debian_package_name, version)\n dependencies.add('%s (<< %s) | %s (>> %s)' % values)\n elif constraint == '<':\n dependencies.add('%s (<< %s)' % (debian_package_name, version))\n elif constraint == '>':\n dependencies.add('%s (>> %s)' % (debian_package_name, version))\n elif constraint in ('<=', '>='):\n dependencies.add('%s (%s %s)' % (debian_package_name, constraint, version))\n else:\n msg = \"Conversion specifier not supported! 
(%r used by Python package %s)\"\n raise Exception(msg % (constraint, self.python_name))\n else:\n dependencies.add(debian_package_name)\n dependencies = sorted(dependencies)\n logger.debug(\"Debian dependencies of %s: %r\", self, dependencies)\n return dependencies", "def install_requires():\n return reqs(\"requirements.txt\")", "def install_requires():\n return reqs('requirements.txt')", "def get_requirements():\n name = 'pypeit/requirements.txt'\n\n requirements_file = os.path.join(os.path.dirname(__file__), name)\n install_requires = [line.strip().replace('==', '>=') for line in open(requirements_file)\n if not line.strip().startswith('#') and line.strip() != '']\n return install_requires", "def install_deps():\n default = open('requirements.txt', 'r').readlines()\n new_pkgs = []\n links = []\n for resource in default:\n if 'git+https' in resource:\n pkg = resource.split('#')[-1]\n links.append(resource.strip())\n new_pkgs.append(pkg.replace('egg=', '').rstrip())\n else:\n new_pkgs.append(resource.strip())\n return new_pkgs, links", "def process_requirements(requirements, version=None):\n if requirements is None:\n return []\n\n if isinstance(requirements, list):\n return requirements\n\n if isinstance(requirements, dict):\n # The version \"dev\" should always compare as greater than any exisiting versions.\n dev_numeric = \"9999.9999.9999\"\n\n if version == DEV_VERSION:\n version = dev_numeric\n\n for ver_spec, packages in requirements.items():\n op_and_ver_pairs = map(get_operator_and_version, ver_spec.split(\",\"))\n match_all = all(\n comp_op(\n Version(version),\n Version(dev_numeric if req_ver == DEV_VERSION else req_ver),\n )\n for comp_op, req_ver in op_and_ver_pairs\n )\n if match_all:\n return packages\n return []\n\n raise TypeError(\"Invalid object type for `requirements`: '{}'\".format(type(requirements)))", "def projects_from_requirements(requirements_path):\n reqs = pip.req.parse_requirements(requirements_path)\n return [req.name for req in reqs]", "def _package_ids_satisfying_requirement(pool, requirement):\n for package in pool.what_provides(requirement):\n yield pool.package_id(package)", "def _satisfied(self, req, source=None):\n dists = [dist for dist in self._env[req.project_name] if (\n dist in req and (\n dist.location not in self._site_packages or\n self.allow_site_package_egg(dist.project_name)))]\n if not dists:\n log.debug('We have no distributions for %s that satisfies %r.',\n req.project_name, str(req))\n return None, self._obtain(req, source)\n\n for dist in dists:\n if (dist.precedence == pkg_resources.DEVELOP_DIST):\n log.debug('We have a develop egg: %s', dist)\n return dist, None\n\n return easy_install.Installer._satisfied(self, req, source)", "def getRequirements():\n\n \n cudaLibsOk = checkCUDAisAvailable() \n \n conditionalRequirements = []\n if cudaLibsOk:\n conditionalRequirements += [\"tensorflow-gpu==1.15.3\", ]\n else:\n print(\"\\n CUDA it's not available in your machine.\")\n print(\" You won't be able to use the GPU support.\\n\")\n #if olderPip or olderSetuptools:\n #tfRequirement = \"tensorflow==1.15.0\"\n #else:\n tfRequirement = \"tensorflow==1.15.3\"\n \n conditionalRequirements += [tfRequirement]\n\n return conditionalRequirements", "def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]:\n import pkg_resources\n\n failed_pkgs_msgs: List[str] = []\n conflicting_pkgs_msgs: List[str] = []\n\n for req in requirements:\n try:\n pkg_resources.require(req)\n except pkg_resources.DistributionNotFound as dnf:\n 
failed_pkgs_msgs.append(dnf.report())\n except pkg_resources.VersionConflict as vc:\n conflicting_pkgs_msgs.append(vc.report())\n except Exception:\n msg.warn(\n f\"Unable to check requirement: {req} \"\n \"Checks are currently limited to requirement specifiers \"\n \"(PEP 508)\"\n )\n\n if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs):\n msg.warn(\n title=\"Missing requirements or requirement conflicts detected. Make sure your Python environment is set up \"\n \"correctly and you installed all requirements specified in your project's requirements.txt: \"\n )\n for pgk_msg in failed_pkgs_msgs + conflicting_pkgs_msgs:\n msg.text(pgk_msg)\n\n return len(failed_pkgs_msgs) > 0, len(conflicting_pkgs_msgs) > 0", "def find_requirements(root: str) -> Optional[Dict[str, bool]]:\n findings = {\n file_name: os.path.isfile(os.path.join(root, file_name))\n for file_name in [\"requirements.txt\", \"Pipfile\", \"Pipfile.lock\"]\n }\n\n if not sum(findings.values()):\n return None\n return findings", "def python_requirements(self):\n try:\n dist = self.requirement.pip_requirement.get_dist()\n extras = self.requirement.pip_requirement.extras\n requirements = list(dist.requires(extras))\n except Exception:\n logger.warning(\"Failed to determine installation requirements of %s \"\n \"using pkg-resources, falling back to old implementation.\",\n self, exc_info=True)\n requirements = self.python_requirements_fallback\n logger.debug(\"Python requirements of %s: %r\", self, requirements)\n return requirements", "def get_requires_for_build_wheel(config_settings=None):\n info = read_flit_config(pyproj_toml)\n # If we can get version & description from pyproject.toml (PEP 621), or\n # by parsing the module (_via_ast), we don't need any extra\n # dependencies. If not, we'll need to try importing it, so report any\n # runtime dependencies as build dependencies.\n want_summary = 'description' in info.dynamic_metadata\n want_version = 'version' in info.dynamic_metadata\n\n module = Module(info.module, Path.cwd())\n docstring, version = get_docstring_and_version_via_ast(module)\n\n if (want_summary and not docstring) or (want_version and not version):\n return info.metadata.get('requires_dist', [])\n else:\n return []", "def _resolve_dependencies(self):\n matching_versions = dict()\n\n # Initialization of the BFS\n bfs_stack = list()\n for requirement_name, spec_str in sorted(self.spec_requirements, key=lambda x: x[0].lower()):\n self._add_spec(requirement_name, spec_str)\n bfs_stack.append(requirement_name)\n\n # Main loop\n while bfs_stack:\n # Stack Unwind\n requirement_name = bfs_stack.pop(0)\n available_versions = self._get_available_versions(requirement_name)\n spec = self._get_spec(requirement_name)\n best_matching_version = spec.select(available_versions)\n if best_matching_version is None:\n msg = 'Unmatched dependency for {}\\nSpecification requirement: {}\\nAvailable versions: {}\\n' \\\n 'Use NPM semver calculator to resolve: https://semver.npmjs.com/'\n error = msg.format(requirement_name, spec, ', '.join(reversed(map(str, available_versions))))\n raise RequirementMatchError(error)\n\n matching_versions[requirement_name] = best_matching_version\n\n # BFS stack population with dependencies\n dependencies = self._get_dependencies(requirement_name, best_matching_version)\n for dependency_name, dependency_version in dependencies:\n self._add_spec(dependency_name, dependency_version)\n bfs_stack.append(dependency_name)\n\n return matching_versions", "def parse_requirements(filename, *args): # pragma: no 
cover\n # type: (str, str) -> Tuple[InstallReqSet, pip.index.PackageFinder]\n pip_options, session = build_pip_session(*args)\n repository = PyPiRepository(pip_options, session)\n requirements = pip.req.parse_requirements(\n filename,\n finder=repository.finder,\n session=repository.session,\n options=pip_options)\n return set(requirements), repository.finder", "def _resolve_depenency_map(\n requested_requirements, # type: t.Iterable[Requirement]\n galaxy_apis, # type: t.Iterable[GalaxyAPI]\n concrete_artifacts_manager, # type: ConcreteArtifactsManager\n preferred_candidates, # type: t.Iterable[Candidate] | None\n no_deps, # type: bool\n allow_pre_release, # type: bool\n upgrade, # type: bool\n include_signatures, # type: bool\n): # type: (...) -> dict[str, Candidate]\n if not HAS_RESOLVELIB:\n raise AnsibleError(\"Failed to import resolvelib, check that a supported version is installed\")\n if not HAS_PACKAGING:\n raise AnsibleError(\"Failed to import packaging, check that a supported version is installed\")\n try:\n dist = distribution('ansible-core')\n except Exception:\n req = None\n else:\n req = next((rr for r in (dist.requires or []) if (rr := PkgReq(r)).name == 'resolvelib'), None)\n finally:\n if req is None:\n # TODO: replace the hardcoded versions with a warning if the dist info is missing\n # display.warning(\"Unable to find 'ansible-core' distribution requirements to verify the resolvelib version is supported.\")\n if not RESOLVELIB_LOWERBOUND <= RESOLVELIB_VERSION < RESOLVELIB_UPPERBOUND:\n raise AnsibleError(\n f\"ansible-galaxy requires resolvelib<{RESOLVELIB_UPPERBOUND.vstring},>={RESOLVELIB_LOWERBOUND.vstring}\"\n )\n elif not req.specifier.contains(RESOLVELIB_VERSION.vstring):\n raise AnsibleError(f\"ansible-galaxy requires {req.name}{req.specifier}\")\n\n collection_dep_resolver = build_collection_dependency_resolver(\n galaxy_apis=galaxy_apis,\n concrete_artifacts_manager=concrete_artifacts_manager,\n user_requirements=requested_requirements,\n preferred_candidates=preferred_candidates,\n with_deps=not no_deps,\n with_pre_releases=allow_pre_release,\n upgrade=upgrade,\n include_signatures=include_signatures,\n )\n try:\n return collection_dep_resolver.resolve(\n requested_requirements,\n max_rounds=2000000, # NOTE: same constant pip uses\n ).mapping\n except CollectionDependencyResolutionImpossible as dep_exc:\n conflict_causes = (\n '* {req.fqcn!s}:{req.ver!s} ({dep_origin!s})'.format(\n req=req_inf.requirement,\n dep_origin='direct request'\n if req_inf.parent is None\n else 'dependency of {parent!s}'.\n format(parent=req_inf.parent),\n )\n for req_inf in dep_exc.causes\n )\n error_msg_lines = list(chain(\n (\n 'Failed to resolve the requested '\n 'dependencies map. Could not satisfy the following '\n 'requirements:',\n ),\n conflict_causes,\n ))\n raise raise_from( # NOTE: Leading \"raise\" is a hack for mypy bug #9717\n AnsibleError('\\n'.join(error_msg_lines)),\n dep_exc,\n )\n except CollectionDependencyInconsistentCandidate as dep_exc:\n parents = [\n \"%s.%s:%s\" % (p.namespace, p.name, p.ver)\n for p in dep_exc.criterion.iter_parent()\n if p is not None\n ]\n\n error_msg_lines = [\n (\n 'Failed to resolve the requested dependencies map. 
'\n 'Got the candidate {req.fqcn!s}:{req.ver!s} ({dep_origin!s}) '\n 'which didn\\'t satisfy all of the following requirements:'.\n format(\n req=dep_exc.candidate,\n dep_origin='direct request'\n if not parents else 'dependency of {parent!s}'.\n format(parent=', '.join(parents))\n )\n )\n ]\n\n for req in dep_exc.criterion.iter_requirement():\n error_msg_lines.append(\n '* {req.fqcn!s}:{req.ver!s}'.format(req=req)\n )\n\n raise raise_from( # NOTE: Leading \"raise\" is a hack for mypy bug #9717\n AnsibleError('\\n'.join(error_msg_lines)),\n dep_exc,\n )\n except ValueError as exc:\n raise AnsibleError(to_native(exc)) from exc", "def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]", "def check_requirements():\n debug(\"check_requirements\")\n needed = Requirements(Project).find_missing_requirements()\n if needed:\n info(\"Please add the following to your %s file:\\n\" % 'requirements.txt')\n info(\"\\n\".join(str(needed)))\n else:\n info(\"Your %s includes all known herringlib task requirements\" % 'requirements.txt')", "def get_dependencies():\n return config.check_driver_dependencies(\n __virtualname__, {\"profitbricks\": HAS_PROFITBRICKS}\n )", "def requires_package(prerequisites):\n return check_prerequisites(prerequisites, checker=_check_py_package)", "def get_required_packages(file_contents):\n # Make sure the only ``install_requires`` happens in the\n # call to setup()\n if file_contents.count(INST_REQS_KWARG) != 1:\n raise ValueError('Expected only one use of keyword',\n INST_REQS_KWARG, file_contents)\n # Make sure the only usage of ``install_requires`` is to set\n # install_requires=REQUIREMENTS.\n keyword_stmt = INST_REQS_KWARG + '=' + REQ_VAR\n if file_contents.count(keyword_stmt) != 1:\n raise ValueError('Expected keyword to be set with variable',\n INST_REQS_KWARG, REQ_VAR, file_contents)\n # Split file on ``REQUIREMENTS`` variable while asserting that\n # it only appear twice.\n _, reqs_section, _ = file_contents.split(REQ_VAR)\n # Find ``REQUIREMENTS`` list variable defined in ``reqs_section``.\n reqs_begin = reqs_section.index('[')\n reqs_end = reqs_section.index(']') + 1\n\n # Convert the text to an actual list, but make sure no\n # locals or globals can be used.\n reqs_list_text = reqs_section[reqs_begin:reqs_end]\n # We use literal_eval() because it limits to evaluating\n # strings that only consist of a few Python literals: strings,\n # numbers, tuples, lists, dicts, booleans, and None.\n requirements = ast.literal_eval(reqs_list_text)\n\n # Take the list of requirements and strip off the package name\n # from each requirement.\n result = []\n for required in requirements:\n parts = required.split()\n result.append(parts[0])\n return result", "def filter_working_set_hard(working_set, requirements):\n\n retval = pkg_resources.WorkingSet([])\n\n for req in requirements:\n dists = working_set.require(req)\n for dist in dists: retval.add(dist)\n\n return retval", "def dependency_check(dependency_set=CORE, exit_on_failure=True):\n verify_python_version()\n \n disable_warnings()\n\n platform = get_current_platform()\n\n #\n # Check for missing python modules\n #\n failed_deps = []\n pip_distributions = pip.get_installed_distributions()\n \n for w3af_req in platform.PIP_PACKAGES[dependency_set]:\n for dist in pip_distributions:\n if w3af_req.package_name.lower() == dist.project_name.lower():\n\n 
w3af_req_version = str(Version(w3af_req.package_version))\n dist_version = str(dist.version)\n\n if w3af_req_version == dist_version:\n # It's installed and the version matches!\n break\n else:\n failed_deps.append(w3af_req)\n\n #\n # Check for missing operating system packages\n #\n missing_os_packages = []\n for os_package in platform.SYSTEM_PACKAGES[dependency_set]:\n if not platform.os_package_is_installed(os_package):\n missing_os_packages.append(os_package)\n \n os_packages = list(set(missing_os_packages))\n\n # All installed?\n if not failed_deps and not os_packages:\n # False means: do not exit()\n enable_warnings()\n return False\n\n generate_requirements_txt(failed_deps)\n script_path = generate_helper_script(platform.PKG_MANAGER_CMD, os_packages,\n platform.PIP_CMD, failed_deps)\n\n #\n # Report the missing system packages\n #\n msg = ('w3af\\'s requirements are not met, one or more third-party'\n ' libraries need to be installed.\\n\\n')\n \n if os_packages:\n missing_pkgs = ' '.join(os_packages)\n \n msg += ('On %s systems please install the following operating'\n ' system packages before running the pip installer:\\n'\n ' %s %s\\n')\n print(msg % (platform.SYSTEM_NAME, platform.PKG_MANAGER_CMD,\n missing_pkgs))\n \n #\n # Report all missing python modules\n # \n if failed_deps:\n # pylint: disable=E1101\n msg = ('Your python installation needs the following modules'\n ' to run w3af:\\n')\n msg += ' ' + ' '.join([fdep.module_name for fdep in failed_deps])\n print(msg)\n print('\\n')\n # pylint: enable=E1101\n \n #\n # Report missing pip packages\n #\n not_git_pkgs = [fdep for fdep in failed_deps if not fdep.is_git]\n git_pkgs = [fdep.git_src for fdep in failed_deps if fdep.is_git]\n \n msg = ('After installing any missing operating system packages, use'\n ' pip to install the remaining modules:\\n')\n \n if not_git_pkgs:\n cmd = generate_pip_install_non_git(platform.PIP_CMD, not_git_pkgs)\n msg += ' %s\\n' % cmd\n \n if git_pkgs:\n for missing_git_pkg in git_pkgs:\n msg += ' %s\\n' % generate_pip_install_git(platform.PIP_CMD,\n missing_git_pkg)\n \n print(msg)\n \n msg = 'A script with these commands has been created for you at %s'\n print(msg % script_path)\n \n enable_warnings()\n platform.after_hook()\n \n if exit_on_failure:\n sys.exit(1)\n else:\n return True" ]
[ "0.7110897", "0.6849487", "0.6503301", "0.61512494", "0.61512494", "0.60071385", "0.5982181", "0.59683037", "0.5945177", "0.5940623", "0.59356564", "0.5918351", "0.5888679", "0.58608896", "0.58577836", "0.58559006", "0.5855422", "0.58526754", "0.58373266", "0.58249384", "0.5817922", "0.5813759", "0.57853115", "0.57790625", "0.5758196", "0.57518274", "0.5745865", "0.5743946", "0.5731266", "0.5730935" ]
0.71465963
0
Expand the init state parameter into the full state vector.
def _state_from_init(self, init_state: np.ndarray): assert ( self._init_space.shape == self._state_space.shape ), "Must override _state_from_init if init state space differs from state space!" return init_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_state(self) -> None:\n self.state = np.zeros(self.shape, dtype=int)", "def from_vect(self,instate):\n instate = np.reshape(instate,self.shape())\n statearr = self.to_array()\n statearr.values = instate\n self.update(statearr.to_dataset(dim='variable'))", "def initialize_state(self):\n super(InverseChain, self).initialize_state()", "def initial_state(self, init_cand_states, x):\n self.x = x[0].numpy()\n self.input_length = len(self.x)\n r = np.full((self.input_length, 2), self.logzero, dtype=np.float32)\n r[0, 1] = self.x[0, self.blank]\n for i in tf.range(1, self.input_length):\n r[i, 1] = r[i - 1, 1] + self.x[i, self.blank]\n self.init_state = r\n init_cand_states.append(\n tf.convert_to_tensor(np.array([[r] * self.num_classes]))\n )\n self.state_index = len(init_cand_states) - 1\n init_cand_states.append(\n tf.convert_to_tensor(np.array([[0] * self.num_classes]))\n )\n self.score_index = len(init_cand_states) - 1\n return init_cand_states", "def reset_reservoir(self):\n self.state = np.zeros((self.state_size,1),dtype=self.typefloat)", "def init(init_state) -> GelmanRubinState:\n n_chains, n_dims = init_state.position.shape\n w_state = w_init(n_chains, n_dims)\n return GelmanRubinState(w_state, 0, jnp.nan)", "def update_init_roberta_model_state(state):\n for k in list(state.keys()):\n if \".lm_head.\" in k or \"version\" in k:\n del state[k]\n continue\n # remove 'encoder/decoder.sentence_encoder.' from the key\n assert k.startswith(\"encoder.sentence_encoder.\") or k.startswith(\n \"decoder.sentence_encoder.\"\n ), f\"Cannot recognize parameter name {k}\"\n if \"layernorm_embedding\" in k:\n new_k = k.replace(\".layernorm_embedding.\", \".emb_layer_norm.\")\n state[new_k[25:]] = state[k]\n else:\n state[k[25:]] = state[k]\n del state[k]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n 
[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]" ]
[ "0.6210097", "0.6167448", "0.60694176", "0.6065781", "0.60387975", "0.5996124", "0.5950681", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217", "0.5912217" ]
0.63458675
0
Initialize animation. Called by first render call.
def _init_anim(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n super().__init__()\n self._active = False\n # Counter, used in the animation\n self._time = 0\n # Store the current image id, initially it's 'default'\n self._image = 'default'", "def _init_frame(self : \"animation\",\n init_frame : \"matplotlib.figure.Figure\",\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\"\n ):\n self._cframe = init_frame.canvas.copy_from_bbox(init_ax.bbox)", "def _animation_init(self):\n\n self.animation_ax.set_xlim(self.plant.workspace_range[0][0],\n self.plant.workspace_range[0][1])\n self.animation_ax.set_ylim(self.plant.workspace_range[1][0],\n self.plant.workspace_range[1][1])\n self.animation_ax.set_xlabel(\"x position [m]\")\n self.animation_ax.set_ylabel(\"y position [m]\")\n for ap in self.animation_plots[:-1]:\n ap.set_data([], [])\n self.animation_plots[-1].set_text(\"t = 0.000\")\n\n self.tau_arrowarcs = []\n self.tau_arrowheads = []\n for link in range(self.plant.n_links):\n arc, head = get_arrow(radius=0.001,\n centX=0,\n centY=0,\n angle_=110,\n theta2_=320,\n color_=\"red\")\n self.tau_arrowarcs.append(arc)\n self.tau_arrowheads.append(head)\n self.animation_ax.add_patch(arc)\n self.animation_ax.add_patch(head)\n\n return self.animation_plots + self.tau_arrowarcs + self.tau_arrowheads", "def __init__(self):\n super().__init__()\n self.texture = arcade.load_texture(\":resources:/images/enemies/slimeBlue.png\")\n\n # Reset the viewport, necessary if we have a scrolling game and we need\n # to reset the viewport back to the start so we can see what we draw.\n arcade.set_viewport(0, constants.SCREEN_WIDTH - 1, 0, constants.SCREEN_HEIGHT - 1)", "def start(self):\n self.setup_initializer()\n self.setup_fader()\n self.fade_out_duration = 1.2", "def init():\n uanim.set_data([],[])\n return uanim,", "def initialize(self) -> None:\n self.simulation = self.initialize_simulation()\n width, height = get_window_resolution()\n display_dim = ((0, width), (0, height))\n self.coord_mapper = CoordinateMapper2D(*self.simulation.dim, *display_dim)\n self.simple_pygame.all_sprites.empty()\n self.initialize_visualization()", "def Start(self): # this is used to start the object\n ani = anim.FuncAnimation(self.f, self.animate, interval=1000)\n # animating object wth 1 sec gap\n self.plt_0.tight_layout()\n self.plt_0.show()\n # showing the plot", "def initialize(self):\n result = pygame.init()\n pygame.font.init()\n pygame.display.set_caption('gomoku TDD')\n self.screen = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n self.clock = pygame.time.Clock()\n self.smallfont = pygame.font.Font(None, 40)\n self.isinitialized = True", "def init_graphics(self):\n if type(self.image_ref) is Surface:\n # This is the case for the special visual effect\n self.image = self.image_ref\n else:\n image = GLOBAL.img(self.image_ref)\n if type(image) is tuple:\n # for decode purpose\n self.image = Surface(TILESIZE_SCREEN)\n self.image.fill(image)\n elif type(image) is list or type(image) is dict:\n self.animated = True\n self.current_frame = 0\n self.last_update = 0\n if type(image) is list:\n self.list_image = image\n self.image = self.list_image[self.current_frame]\n else:\n self.last_direction = (1, 0)\n self.dict_image = image\n self.image = self.dict_image['E'][self.current_frame]\n else:\n self.image = image\n self._reposition_rect()", "def __init__(self):\n\n #create initial tile array and animation dictionary for walkonto animations \n self.array = []\n self.animations = {}", "def start_animation(self) -> None:\n increment_values = {0: 1, 
self.original_height: -1}\n self.increment = increment_values.get(self.current_height, 0) # Compressed if", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\t# initial state of all attributes\n\t\tself._start = dict()\t\n\t\tfor attr in self._end:\n\t\t\tsep = attr.split('__')\n\t\t\tsubtarget, subattr = eval('.'.join(['self.target']+sep[:-1])), sep[-1]\n\t\t\tself._start[attr] = getattr(subtarget, subattr)\n\t\t# start time\n\t\tif not self._startticks:\n\t\t\tself._startticks = _pg.time.get_ticks()\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def __init__(self,b=5,alpha=1.0):\n wanimation.__init__(self,lambda x:float(b*x),self.render_function,alpha)", "def start_animation(self):\n\t\ttime.sleep(1)\n\t\tself.fishbowl.animate_balls()", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tstartticks = self._startticks if self.startticks else _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = startticks\n\t\t\tanim.start()\n\t\t\tstartticks += anim.duration\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tif not self._startticks:\n\t\t\tself._startticks = _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = self._startticks\n\t\t\tanim.start()\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def __init__(self):\n pygame.init()\n\n self.settings = Settings()\n\n self.scenes = {\"menu\": MenuScene(),\n \"settings\": SettingsScene(),\n \"score\": ScoreScene(),\n \"game\": GameScene(),\n \"pause\": PauseScene(),\n \"game_over\": GameOverScene(),\n \"logo\": LogoScene()}\n self.scene_name = \"logo\" # start scene\n self.previous_scene_name = None\n self.scene = self.scenes[self.scene_name]\n\n self.__manager = ResourceManager()\n self.__display = self.settings.get_display()\n self.__display.fill(BACKGROUND_COLOR)\n pygame.display.flip()", "def __init__(self):\n\n self.frameCount = 0\n self._initScreen()\n self._initObjects()\n self._initControls()\n self._initLevel()\n self._start()\n print \"DEBUG: Initializing Game\"\n pass", "def __init__(self):\n pygame.init()\n self.rain_settings = RSettings()\n\n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.rain_settings.screen_width = self.screen.get_rect().width\n self.rain_settings.screen_height = self.screen.get_rect().height\n pygame.display.set_caption(\"Raindrops\")\n\n self.rain = pygame.sprite.Group()\n\n self._create_rain()", "def __init__(self, img, width, height, animations=None, frame=0, speed=0.125, start_animation=E_ANIM):\n super().__init__(img, 0, 0, width, height)\n self.img = img\n\n self.current_animation = start_animation\n self.frame = frame\n self.speed = speed\n self.timer = 0\n self.direction = (0,1)\n\n if animations:\n self.anims = animations\n else:\n self.anims = { E_ANIM: (0,1) }", "def __init__(self, game_object, animation_list):\n self.animation_list = animation_list\n self.current_playing_animation = None\n self.animation_idx = 0\n self.is_paused = False\n for animation in self.animation_list:\n animation.set_animator(self)\n super().__init__(game_object)", "def setup(self):\n self.total_time = 0.0\n self.timer_text = None\n 
arcade.set_background_color(arcade.color.WHITE)", "def __init__(\n self : \"animation\",\n filename : \"str\",\n size : \"Tuple[int,int]\" = None,\n pbar : \"bool\" = False,\n mbs : \"int\" = 16,\n dpi : \"int\" = 150,\n init_frame : \"matplotlib.figure.Figure\" = None,\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\" = None,\n fps : \"int\" = 5,\n interactive : \"bool\" = False,\n autoSmooth : \"bool\" = False,\n smoothingFrames : \"int\" = 5,\n saveFinalFrame : \"int\" = False,\n smoothingTime : float = None,\n smoothingFunction : \"Callable\" = None\n ):\n self.filename = filename\n self.size = size\n self._mbs = mbs\n self._writer = imageio.get_writer(\n self.filename,\n mode='I',\n macro_block_size=self._mbs,\n fps=fps\n )\n self.fps = fps\n self.pbar = pbar\n self._frame_number = 0\n self._closed = False\n self.dpi = dpi\n self._cframe = None\n if init_frame and init_ax:\n self._init_frame(init_frame, init_ax)\n\n self._init_interactive = matplotlib.is_interactive()\n if self._init_interactive and not interactive:\n matplotlib.interactive(False)\n else:\n matplotlib.interactive(interactive)\n if autoSmooth:\n assert smoothingFrames > 0\n\n self._autosmooth = autoSmooth\n self._prevFrame = None\n\n\n # Set up smoothing\n if smoothingTime is None:\n self._smoothingFrames = smoothingFrames\n else:\n self._smoothingFrames = int(smoothingTime*fps)\n\n if smoothingFunction is None:\n self._smoothingFunction = self._linear_interpolation\n else:\n self._smoothingFunction = smoothingFunction\n\n self._saveFinalFrame = saveFinalFrame", "def Initialize(self):\n return _gmat_py.PrecessingSpinner_Initialize(self)", "def __call__(self, *args):\n return _osgAnimation.AnimationManagerBase___call__(self, *args)", "def initialize(self):\n self.currState = self.startState", "def setup_fader(self):\n ScreenFader(fade=\"in\")\n self.should_change_scene = False\n self.should_fade_out = False\n self.change_scene_timer = 0.0", "def _reset_anim(self):\n self._visualization.reset()", "def __init__(self, straight):\n BaseFigureCanvas.__init__(self)\n self.straight = straight\n self.fill1 = None\n self.fill2 = None\n self.ax = self.fig_setup()\n self.beams = self.data_setup()\n self.anim = animation.FuncAnimation(self.figure, self.animate,\n init_func=self.init_data, frames=1000, interval=20)" ]
[ "0.69974357", "0.6579405", "0.6547325", "0.64473146", "0.64049774", "0.6344792", "0.6333705", "0.63244337", "0.6236016", "0.6223936", "0.62055165", "0.6190619", "0.61838394", "0.6117025", "0.61123455", "0.6103143", "0.61002237", "0.6091002", "0.6068729", "0.6032386", "0.6015172", "0.59969217", "0.5930925", "0.5925209", "0.5898308", "0.58568054", "0.5843044", "0.5817819", "0.5773938", "0.57681906" ]
0.8126538
0
Update animation. Called by each render call. Skips certain number of simulation steps per frame to achieve 60 Hz output.
def _update_anim(self): if self._skip_frames > 1: # Do not render while _skip_frames is > 1 self._skip_frames -= 1 else: # Render frame self._visualization.taskMgr.step() # Calculate number of frames that need to be skipped self._skip_frames = int(1 / self._fps / self._dt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grid_animation_quick(self, frames, iterations=10, fps=0.02, figsize=(6, 6)):\r\n color_map = matplotlib.colors.ListedColormap(['white', 'black'])\r\n\r\n fig, ax = plt.subplots(figsize=figsize)\r\n\r\n for r in np.arange(0, iterations):\r\n ax.cla()\r\n ax.axes.grid(False)\r\n ax.set_axis_off()\r\n im = ax.imshow(frames[0], cmap=color_map, animated=True)\r\n for image, step in zip(frames[1:], np.arange(1, len(frames[1:])+1)):\r\n time.sleep(fps)\r\n ax.title.set_text('Rule 942 | Step ' + str(step) + ' | Active ' + str(int(np.sum(image))))\r\n im.set_data(image)\r\n fig.canvas.draw()", "def time_history_animation(self, frame_step=1, magnification=1):\n import matplotlib.pyplot as plt\n import matplotlib.animation as ani\n\n \"\"\"Retrieve maximum displacement for axis limits\"\"\"\n max_list = [max(map(abs, item)) * magnification for item in self.displacement]\n\n \"\"\"Start figure for animation\"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n \"\"\"Define the rectangles that represent the DOFs\"\"\"\n rectangle = []\n for i in range(len(self.coordinates)):\n rectangle.append(plt.Rectangle((self.coordinates[i][0],\n self.coordinates[i][1]),\n self.size[i][0], self.size[i][1], alpha=0.5))\n\n \"\"\"Init function for animation draws the frame, so that blip can be used and the animation runs faster\"\"\"\n\n def init():\n for i in range(len(self.coordinates)):\n ax.add_patch(rectangle[i])\n plt.axis('auto')\n plt.xlim([-max(max_list) + min(self.coordinates[:][0]),\n max(max_list) + max([item[0] for item in self.coordinates]) + max(self.size[:][0])])\n return rectangle\n\n \"\"\"Animation function: only the coordinates of the rectangles are updated here\"\"\"\n\n def motion(t_step):\n for i in range(len(self.coordinates)):\n rectangle[i].set_xy((float(self.coordinates[i][0]\n + self.displacement[i][t_step * frame_step] * magnification),\n float(self.coordinates[i][1])))\n return rectangle\n\n \"\"\"Animation function: inter gives the time delay between frames in milli seconds\"\"\"\n inter = int(1000 * self.dt * frame_step)\n self.anim = ani.FuncAnimation(fig,\n motion,\n init_func=init,\n interval=inter,\n blit=True)\n\n motion(int(len(self.displacement) / frame_step))\n plt.show()", "def update_anim(frame, self):\n self.step()\n self.im.set_data(self.array)\n self.im2.set_data(self.array2)", "def start_sim(self):\n self.anim = animation.FuncAnimation(self.fig, self.anim_func, frames = self.timesteps, interval = 1, blit=True)\n plt.show()", "def render(self):\n step = 1\n while step < self.number_steps and self.update():\n step += 1", "def _animation_step(self, par_dict):\n\n t0 = time.time()\n dt = par_dict[\"dt\"]\n controller = par_dict[\"controller\"]\n integrator = par_dict[\"integrator\"]\n if controller is not None:\n _, _, tau = controller.get_control_output(\n meas_pos=self.x[:self.plant.dof],\n meas_vel=self.x[self.plant.dof:],\n meas_tau=np.zeros(self.plant.dof),\n meas_time=self.t)\n else:\n tau = np.zeros(self.plant.n_actuators)\n self.step(tau, dt, integrator=integrator)\n ee_pos = self.plant.forward_kinematics(self.x[:self.plant.dof])\n ee_pos.insert(0, self.plant.base)\n ani_plot_counter = 0\n for link in range(self.plant.n_links):\n self.animation_plots[ani_plot_counter].set_data(\n [ee_pos[link][0], ee_pos[link+1][0]],\n [ee_pos[link][1], ee_pos[link+1][1]])\n ani_plot_counter += 1\n self.animation_plots[ani_plot_counter].set_data(ee_pos[link+1][0],\n ee_pos[link+1][1])\n ani_plot_counter += 1\n\n set_arrow_properties(self.tau_arrowarcs[link],\n 
self.tau_arrowheads[link],\n float(np.squeeze(tau)),\n ee_pos[link][0],\n ee_pos[link][1])\n t = float(self.animation_plots[ani_plot_counter].get_text()[4:])\n t = round(t+dt, 3)\n self.animation_plots[ani_plot_counter].set_text(f\"t = {t}\")\n\n # if the animation runs slower than real time\n # the time display will be red\n if time.time() - t0 > dt:\n self.animation_plots[ani_plot_counter].set_color(\"red\")\n else:\n self.animation_plots[ani_plot_counter].set_color(\"black\")\n return self.animation_plots + self.tau_arrowarcs + self.tau_arrowheads", "def grid_animation(self, steps, figure_size=(12, 12), speed=100):\r\n\r\n steps -= 1\r\n x = self.seed\r\n\r\n fig, ax = plt.subplots(figsize=figure_size)\r\n ax.grid(False)\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\n color_map = matplotlib.colors.ListedColormap(['white', 'black'])\r\n im = plt.imshow(x[1:-1:1, 1:-1:1], interpolation='nearest', cmap=color_map, animated=True)\r\n counter = 0\r\n\r\n def update_figure(*args):\r\n nonlocal x, counter, fig\r\n\r\n counter += 1\r\n x, stats = self.update_grid(x)\r\n plt.title(self.title + ' | Step ' + str(counter), fontsize=14)\r\n im.set_array(x[1:-1:1, 1:-1:1])\r\n\r\n return im, # why is this comma necessary?\r\n\r\n ani = animation.FuncAnimation(fig, update_figure, frames=steps,\r\n interval=speed, blit=False, repeat=False)\r\n\r\n return ani", "def random_walk_draw(self,num_plots,animated=False,show=True):\n\t\t\n\t\tt_x_arrays = []\n\t\tt_max = self.n\n\t\tfor _ in range(num_plots):\n\t\t\tcurrent_x = self.x_initial\n\t\t\tx_array = [current_x]\n\t\t\tt_array = range(t_max + 1)\n\t\t\tsteps = self._random_walk_simulation()\n\t\t\tfor s in steps:\n\t\t\t\tcurrent_x += s\n\t\t\t\tx_array.append(current_x)\n\t\t\tt_x_arrays.append( [x_array,t_array] )\n\t\t\n\t\t\n\t\tfig = plt.figure('Random walk simulation')\n\t\tax = fig.add_subplot(1,1,1)\n\t\tax.set_ylim([(round(min(x_array) - np.sqrt(self.n)*3)),round(max(x_array) + np.sqrt(self.n)*3)])\n\t\tax.set_xlim([-(round(np.sqrt(self.n))),self.n+(round(np.sqrt(self.n)))])\n\t\t\n\t\tif animated == True: # THIS CASE CURRENTLY HAS BUG FOR SOME REASON. 
CODE IS IDENTICAL TO 2D ANIMATION?\n\t\t\tfig.suptitle('Simulation of 1D random walk, live')\n\t\t\tself.index = 0\n\t\t\tdef update(i):\n\t\t\t\tax.clear()\n\t\t\t\tax.set_ylim([(round(min(x_array) - np.sqrt(self.n)*3)), round(max(x_array) + np.sqrt(self.n)*3)])\n\t\t\t\tax.set_xlim([-(round(np.sqrt(self.n))), self.n+(round(np.sqrt(self.n)))])\n\t\t\t\tfor i in t_x_arrays:\n\t\t\t\t\tx_vals,t_vals = i \n\t\t\t\t\tax.plot(t_vals[:self.index], x_vals[:self.index])\n\t\t\t\tself.index += 1\n\t\t\ta = anim.FuncAnimation(fig, update, frames=self.n, repeat=False,interval=10)\n\t\telse:\n\t\t\tfig.suptitle('Simulation of 1D random walk, static')\n\t\t\tfor i in t_x_arrays:\n\t\t\t\tx_vals,t_vals = i\n\t\t\t\tax.plot(t_vals, x_vals)\n\t\t\t\n\t\tif show == True:\n\t\t\tplt.show()", "def do_animations(self):\n self.animate_bloop(700, 160, 50)", "def animate(frames):\n plt.grid('on')\n ax = plt.gca()\n ax.set_xticks(np.arange(0.5, 10, 1))\n ax.set_yticks(np.arange(0.5, 10, 1))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n for i in range(len(env_list)):\n ax.imshow(env_list[i],cmap='binary')\n plt.pause(0.05)", "def update_animations():\n\tt = _pg.time.get_ticks()\n\tfor a in _running:\n\t\ta._update(t)", "def update(self):\n if self.iteration > self.rate:\n self.iteration = 0\n heading = (random.random() * 180) - 90\n self.speed = 0.1\n if heading >= 0:\n self.heading = heading\n else:\n self.heading = 360 + heading\n self.iteration += 1\n self.setVector(self.speed, self.heading)", "def play(self):\n frame_time = 0\n last_angle = 5\n while self.RENDER_FRAME:\n frame_time += self.clock.get_time()\n if frame_time > 15:\n frame_time = 0\n self.event_handler()\n self.update_entities()\n self.draw_frame()\n\n scaled_surface = pygame.transform.scale(\n self.render_surface, (self.DISPLAY_WIDTH, self.DISPLAY_HEIGHT)\n )\n self.display.blit(scaled_surface, (0, 0))\n\n pygame.display.update()\n self.clock.tick()", "def runFrame(self):\n self._drawFrame(self._advanceTime())", "def draw(self):\n\n # I reset it at 24 because they're 4 images and I want the reduce the animation speed by 6 (6*4=24)\n if self.spriteCount + 1 >= 24:\n self.spriteCount = 0\n if self.isJump:\n self.screen.blit(self.spriteJump[self.spriteCount // 6], (self.x_pos, self.y_pos))\n else:\n self.screen.blit(self.spriteFall[self.spriteCount // 6], (self.x_pos, self.y_pos))\n self.spriteCount += 1", "def _refresh_render(self):\n current_frame = self.frame\n self.frame = int(1E6)\n self.frame = current_frame", "def animate(self, i):\n try:\n self.lastSpectrum = self.spectrometer.getSpectrum()\n if self.darkReference is not None:\n self.lastSpectrum -= self.darkReference\n if self.whiteReference is not None:\n np.seterr(divide='ignore',invalid='ignore')\n if self.darkReference is not None:\n self.lastSpectrum = self.lastSpectrum / (self.whiteReference-self.darkReference)\n else:\n self.lastSpectrum = self.lastSpectrum / self.whiteReference \n\n self.plotSpectrum(spectrum=self.lastSpectrum)\n except usb.core.USBError as err:\n print(\"The spectrometer was disconnected. 
Quitting.\")\n self.quitFlag = True\n\n if self.quitFlag:\n self.animation.event_source.stop()\n self.animation = None\n plt.close()", "def update(self, delta_frames=1):\n self.x -= ANIMATION_SPEED * frames_to_msec(delta_frames)", "def on_update(self, delta_time):\n self.time += 1\n if self.time <= 900:\n self.gif.update_animation()", "def animate_starter(self, **kwargs):\n interval = 5 # this number works fine, but is rather arbirtary, presumably in milliseconds\n print(\"The timer length is \" + str(len(self.sy.short_timer)))\n print(\"Shape of coordinate_grid is \" + str(np.shape(self.sy.coordinate_grid)))\n print(\"The animator interval was \" + str(interval) + \" in unknown units\")\n # I don't currently understand why the galaxy chooses\n # to slow down mid way through.\n # Perhaps I should look at the FuncAnimation\n # dictionary and work out what has gone wrong.\n with plt.style.context((\"dark_background\")):\n ani = animation.FuncAnimation(\n self.fig,\n self.animate,\n frames=len(self.sy.short_timer),\n interval=interval,\n blit=True,\n init_func=self.ani_init,\n )\n ani.save(\n str(self.co.out)\n + \"/\"\n + str(self.name)\n + \"move_with_\"\n + str(self.move_with)\n + \".mp4\",\n writer=writer,\n )\n plt.clf() # always make sure you close the lid", "def animation(self, t):\n self.program['u_clock'] = 2*t\n gloo.clear('black')\n self.program.draw('points')\n return _screenshot((0, 0, self.size[0], self.size[1]))[:,:,:3]", "def start_animation(self):\n\t\ttime.sleep(1)\n\t\tself.fishbowl.animate_balls()", "def start(self):\n for i in xrange(self.num_pulses):\n self.fillColor = \"white\"\n for j in xrange(self.num_frames_on):\n self.draw()\n self.win.flip()\n if j == 0:\n # Only store the time of the first occuring on frame.\n self.utc_timestamps.append(datetime.strftime(datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ'))\n self.fillColor = \"black\"\n for j in xrange(self.num_frames_off):\n self.draw()\n self.win.flip()", "def anim():\n i = 0\n while 1:\n\n for r in Reprs:\n r.draw(i)\n i = i+ 1\n i = i % len(t)\n yield", "def update(self):\n self._num_frames += 1", "def step(self, amt=1):\n \n # For checking if all the animations have their framse looked at\n #activewormind = [i for i, x in enumerate(self._idlelist) if x == False]\n #print \"Worm {} at {:5g}\".format(activewormind, 1000*(time.time() - starttime))\n # save times activated for each worm \n [self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]\n \n #self._led.buffer = [0] * 480\n self._led.pixheights = [-100] * self._led.numLEDs\n #print type(self._led.buffer)\n for ledcopy in self._ledcopies:\n # self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)\n # use pixheights but assume all buffers same size\n # print ledcopy.driver[0].pixheights\n for pix in range(self._led.numLEDs):\n #for ledcopy in self._ledcopies:\n if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:\n for i in range(3):\n self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]\n elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:\n for i in range(3):\n self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]\n self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix] \n self._step += 1", "def run_next(self, action):\r\n self.screen.fill((0, 0, 0))\r\n\r\n # Run the simulation loop\r\n self.SimulationLoop(action)\r\n if GUIEnabled and self.settings.drawMenu:\r\n self.gui_app.paint(self.screen)\r\n\r\n pygame.display.flip()\r\n 
self.clock.tick(self.settings.hz)\r\n self.fps = self.clock.get_fps()", "def animation(self, freq=100):\n if (self.current_time - self.timer) > freq:\n if self.index < (len(self.image_list) - 1):\n self.index += 1\n else:\n self.index = 0\n self.timer = self.current_time\n self.image = self.image_list[self.index]", "def SetAnimationStep(self, step):\r\n\r\n self._animation_step = float(step)", "def start_animation(self) -> None:\n increment_values = {0: 1, self.original_height: -1}\n self.increment = increment_values.get(self.current_height, 0) # Compressed if" ]
[ "0.67353886", "0.6680484", "0.66650397", "0.66557145", "0.66444975", "0.6517887", "0.649021", "0.64420825", "0.64265084", "0.6239542", "0.6208433", "0.6200715", "0.6192603", "0.61721265", "0.6136987", "0.6074246", "0.60647804", "0.6064689", "0.60567117", "0.6033081", "0.5989959", "0.5983667", "0.59822136", "0.5950068", "0.59481305", "0.59431696", "0.5886726", "0.58839655", "0.5859434", "0.58396584" ]
0.7681746
0
A main thread that focuses solely on grabbing frames from a camera, limited only by self.fps. Thread is created at startThread, which can be called by setPaused. Thread is ended only at endThread
def __videoThread(self): self.frameList = [] fpsTimer = FpsTimer(self.fps) printf("Starting videoStream thread.") while self.running: fpsTimer.wait() if not fpsTimer.ready(): continue if self.setCamera is not None: self.__setNewCamera(self.setCamera) if self.paused: continue if self.cap is None: continue # Get a new frame ret, newFrame = self.cap.read() if not ret: # If a frame was not successfully returned printf("ERROR: while reading frame from Cam. Setting camera again...") self.__setNewCamera(self.cameraID) cv2.waitKey(1000) continue # Do frame related work with self.frameLock: self.frame = newFrame # Add a frame to the frameList that records the 5 latest frames for Vision uses self.frameList.insert(0, self.frame.copy()) # print("len", len(self.frameList), "Curr frames: ", [id(frame) for frame in self.frameList]) while len(self.frameList) > 10: del self.frameList[-1] # Keep track of new frames by counting them. (100 is an arbitrary number) if self.frameCount >= 100: self.frameCount = 0 else: self.frameCount += 1 # Run any work functions that must be run. Expect no results. Work should be run before filters. if len(self.workList) > 0: # print("Work: ", self.workList) with self.workLock: for workFunc in self.workList: workFunc(self.frame) # Run any filters that must be run, save the results in self.filterFrame if len(self.filterList) > 0: # print("Filters: ", self.filterList) with self.filterLock: filterFrame = self.getFrame() for filterFunc in self.filterList: filterFrame = filterFunc(filterFrame) # Draw FPS on the screen fps = str(int(round(fpsTimer.currentFPS, 0))) cv2.putText(filterFrame, fps, (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.25, (255, 255, 255), 2) self.filterFrame = filterFrame else: self.filterFrame = self.frame printf("VideoStream Thread has ended")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n if BaseCamera.thread is None:\n BaseCamera.last_access = time.time()\n\n # start background frame thread\n BaseCamera.thread = threading.Thread(target=self._thread)\n BaseCamera.thread.start()\n\n # wait until frames are available\n while self.get_frame() is None:\n time.sleep(0)", "def start_camera(self):\n # create the video capture thread\n self.thread = VideoThread()\n # connect its signal to the update_image slot\n self.thread.change_pixmap_signal.connect(self.update_image)\n # start the thread\n self.thread.start()", "def start(self, resolution=(640, 480), framerate=16):\n self.stopped = False\n\n # Initialize camera\n self.camera = cv2.VideoCapture(0)\n self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, resolution[0])\n self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, resolution[1])\n self.grab_image()\n\n # Start thread\n thread = Thread(target=self.update, args=())\n thread.daemon = True\n thread.start()", "def update(self):\r\n while True:\r\n # if the thread indicator variable is set, stop the thread\r\n if self.stopped:\r\n self.webcam.release()\r\n return\r\n # otherwise, read the next frame from the webcam stream\r\n grabbed, frame = self.webcam.read()\r\n '''if self.cam_zoom:\r\n frame = self.zoom(frame)'''\r\n with self.read_lock:\r\n self.grabbed = grabbed\r\n try:\r\n #self.frame = cv2.resize(frame, (self.width, self.height))\r\n self.frame = frame\r\n except:\r\n None", "def start(self):\n self.log('Start capturing.')\n # ---\n try:\n self.setup()\n # run camera thread\n self._worker = Thread(target=self.run)\n self._worker.start()\n except StopIteration:\n self.log('Exception thrown.')", "def video_thread():\n global last_frame\n # Creating stream capture object\n cap = cv2.VideoCapture('udp://' + drone.tello_ip + ':11111')\n\n while(True):\n _, last_frame = cap.read()\n cap.release()", "def run_frame_thread(frame_reader_class, camera_id, video_source, queue, fps):\n logging.debug(\"starting frame_reader run loop\")\n try:\n frame_reader = frame_reader_class(camera_id, video_source)\n except Exception as e:\n logger.critical(\"Failed to load CV2FrameReader: %s\" % e)\n raise e\n\n # dump initial frames, as it seems certain cameras\n # flub the first few for some reason:\n for i in range(5):\n frame_reader.get_frame()\n while True:\n try:\n frame = frame_reader.get_frame()\n except queue.Empty:\n continue\n except Exception as e:\n logger.error(\"Failed to instantiate Frame: %s\" % e)\n try:\n queue.put(frame)\n except Exception as e:\n print(\"Failed to put frame onto queue: %s\" % e)\n time.sleep(1.0/fps)", "def run_capture(video_q):\n cam = cv2.VideoCapture(0)\n print(f\"width: {cam.get(3)}, height: {cam.get(4)}, fps: {cam.get(5)}\")\n while is_running:\n\n if not video_q.full(): \n ok, frame = cam.read()\n if not ok:\n # camera disconnected\n break\n\n video_q.put(frame)\n\n cam.release()\n\n # empty the queue otherwise the main process will hand as the queue feeder\n # thread will not terminate while the queue has items. 
Empty it here as this\n # is the only place that adds to the queue\n while not video_q.empty():\n video_q.get()\n\n print(\"camera thread exited\")", "def start():\n global running\n running = True\n messagebox.showinfo(\"Camera mode\",\"Start image grab\")\n camera.start_preview(fullscreen=False, window = (100,20,612,404))", "def run(self):\n\n \"\"\"Call this function before trying to play any video with\n play_segment() or play().\n \"\"\"\n print(\"Task 2 assigned to thread: {}\".format(threading.current_thread().name))\n print(\"ID of process running task 2: {}\".format(os.getpid()))\n\n # If we don't use the MainLoop, messages are never sent.\n def start():\n print(\"Task 3 assigned to thread: {}\".format(threading.current_thread().name))\n print(\"ID of process running task 3: {}\".format(os.getpid()))\n print('====================> Using MainLoop\\n')\n loop = GLib.MainLoop()\n loop.run()\n \n \n print('====================> Starting a new thread for the player\\n')\n t = threading.Thread(target=start, name='thread_player')\n t.start()\n #_thread.start_new_thread(start, ())", "def show_dummy(thread, frame, iterations = 1):\n while True:\n show_camera(thread.process_dummy(frame, iterations))\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cv2.destroyWindow(thread.name)\n\n for i in range(5):\n cv2.waitKey(1) # wat", "def run(self):\n i = 0\n t = time.time()\n while True:\n i = i + 1\n ret, frame = self.stream.read()\n if (i == 20):\n self.fps = 20/(time.time() - t)\n t = time.time()\n i = 0\n #If a frame is None need to re-init it: \n # - close a stream;\n # - reopen it;\n # - read frame again\n if frame is None:\n self.stream.release()\n self.stream = cv2.VideoCapture(self.url)\n ret, frame = self.stream.read()\n text = time.strftime('%Y-%m-%d %H:%M:%S')\n if (self.fps > 0):\n text = text + ' FPS: ' + str(round(self.fps))\n self.frame = cv2.putText(frame, text, (10, int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)) - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)\n self.frameID = uuid.uuid4()", "def __init__(self, camera, camera_width, camera_height):\n threading.Thread.__init__(self)\n self._camera = camera\n self._stop = threading.Event()\n self._frame_lock = threading.Lock()\n self._writer_lock = threading.Lock()\n self._frame = None\n\n self._fourcc = None\n self._writer = None\n self._width = camera_width\n self._height = camera_height\n self._back = np.zeros((320,240,3), dtype=\"uint8\")\n self._is_ready = False\n self._is_recorded = False", "def _start_video_buffering(self):\n # open/draw the GUI\n app = QApplication(sys.argv)\n self.vlc_gui = Player(vlc_player=self.player, drone_gui=self)\n self.vlc_gui.show()\n self.vlc_gui.resize(640, 480)\n\n # ensure that closing the window closes vision\n app.aboutToQuit.connect(self.land_close_exit)\n\n if (self.user_vision_thread is not None):\n print(\"Starting user vision thread\")\n self.user_vision_thread.start()\n\n # setup the timer for snapshots\n self.timer = QTimer(self.vlc_gui)\n self.timer.setInterval(self.vision_interval)\n self.timer.timeout.connect(self._buffer_vision)\n self.timer.start()\n\n # show the stream\n success = self.player.play()\n print(\"success from play call is %s \" % success)\n\n # start the GUI loop\n app.exec()", "def __init__(self, video):\n threading.Thread.__init__(self)\n self._stop = threading.Event()\n self._pause = True\n self._video = video\n #self._fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()\n #self._fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()\n self._fgbg = 
cv2.createBackgroundSubtractorMOG2()\n self._init = True\n self._hasMotion = False", "def run(self):\n while True:\n self.ret, self.frame = self.cap.read()\n if self.ret:\n rgbImage = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)\n convertToQtFormat = QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0], QImage.Format_RGB888)\n self.readyFrame = convertToQtFormat.scaled(500, 375, Qt.KeepAspectRatio)\n self.send_camera_view_to_gui.emit(self.readyFrame)", "def run(self):\n\n info(\"creating camera\")\n self.camera_controller = CameraController()\n self.camera_controller.camera.resolution = self.photo_resolution\n\n self.screen_resolution = ui.get_screen_resolution()\n self.normalized_screen_resolution = ui.normalize_dimension(self.screen_resolution)\n info(\"screen_resolution: %s\", self.screen_resolution)\n info(\"normalized_screen_resolution: %s\", self.normalized_screen_resolution)\n\n info(\"creating buffer image and canvas\")\n self.buffer_image = Image.new('RGB', self.normalized_screen_resolution)\n self.canvas = ImageDraw.Draw(self.buffer_image)\n debug(\"buffer_image resolution: %s\", self.buffer_image.size)\n\n info(\"creating preview renderer\")\n self.preview_renderer = self.camera_controller.start_preview(\n fullscreen=False,\n window=ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0] * 0.75,\n self.normalized_screen_resolution[1]\n )))\n debug(\"preview location: %s\", self.preview_renderer.window)\n\n info(\"creating window renderer\")\n self.window_renderer = self.camera_controller.add_overlay(\n self.buffer_image.tobytes(),\n size=self.buffer_image.size,\n fullscreen=False,\n layer=1,\n window=(\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n debug(\"window location: %s\", self.window_renderer.window)\n\n info(\"setting up UI\")\n self._setup_ui()\n\n info(\"setting up input\")\n self.yes_button = GPIOButton(self.yes_pin)\n self.no_button = GPIOButton(self.no_pin)\n\n info(\"starting app\")\n self._enter_state(STATE_DEFAULT)\n self.render_timer.start()\n ui_context = ui.UIContext(self.canvas, self.window, update_function=self._logic)\n ui_context.main_loop()\n\n info(\"exiting\")", "def start(self):\r\n threading.Thread(target=self.update_frame, args=()).start()\r\n return self", "def main(self):\n update = self.update\n draw = self.draw\n screen = self.screen\n flip = pg.display.update\n clock = time.time\n frame_length = (1. 
/ self.fps)\n time_since_draw = 0\n last_update = clock()\n fps_timer = 0\n frames = 0\n\n while not self.done:\n clock_tick = clock() - last_update\n last_update = clock()\n time_since_draw += clock_tick\n update(clock_tick)\n if time_since_draw >= frame_length:\n time_since_draw -= frame_length\n draw(screen)\n flip()\n frames += 1\n\n fps_timer, frames = self.handle_fps(clock_tick, fps_timer, frames)\n time.sleep(.01)", "def run(self):\n pre_stop = False\n begin_t = 0\n end_t = 0\n\n self.removeNoise()\n\n while (not self.stopped()):\n\n if self._pause:\n time.sleep(0.001)\n continue\n\n frame = self._video.getImage()\n\n if not (frame is None):\n fgmask = self._fgbg.apply(frame)\n hist = cv2.calcHist([fgmask],[0],None,[256],[0,256])\n\n white_count = hist[255]\n\n if (white_count > 500):\n if not self._video.isRecorded() and not self._pause:\n if self._video.startRecord():\n self._hasMotion = True\n print('[Detector] start record video')\n else:\n print('[Detector] start record video fail!')\n pre_stop = False\n elif (white_count <= 100) and self._video.isRecorded():\n if not pre_stop:\n pre_stop = True\n begin_t = clock()\n else:\n end_t = clock()\n if end_t - begin_t > 10:\n if self._video.stopRecord():\n self._hasMotion = False\n print('[Detector] stop record video')\n else:\n print('[Detector] stop record video fail!')\n if self._video.isRecorded():\n self._hasMotion = False\n self._video.stopRecord()\n print('[Detector] end Thread')", "def get_frames(self):\n video_getter = Thread(target=self.streamer)\n video_getter.daemon = True\n video_getter.start()", "def start(self):\n\t\tself.__thread = Thread(target=self.__update, name='CamGear', args=())\n\t\tself.__thread.daemon = True\n\t\tself.__thread.start()\n\t\treturn self", "def run(self):\n if self.camera is None:\n print('Warning: Viewer.camera is None')\n return\n\n last_update = glfw.get_time()\n last_framerate_update = last_update\n nb_frames_per_second = 0\n while not glfw.window_should_close(self.window):\n # clear draw buffer and depth buffer\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n # update our scene\n current_time = glfw.get_time()\n delta_time = current_time - last_update\n self.update(delta_time)\n last_update = current_time\n\n # get view and projection matrix from camera\n size = glfw.get_window_size(self.window)\n view = self.camera.view_matrix()\n projection = self.camera.projection_matrix(size)\n\n # draw our scene\n self.draw(projection, view, identity(), identity(), self.camera)\n\n # flush and swap buffers\n glfw.swap_buffers(self.window)\n\n # update framerate information\n nb_frames_per_second += 1\n current_time = glfw.get_time()\n delta_time = current_time - last_framerate_update\n if delta_time >= 1:\n ms_per_frame = 1000 * delta_time / nb_frames_per_second\n glfw.set_window_title(self.window, '{} - {:.1f} ms'.format(self.title, ms_per_frame))\n last_framerate_update = current_time\n nb_frames_per_second = 0\n\n # Poll and process new events\n glfw.poll_events()", "def __init__(self, output_path = \"./\"):\n self.vs = cv2.VideoCapture() # capture video frames, 0 is your default video camera\n self.vs.set(cv2.CAP_PROP_BUFFERSIZE, 2)\n #Decrease frame size\n self.vs.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)\n #cv2.CAP_PROP_FRAME_WIDTH\n self.vs.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)\n self.vs.release()\n self.output_path = output_path # store output path\n self.current_image = None # current image from the camera\n\n self.isReplay = False\n self.initStream = True\n self.cClear = True\n 
self.killThread = False\n\n self.fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n self.cache = cv2.VideoWriter()\n self.cache.release()\n self.replayStream = cv2.VideoCapture()\n self.replayStream.release()\n print('[SB Live] Initialied video streams...')\n\n self.root = tk.Tk() # initialize root window\n self.root.title(\"SpikeBall Live\") # set window title\n # self.destructor function gets fired when the window is closed\n self.root.protocol('WM_DELETE_WINDOW', self.destructor)\n\n self.cam = tk.Frame(self.root)\n self.gui = tk.Frame(self.root)\n self.cam.pack(side='top')\n self.gui.pack(side='bottom', fill='both', expand=True)\n\n self.panel = tk.Label(self.root) # initialize image panel\n self.panel.pack(in_=self.cam, padx=10, pady=10)\n print('[SB Live] Initialized GUI...')\n\n self.serverProcess = subprocess.Popen(['python3', 'sblive/server.py'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n self.s = threading.Thread(target=self.get_server_response)\n self.s.start()\n\n\n # create a button, that when pressed, will take the current frame and save it to file\n btn = tk.Button(self.root, text=\"Toggle Replay\", command=self.toggle_replay)\n btn.pack(in_=self.gui, side='left', expand=True, padx=10, pady=10)\n\n btn2 = tk.Button(self.root, text=\"New Point\", command=self.clear_cache)\n btn2.pack(in_=self.gui, side='left', expand=True, padx=10, pady=10)\n\n self.t = threading.Thread(target=self.video_loop)\n print('[SB Live] Initialized stream thread')\n\n # start a self.video_loop that constantly pools the video sensor\n # for the most recently read frame\n self.t.start()", "def start(self):\n # start a thread to read frames from the file video stream\n self.thread.start()\n return self", "def capture_video(self):\n while self.capturing:\n nparray = self.source.get_frame()\n self.frame_buffer.put(Frame(nparray, self.frame))\n self.frame += 1\n print \"Stopping Capture\"", "def camera_exec():\n pygame.init()\n locals()\n\n plot_num = 0\n running, Clock, font = camera_connect()\n while running:\n Clock.tick(100)\n\n # read framebuffer\n fb = None\n while (True) :\n try:\n fb = pyopenmv.fb_dump()\n break\n except Exception as e:\n # try and reconnect on failure\n camera_connect()\n\n # signal to UArm that camera has connected\n camera_started.set()\n if fb is not None:\n # create image from RGB888\n image = pygame.image.frombuffer(fb[2].flat[0:], (fb[0], fb[1]), 'RGB')\n screen = pygame.display.set_mode((fb[0], fb[1]), pygame.DOUBLEBUF, 32)\n\n fps = Clock.get_fps()\n # blit stuff\n screen.blit(image, (0, 0))\n screen.blit(font.render(\"FPS %.2f\"%(fps), 1, (255, 0, 0)), (0, 0))\n\n # update display\n pygame.display.flip()\n\n # get output from text buffer\n tx_len = pyopenmv.tx_buf_len()\n\n # object was found by camera if there is outputted text\n if tx_len:\n\n '''\n if UArm has signaled to the camera to identify the object and the camera has not already\n assigned values to the global variables associated with the object's location\n '''\n if camera_event.is_set() and (data_ready.is_set() is False):\n\n # read the most recent data at index 0 from the text buffer\n buff = pyopenmv.tx_buf(tx_len).decode()\n split_buff = str(buff).splitlines()\n if h_angle_key in split_buff[0]:\n\n # Most recent line in buff contains needed information\n global h_angle, v_angle, is_centered\n tok = split_buff[0].split()\n\n # set angles to corresponding values determined by camera\n h_angle, v_angle = float(tok[1]), float(tok[3])\n if tok[5] == \"True\":\n is_centered = True\n else:\n 
is_centered = False\n # signal that global variables have been set\n data_ready.set()\n\n if plot_ready.is_set():\n print(\"success_rate: \", success_history)\n plot_distance(distance_history, plot_num)\n plot_success(success_history, plot_num)\n plot_num += 1\n plot_ready.clear()\n print(\"success rate for \", len(success_history), \" tests: \",\n success_history.count(True) / len(success_history))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n if event.key == pygame.K_c:\n pygame.image.save(image, \"capture.png\")\n\n pygame.quit()\n pyopenmv.stop_script()", "def runVideoAsThread():\r\n\r\n app = wx.PySimpleApp()\r\n frame = ImageFrame(None)\r\n frame.SetSize((800, 600))\r\n frame.Show(True)\r\n\r\n myImageIn = ImageIn(frame.window)\r\n\r\n t = threading.Thread(target=app.MainLoop)\r\n t.setDaemon(1)\r\n t.start()\r\n\r\n return myImageIn.SetData", "def video_loop(self):\n\n _, img = self.vs.read()\n img = imutils.resize(img, width=self.width)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n self.frame.configure(image=image)\n self.frame.photo = image\n\n self.top.after(self.fps, self.video_loop)", "def run(self):\n self.status = \"Processing\"\n start_total_time = time.time()\n while self.is_running:\n if self.video_queue.is_running:\n if self.video_queue.empty():\n if self.video_queue.thread.is_running:\n time.sleep(0.005)\n self.logger.log(0, 'VIDEO QUEUE EMPTY')\n else:\n self.finalize()\n else:\n try:\n if self.video_queue:\n img = self.video_queue.get()\n if type(img) is np.ndarray:\n start_time = time.time()\n self.logger.log(0, \"TIME AFTER CURRENT_TIME {}\".format( time.time()-start_time ))\n gender_video_predict = self.face_gender_detector.detect_genders_from_img(img)\n if gender_video_predict:\n self.logger.log(0, \"FACES DETECTED. TIME {}\".format( time.time()-start_time ))\n final_gender = gender_video_predict[0][\"gender\"]\n dict_detection = OrderedDict(\n [('frame', self.actual_frame_number),\n ('gender', final_gender)])\n self.results.append(dict_detection)\n self.actual_frame_number += 1\n self.logger.log(0, \"TIME AFTER dict_detection {}\".format( time.time()-start_time ))\n self.logger.log(0, \"TIME AFTER write_results {}\".format( time.time()-start_time ))\n self.progress = self.update_progress()\n self.logger.log(0, \"TIME AFTER update_progress {}\".format( time.time()-start_time ))\n total_time = time.time() - start_total_time\n self.logger.log(\n 10, \"PROGRESS: {}; TIME ELAPSED: {}; E.T.A: {}\".format(\n self.progress, \n timedelta(seconds=int(total_time)),\n timedelta(\n seconds=int(total_time*100/self.progress) - int(total_time))))\n except:\n self.status = \"Failed\"\n self.logger.error(\n 'Unexpected error : {}'.format(\n traceback.format_exc()))\n self.finalize()\n break\n else:\n self.logger.info('Queue has stopped')\n self.finalize()\n break\n self.status = \"Completed\"\n self.logger.info(f\"Analysis of video {self.video_queue.path} has been completed\")\n save_results(self.results, \"/home/visiona2/code/gender_equality_api/src/gender_equality/\")" ]
[ "0.7088039", "0.69147956", "0.6765443", "0.64327663", "0.64192265", "0.638756", "0.6384798", "0.6164235", "0.6132149", "0.61036897", "0.6100919", "0.6074926", "0.5999895", "0.59563357", "0.5916945", "0.58964497", "0.588757", "0.5865779", "0.58643776", "0.5859386", "0.5821989", "0.5813315", "0.5799284", "0.5792164", "0.57905555", "0.5778646", "0.57681954", "0.5766416", "0.5751957", "0.57224196" ]
0.7575143
0
Load Sweetlead datasets. Sweetlead is a dataset of chemical structures for approved drugs, chemical isolates from traditional medicinal herbs, and regulated chemicals. Resulting structures are filtered for the active pharmaceutical ingredient, standardized, and differing formulations of the same drug are combined in the final database.
def load_sweet(featurizer='ECFP', split='index', reload=True, frac_train=.8, data_dir=None, save_dir=None, **kwargs): # Load Sweetlead dataset logger.info("About to load Sweetlead dataset.") SWEET_tasks = ["task"] if data_dir is None: data_dir = DEFAULT_DIR if save_dir is None: save_dir = DEFAULT_DIR if reload: save_folder = os.path.join(save_dir, "sweet-featurized", featurizer) if featurizer == "smiles2img": img_spec = kwargs.get("img_spec", "std") save_folder = os.path.join(save_folder, img_spec) save_folder = os.path.join(save_folder, str(split)) loaded, all_dataset, transformers = dc.utils.save.load_dataset_from_disk( save_folder) if loaded: return SWEET_tasks, all_dataset, transformers # Featurize SWEET dataset logger.info("About to featurize SWEET dataset.") if featurizer == 'ECFP': featurizer = dc.feat.CircularFingerprint(size=1024) elif featurizer == "smiles2img": img_spec = kwargs.get("img_spec", "std") img_size = kwargs.get("img_size", 80) featurizer = deepchem.feat.SmilesToImage( img_size=img_size, img_spec=img_spec) else: raise ValueError("Other featurizations not supported") dataset_file = os.path.join(data_dir, "sweet.csv.gz") if not os.path.exists(dataset_file): dc.utils.download_url(SWEETLEAD_URL) loader = dc.data.CSVLoader( tasks=SWEET_tasks, smiles_field="smiles", featurizer=featurizer) dataset = loader.featurize(dataset_file) # Initialize transformers transformers = [ dc.trans.BalancingTransformer(transform_w=True, dataset=dataset) ] logger.info("About to transform data") for transformer in transformers: dataset = transformer.transform(dataset) if split == None: return SWEET_tasks, (dataset, None, None), transformers splitters = { 'index': dc.splits.IndexSplitter(), 'random': dc.splits.RandomSplitter(), 'scaffold': dc.splits.ScaffoldSplitter(), 'task': dc.splits.TaskSplitter(), 'stratified': dc.splits.RandomStratifiedSplitter() } splitter = splitters[split] frac_train = kwargs.get("frac_train", 0.8) frac_valid = kwargs.get('frac_valid', 0.1) frac_test = kwargs.get('frac_test', 0.1) train, valid, test = splitter.train_valid_test_split( dataset, frac_train=frac_train, frac_valid=frac_valid, frac_test=frac_test) if reload: dc.utils.save.save_dataset_to_disk(save_folder, train, valid, test, transformers) all_dataset = (train, valid, test) return SWEET_tasks, (train, valid, test), transformers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_sba_datasets(dbm, direc):\n foia_504_1991_present = pd.read_excel(direc + 'FOIA - 504 (FY1991-Present).xlsx')\n foia_7a_1991_1999 = pd.read_excel(direc + 'FOIA - 7(a) (FY1991-FY1999).xlsx', skiprows=1)\n foia_7a_2000_2009 = pd.read_excel(direc + 'FOIA - 7(a)(FY2000-FY2009).xlsx', skiprows=1)\n foia_7a_2010_present = pd.read_excel(direc + 'FOIA - 7(a) (FY2010-Present).xlsx')\n\n dbm.write_df_table(\n foia_504_1991_present, table_name='sba__foia_504_1991_present', schema='data_ingest')\n dbm.write_df_table(\n foia_7a_1991_1999, table_name='sba__foia_7a_1991_1999', schema='data_ingest')\n dbm.write_df_table(\n foia_7a_2000_2009, table_name='sba__foia_7a_2000_2009', schema='data_ingest')\n dbm.write_df_table(\n foia_7a_2010_present, table_name='sba__foia_7a_2010_present', schema='data_ingest')", "def load_raw_dataset(name):\n assert name in VALID_NAMES, 'Invalid data set requested. Please make sure name is one of ' + ', '.join(VALID_NAMES) + '.'\n\n os.makedirs('downloads', exist_ok=True)\n path = os.path.join('downloads', name)\n path_raw = os.path.join(path, 'raw')\n\n if name == 'iris':\n prep_path(path)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', path_raw)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.names', path_raw)\n return pd.read_csv(os.path.join(path_raw, 'iris.data'), names=['sepal_len', 'sepal_wid', 'petal_len', 'petal_wid', 'species'])\n\n elif name == 'wine':\n prep_path(path)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', path_raw)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.names', path_raw)\n return pd.read_csv(os.path.join(path_raw, 'wine.data'), names=['class',\n 'alcohol',\n 'malic_acid',\n 'ash',\n 'alkalinity',\n 'magnesium',\n 'phenols',\n 'flavanoids',\n 'nonflavanoid_phenols',\n 'proanthocyanins',\n 'color_intensity',\n 'hue',\n 'dilution',\n 'proline'])\n\n elif name == 'titanic':\n import kaggle; kaggle.api.authenticate()\n prep_path(path)\n if len(os.listdir(path_raw)) == 0:\n kaggle.api.competition_download_files('titanic', path_raw)\n titanic = pd.read_csv(os.path.join(path_raw, 'train.csv'))\n titanic_test = pd.read_csv(os.path.join(path_raw, 'test.csv'))\n return titanic, titanic_test\n\n elif name == 'lanl':\n import kaggle; kaggle.api.authenticate()\n prep_path(path)\n if len(os.listdir(path)) == 0:\n kaggle.api.competition_download_files('LANL-Earthquake-Prediction', path_raw)\n if not os.path.exists(os.path.join(path_raw, 'test')):\n zip_ref = zipfile.ZipFile(os.path.join(path_raw, 'test.zip'), 'r')\n zip_ref.extractall(os.path.join(path_raw, 'test'))\n zip_ref.close()\n return pd.read_csv(os.path.join(path_raw, 'train.csv.zip'))\n\n elif name == 'MNIST':\n mnist = torchvision.datasets.MNIST('downloads', train=True, download=True)\n mnist_test = torchvision.datasets.MNIST('downloads', train=False, download=True)\n return mnist, mnist_test\n\n elif name == 'FashionMNIST':\n fmnist = torchvision.datasets.FashionMNIST('downloads', train=True, download=True)\n fmnist_test = torchvision.datasets.FashionMNIST('downloads', train=False, download=True)\n return fmnist, fmnist_test", "def load_drugs():\n\n print(\"Drugs\")\n\n Drug.query.delete()\n\n with open(\"seed_data/drug_seed.psv\") as drugs:\n for row in drugs:\n name = row.strip()\n\n drug = Drug(generic_name=name)\n\n db.session.add(drug)\n\n db.session.commit()", "def load_steels():\n path = os.path.join(DATA_DIR, 
\"yieldstrength-citrination-312.csv\")\n df = pd.read_csv(path, index_col=False)\n return df", "def read_data_set():\n # shapes of datasets -- [] means expanded form:\n # - X: J\n # - net.R: J [x J x 1]\n # - F_DIST: J x J x num_features\n # - F_DIST_w1: J x J x num_features\n # - w['except_first'][-1]: (last weights) J x num_features [x 1]\n # - w['except_first'][1:-1]: (second to last weights) J x J x num_features\n # - first weights **were** also J x J x num_features\n # - w['first_for_r']: J x 1 x num_features\n\n read_X()\n read_weights(read_FDIST())", "def load_data():\n\n # Load data from categories\n comp = fetch_20newsgroups(subset='all', categories=['comp.graphics', 'comp.sys.mac.hardware', 'comp.windows.x'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n science = fetch_20newsgroups(subset='all', categories=['sci.crypt', 'sci.electronics', 'sci.space'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n politics = fetch_20newsgroups(subset='all', categories=['talk.politics.guns', 'talk.politics.mideast'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n religion = fetch_20newsgroups(subset='all', categories=['alt.atheism', 'soc.religion.christian'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n recreation = fetch_20newsgroups(subset='all', categories=['rec.autos', 'rec.sport.baseball', 'rec.sport.hockey'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n\n # Print total number of documents\n data_len = [len(comp.data), len(science.data), len(politics.data), len(recreation.data), len(religion.data)]\n\n # Subsample classes to create a balanced dataset\n sub_k = min(data_len)\n comp.data, comp.target = [list(t) for t in zip(*random.sample(list(zip(comp.data, comp.target)), sub_k))]\n science.data, science.target = [list(t) for t in zip(*random.sample(list(zip(science.data, science.target)), sub_k))]\n politics.data, politics.target = [list(t) for t in zip(*random.sample(list(zip(politics.data, politics.target)), sub_k))]\n religion.data, religion.target = [list(t) for t in zip(*random.sample(list(zip(religion.data, religion.target)), sub_k))]\n recreation.data, recreation.target = [list(t) for t in zip(*random.sample(list(zip(recreation.data, recreation.target)), sub_k))]\n\n # Subcategories labels\n subcat_comp = np.array(comp.target)\n subcat_scien = np.array(science.target) + len(comp.target_names)\n subcat_polit = np.array(politics.target) + len(comp.target_names) + len(science.target_names)\n subcat_rel = np.array(religion.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names)\n subcat_rec = np.array(recreation.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names) + len(religion.target_names)\n\n # Assign labels to train data based on categories\n y_comp = np.ones(len(comp.data))\n y_scien = 2*np.ones(len(science.data))\n y_polit = 3*np.ones(len(politics.data))\n y_rel = 4*np.ones(len(religion.data))\n y_rec = 5*np.ones(len(recreation.data))\n labels = np.concatenate((y_comp,y_scien,y_polit,y_rel,y_rec), axis=None)\n\n # Computers\n train_comp, test_comp, y_train_comp, y_test_comp, subcat_comp_train, subcat_comp_test = train_test_split(comp.data, y_comp, subcat_comp, test_size=0.2, random_state=42)\n train_comp, val_comp, y_train_comp, y_val_comp, subcat_comp_train, subcat_comp_val = train_test_split(train_comp, y_train_comp, subcat_comp_train, test_size=0.25, 
random_state=42)\n\n # Sciences\n train_scien, test_scien, y_train_scien, y_test_scien, subcat_scien_train, subcat_scien_test = train_test_split(science.data, y_scien, subcat_scien, test_size=0.2, random_state=42)\n train_scien, val_scien, y_train_scien, y_val_scien, subcat_scien_train, subcat_scien_val = train_test_split(train_scien, y_train_scien, subcat_scien_train, test_size=0.25, random_state=42)\n\n # Politics\n train_polit, test_polit, y_train_polit, y_test_polit, subcat_polit_train, subcat_polit_test = train_test_split(politics.data, y_polit, subcat_polit, test_size=0.2, random_state=42)\n train_polit, val_polit, y_train_polit, y_val_polit, subcat_polit_train, subcat_polit_val = train_test_split(train_polit, y_train_polit, subcat_polit_train, test_size=0.25, random_state=42)\n\n # Religion\n train_rel, test_rel, y_train_rel, y_test_rel, subcat_rel_train, subcat_rel_test = train_test_split(religion.data, y_rel, subcat_rel, test_size=0.2, random_state=42)\n train_rel, val_rel, y_train_rel, y_val_rel, subcat_rel_train, subcat_rel_val = train_test_split(train_rel, y_train_rel, subcat_rel_train, test_size=0.25, random_state=42)\n\n # Recreation\n train_rec, test_rec, y_train_rec, y_test_rec, subcat_rec_train, subcat_rec_test = train_test_split(recreation.data, y_rec, subcat_rec, test_size=0.2, random_state=42)\n train_rec, val_rec, y_train_rec, y_val_rec, subcat_rec_train, subcat_rec_val = train_test_split(train_rec, y_train_rec, subcat_rec_train, test_size=0.25, random_state=42)\n\n # Corpus from all categories in train set\n newsgroups_train = train_comp + train_scien + train_polit + train_rel + train_rec\n #print(f\"Total number of documents in all categories in the train set is {len(newsgroups_train)}.\")\n train_labels = np.concatenate((y_train_comp,y_train_scien,y_train_polit,y_train_rel,y_train_rec), axis=None)\n #print(train_labels.shape)\n train_subcat = np.concatenate((subcat_comp_train,subcat_scien_train,subcat_polit_train,subcat_rel_train,subcat_rec_train), axis=None)\n #print(train_subcat.shape)\n\n # Corpus from all categories in test set\n newsgroups_test = test_comp + test_scien + test_polit + test_rel + test_rec\n test_labels = np.concatenate((y_test_comp,y_test_scien,y_test_polit,y_test_rel,y_test_rec), axis=None)\n test_subcat = np.concatenate((subcat_comp_test,subcat_scien_test,subcat_polit_test,subcat_rel_test,subcat_rec_test), axis=None)\n\n # Corpus from all categories in validation set\n newsgroups_val = val_comp + val_scien + val_polit + val_rel + val_rec\n val_labels = np.concatenate((y_val_comp,y_val_scien,y_val_polit,y_val_rel,y_val_rec), axis=None)\n val_subcat = np.concatenate((subcat_comp_val,subcat_scien_val,subcat_polit_val,subcat_rel_val,subcat_rec_val), axis=None)\n\n # Data Split\n total = len(test_labels) + len(val_labels) + len(train_labels)\n\n return newsgroups_train, train_labels, newsgroups_test, test_labels, newsgroups_val, val_labels, train_subcat, test_subcat, val_subcat", "def import_datasets(snli_path):\n print('extract data from snli directory..')\n train = dict(); dev = dict(); test = dict()\n gold_labels = {'entailment': 0, 'neutral': 1, 'contradiction': 2}\n\n for file_type in ['train', 'dev', 'test']:\n path = os.path.join(snli_path, 'snli_1.0_{}.jsonl'.format(file_type))\n with open(path) as file:\n data = [json.loads(line) for line in file]\n eval(file_type)['premise'] = [entry['sentence1'] for entry in data if entry['gold_label'] != '-']\n eval(file_type)['hypothesis'] = [entry['sentence2'] for entry in data if entry['gold_label'] 
!= '-']\n g_labels = np.array([gold_labels[entry['gold_label']] for entry in data if entry['gold_label'] != '-'])\n eval(file_type)['label'] = g_labels\n print('extraction process was finished successfully!')\n return train, dev, test", "def load_datasets():\n idx, data_paths, data_names, desc_paths, descrips, sql_paths, \\\n sql_names, loaded, table_size, \\\n loaded_names = mgr.build_datasets_table()\n return render_template('load_datasets.html',\n zip=zip(idx, data_paths, data_names, desc_paths,\n descrips, sql_paths, sql_names, loaded,\n table_size),\n data_names=loaded_names)", "def load_tamper(self, dataset_dir, subset):\n # Add classes. We have one class.\n # Naming the dataset nucleus, and the class nucleus\n self.add_class(\"tampers\", 1, \"tampers\")\n\n # Which subset?\n # \"val\": use hard-coded list above\n # \"train\": use data from stage1_train minus the hard-coded list above\n # else: use the data from the specified sub-directory\n # assert subset in [\"train\", \"val\", \"stage1_train\", \"stage1_test\", \"stage2_test\"]\n # subset_dir = \"stage1_train\" if subset in [\"train\", \"val\"] else subset\n dataset_dir = os.path.join(dataset_dir, subset, 'images')\n if subset == \"val\" or subset == \"test\":\n image_ids = next(os.walk(dataset_dir))[2]\n else:\n # Get image ids from directory names\n image_ids = next(os.walk(dataset_dir))[2]\n \n\n # dircopy_move = '/data/twj/copy-move/data_zoo/dataset/images/train'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # dirnew_splicing = '/data/tamper'\n # image_ids_new_splicing = next(os.walk(os.path.join(dirnew_splicing, 'images')))[2]\n\n # dircopy_move = '/home/as/deeplab/wpmrcnn/ca2new/test'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n \n # dircopy_move = '/data/gy/ca2att/train3'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # # # dirtxt_sp = '/data/gy/tamperpre/train'\n # # # image_ids_txt_sp = next(os.walk(os.path.join(dirtxt_sp, 'images')))[2]\n\n # dirnew_sp = '/data/gy/c2newsp/train'\n # image_ids_new_sp = next(os.walk(os.path.join(dirnew_sp, 'images')))[2]\n\n # Add images\n for image_id in image_ids:\n self.add_image(\n \"tampers\",\n image_id=image_id[:-4],\n path=os.path.join(dataset_dir, image_id))\n\n # for image_id in image_ids_copy_move:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dircopy_move, 'images', image_id))\n\n # for image_id in image_ids_new_splicing:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_splicing, 'images', image_id))\n\n # # for image_id in image_ids_txt_sp:\n # # self.add_image(\n # # \"tampers\",\n # # image_id=image_id[:-4],\n # # path=os.path.join(dirtxt_sp, 'images', image_id))\n\n # for image_id in image_ids_new_sp:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_sp, 'images', image_id))", "def load_susy(trainsize=500, testsize=1000):\n filename = 'datasets/susysubset.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset, trainsize, testsize)\n return trainset, testset", "def load_batched_dataset(is_train, embeddings):\n tensorize_text_fn = build_tensorize_text_fn(embeddings)\n unbatched = load_data(is_train)\n\n def tensorize(x):\n x[\"premise\"] = tensorize_text_fn(x[\"premise\"])\n x[\"hypothesis\"] = tensorize_text_fn(x[\"hypothesis\"])\n return x\n\n unbatched = unbatched.map(tensorize)\n\n hist_bins = 
list(range(5, 500, 5))\n batched = unbatched.apply(\n ops.bucket_by_quantiles(lambda x: x[\"premise\"][\"len\"], FLAGS.batch_size,\n 10, hist_bins))\n if is_train:\n batched = batched.shuffle(1000, reshuffle_each_iteration=True)\n batched = batched.repeat()\n\n # Get (features, label) format for tf.estimator\n return batched.map(lambda x: (x, x[\"label\"]))", "def create_dataset(json_data_filepath, dataset_filepath, drop_irrelevant_tweets):\n # Stupidity check.\n check_for_preexisting_output_file(dataset_filepath)\n\n global unknown_company_count_global, non_english_count_global\n log.info(f'\\tloading raw tweets from {json_data_filepath}')\n\n count = 0\n include_header = True\n\n # Load/save the file in chunks.\n for df_chunk in pd.read_json(json_data_filepath, orient='records', lines=True, chunksize=100):\n\n # Modify these to determine what to export to CSV/JSON.\n required_fields = ['retweeted_derived', 'company_derived', 'text_derived', # \"tweet_quoted_status_id\",\n 'tweet_url_link_derived', 'multiple_companies_derived_count', \"company_derived_designation\",\n 'tweet_text_length_derived', \"spaCy_language_detect_all_tweets\",\n \"user_description_text_length\", # \"polyglot_lang_detect_all_tweets\"\n ] + tweet_object_fields + user_object_fields + entities_object_fields\n\n # These fields are exported to a separate CSV/JSON file to cut down on file size.\n extra_fields = [\"tweet_id\"] + retweeted_status_object_fields\n\n # Rename main Tweet object fields.\n df_chunk[tweet_object_fields] = df_chunk[original_tweet_object_field_names]\n\n # FIXME - KeyError: ('quoted_status_id', 'occurred at index 0') - debug the issue.\n # df_chunk[\"tweet_quoted_status_id\"] = df_chunk.apply(rename_column, axis=1)\n\n # Extract Tweet \"user\" object fields.\n df_chunk[user_object_fields] = df_chunk.apply(compute_user_series, axis=1)\n\n # Determine the user profile description text length.\n df_chunk[\"user_description_text_length\"] = df_chunk.apply(\n lambda x: compute_user_description_text_length(x) if (pd.notnull(x[\"user_description\"])) else 0, axis=1)\n\n # Extract Tweet \"entities\" fields.\n df_chunk[\"tweet_entities_expanded_urls\"] = df_chunk.apply(compute_expanded_urls, axis=1)\n df_chunk['tweet_entities_hashtags'] = df_chunk.apply(compute_hashtags, axis=1)\n df_chunk[\"tweet_entities_user_mentions_id\"] = df_chunk.apply(compute_user_mentions_id, axis=1)\n df_chunk[\"tweet_entities_user_mentions_name\"] = df_chunk.apply(compute_user_mentions_name, axis=1)\n df_chunk[\"tweet_entities_user_mentions_screen_name\"] = df_chunk.apply(compute_user_mentions_screen_name, axis=1)\n df_chunk[\"tweet_entities_symbols\"] = df_chunk.apply(compute_symbols, axis=1)\n\n # Create/update/infer fields. 
(original extracted/derived fields)\n df_chunk['retweeted_derived'] = df_chunk.apply(compute_retweet, axis=1)\n df_chunk['text_derived'] = df_chunk.apply(compute_full_text, axis=1)\n df_chunk['company_derived'] = df_chunk.apply(compute_company, axis=1)\n df_chunk['tweet_url_link_derived'] = df_chunk.apply(compute_url_link, axis=1)\n\n # Count the # of companies each Tweet is associated with.\n df_chunk['multiple_companies_derived_count'] = \\\n df_chunk.apply(compute_number_of_associated_companies, axis=1)\n\n # Determine whether Tweet is associated with \"company_name\" or \"multiple\" companies.\n df_chunk[\"company_derived_designation\"] = df_chunk.apply(compute_company_designation, axis=1)\n\n # Compute Tweet text length.\n df_chunk[\"tweet_text_length_derived\"] = df_chunk.apply(compute_text_length, axis=1)\n\n # Extract Tweet object \"retweeted_status\" object fields.\n df_chunk[retweeted_status_object_fields] = df_chunk.apply(compute_flatten_retweeted_status_attribute, axis=1)\n\n # Flatten nested fields in \"retweeted_status_user\". FIXME - non-functional.\n # df_chunk[retweeted_status_user_object_fields] = df_chunk.apply(\n # compute_flatten_retweeted_status_user_attributes, axis=1)\n\n # Determine the Tweet text's language using spaCy natural language processing library. (note: slow)\n df_chunk[\"spaCy_language_detect_all_tweets\"] = df_chunk.apply(\n lambda x: spacy_language_detection(x) if (pd.notnull(x[\"tweet_full_text\"])) else \"none\", axis=1)\n\n # Remove irrelevant tweets (non-English or unknown-company).\n if drop_irrelevant_tweets:\n df_chunk = df_chunk[\n ((df_chunk['company_derived'] != 'none') &\n (df_chunk['tweet_lang'].str.startswith('en') |\n df_chunk['spaCy_language_detect_all_tweets'].str.startswith('en')\n ))\n ]\n\n # Write each chunk to the combined dataset file.\n df_chunk[required_fields].to_csv(f\"{dataset_filepath}.csv\", index=False, quoting=csv.QUOTE_NONNUMERIC,\n mode='a', header=include_header)\n\n # Write select attributes within each chunk to a separate dataset file to reduce file size.\n df_chunk[extra_fields].to_csv(f\"{dataset_filepath}-extra.csv\", index=False,\n quoting=csv.QUOTE_NONNUMERIC, mode='a', header=include_header)\n\n # Print a progress message.\n count += df_chunk.shape[0]\n # Only include the header once, at the top of the file.\n include_header = False\n log.info(f'\\t\\tprocessed {count} records...')\n\n # Debug purposes - test on a small subset by setting to small chunk size and breaking out of loop.\n break\n\n # Drop duplicate rows/examples/Tweets.\n df_full = pd.read_csv(f\"{dataset_filepath}.csv\", sep=',', encoding=\"utf-8\")\n df_full.drop_duplicates(inplace=True)\n # df_full.dropna(how=\"all\")\n df_full.to_csv(f\"{dataset_filepath}.csv\",\n index=False, header=True, quoting=csv.QUOTE_NONNUMERIC, encoding='utf-8')\n df_full.to_json(f\"{dataset_filepath}.json\", orient='records', lines=True)\n\n df_extra = pd.read_csv(f\"{dataset_filepath}-extra.csv\", sep=',', encoding=\"utf-8\")\n df_extra.drop_duplicates(inplace=True)\n df_extra.to_csv(f\"{dataset_filepath}-extra.csv\",\n index=False, header=True, quoting=csv.QUOTE_NONNUMERIC, encoding='utf-8')\n df_extra.to_json(f\"{dataset_filepath}-extra.json\", orient='records', lines=True)\n\n log.info(f'\\tsaved the dataset to {dataset_filepath}'\n f'\\n\\t\\tunknown company count: {unknown_company_count_global}'\n f'\\n\\t\\tnon-English count: {non_english_count_global}'\n )", "def load_dataset(as_wide=False, label_cols='data_id'):\n df = pd.read_feather(FLOWER_PATH)\n # BUG in 
dataset with duplicate values\n df = df.drop_duplicates()\n df.loc[:, 'reflectance'] = df['reflectance'].fillna(0)\n df.loc[df['reflectance'] < 0, 'reflectance'] = 0\n\n if as_wide:\n return pd.pivot_table(df, 'reflectance', 'wavelengths', label_cols).fillna(0)\n\n return df", "def read_and_split_sets():\n gen_train_test_sets(\"Data_Sent_Embds/en_sent.pkl\", \"Data_Sent_Embd_Splitted/en_train.pkl\",\n \"Data_Sent_Embd_Splitted/en_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/es_sent.pkl\", \"Data_Sent_Embd_Splitted/es_train.pkl\",\n \"Data_Sent_Embd_Splitted/es_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/pr_sent.pkl\", \"Data_Sent_Embd_Splitted/pr_train.pkl\",\n \"Data_Sent_Embd_Splitted/pr_test.pkl\")", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = {\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)", "def load_data(loc='./data/', sp=None):\n trainA, trainB, devA, devB, testA, testB = [],[],[],[],[],[]\n trainS, devS, testS = [],[],[]\n print('loc', loc)\n with codecs.open(os.path.join(loc, 'SICK_train.txt'), mode='rb', encoding='utf-8') as f:\n for line in f:\n text = unicode_tr(line).lower().strip().split('\\t')\n trainA.append(encode_sentence(text[1], sp))\n trainB.append(encode_sentence(text[2], sp))\n trainS.append(text[3])\n with codecs.open(os.path.join(loc, 'SICK_trial.txt'), mode='rb', encoding='utf-8') as f:\n for line in f:\n text = unicode_tr(line).lower().strip().split('\\t')\n devA.append(encode_sentence(text[1], sp))\n devB.append(encode_sentence(text[2], sp))\n devS.append(text[3])\n with codecs.open(os.path.join(loc, 'SICK_test_annotated.txt'), mode='rb', encoding='utf-8') as f:\n for line in f:\n text = unicode_tr(line).lower().strip().split('\\t')\n testA.append(encode_sentence(text[1], sp))\n testB.append(encode_sentence(text[2], sp))\n testS.append(text[3])\n\n trainS = [float(s) for s in trainS[1:]]\n devS = [float(s) for s in devS[1:]]\n testS = [float(s) for s in testS[1:]]\n\n return [trainA[1:], trainB[1:]], [devA[1:], devB[1:]], [testA[1:], 
testB[1:]], [trainS, devS, testS]", "def load_data():\n\n base = 'http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/'\n fname = 'BSR_bsds500.tgz'\n\n path = get_file(fname,\n origin = base + fname,\n cache_dir = DEFAULT_CACHE_DIR,\n dset_name = 'bsds500')\n\n f = tarfile.open(path)\n\n train_data = []\n test_data = []\n for name in f.getnames():\n if name.startswith('BSR/BSDS500/data/images/train/'):\n try:\n fp = f.extractfile(name)\n img = imageio.imread(fp)\n train_data.append(img)\n except:\n continue\n elif name.startswith('BSR/BSDS500/data/images/test/'):\n try:\n fp = f.extractfile(name)\n img = skimage.io.imread(fp)\n test_data.append(img)\n except:\n continue\n\n\n return (train_data, test_data)", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def prepare_datasets(target_filename='data'):\n data_cornell = np.array(datasets.readCornellData('__data__/cornell/', max_len=1000000))\n data_opensubs = np.array(datasets.readOpensubsData('__data__/opensubs/', max_len=1000000))\n\n data = np.concatenate([data_cornell, data_opensubs], axis=0)\n del data_cornell, data_opensubs\n\n pd.DataFrame(data, columns=('question', 'answer')).to_feather('__data__/'+target_filename+'.feather')", "def import_squad_data():\n\n squad_url = (\n \"https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json\"\n )\n squad_file = squad_url.split(\"/\")[-1] # last part of URL\n\n urllib.request.urlretrieve(squad_url, squad_file)\n\n if not os.path.isfile(squad_file):\n sys.exit(\"Dataset %s does not exist!\" % squad_file)\n\n with open(squad_file) as squad_file_handle:\n squad_data = json.load(squad_file_handle)[\"data\"]\n\n title_list = []\n ident_list = []\n context_list = []\n question_list = []\n impossible_list = []\n answer_start_list = []\n answer_text_list = []\n\n # 'data' contains title and paragraph list\n for it_art in squad_data:\n title = it_art[\"title\"]\n\n # 'paragraphs' contains context (the copy) and Q&A sets\n for it_par in it_art[\"paragraphs\"]:\n context = it_par[\"context\"]\n\n # 'qas' contains questions and reference answers\n for it_que in it_par[\"qas\"]:\n question = it_que[\"question\"]\n impossible = it_que[\"is_impossible\"]\n ident = it_que[\"id\"]\n\n # 'answers' contains the answer text and location in 'context'\n for it_ans in it_que[\"answers\"]:\n answer_start = it_ans[\"answer_start\"]\n text = it_ans[\"text\"]\n\n # set an empty answer for an impossible question\n if impossible:\n text = \"\"\n\n # add details of this answer to the list\n title_list.append(title)\n ident_list.append(ident)\n context_list.append(context)\n question_list.append(question)\n impossible_list.append(impossible)\n answer_start_list.append(answer_start)\n answer_text_list.append(text)\n\n squad_data_final = pandas.DataFrame(\n {\n \"id\": ident_list,\n \"subject\": title_list,\n \"context\": context_list,\n \"question\": question_list,\n \"clean_question\": [clean(question) for 
question in question_list],\n \"impossible\": impossible_list,\n \"answer_start\": answer_start_list,\n \"answer\": answer_text_list,\n }\n )\n\n return squad_data_final.drop_duplicates(keep=\"first\")", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def load_skills():\n\n Skill.query.delete()\n\n # get all the qualifications text from postings\n postings = db.session.query(Posting.qualifications).all()\n # combine qualifications into a list\n all_skills = []\n with open('filler.txt') as filler:\n del_words = filler.read()\n for post in postings:\n words = post.qualifications.lower().split()\n # iterate through a list of those skills\n for word in words:\n word = word.strip(\"-()/\\,.:;* 1234567890\")\n # check to see if that word isn't in our filler document\n # if not, add it to the table\n if word not in del_words and word not in all_skills:\n all_skills.append(word)\n skill = Skill(skill=word)\n db.session.add(skill)\n db.session.commit()", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def load_data(data_links_list=(\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/raw_data.csv',\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/sample_meta_info.tsv')):\n\n # Reading data sets from the links provided.\n df1 = pd.read_csv(data_links_list[0],\n error_bad_lines=False)\n df2 = pd.read_csv(data_links_list[1],\n sep='\\t')\n df2 = df2.set_index(df2['project'])\n # fill the Nas id df1 as \". 
Makes the groupbys behave better.\n df1.fillna('', inplace=True)\n # repleace 'genus' = 'other' with an empty string to be consistent.\n df1.replace(to_replace='other', value='', inplace=True)\n # Removing duplicate columns.\n del df2['project']\n del df2['ID']\n df1 = df1.set_index(df1['project'])\n # Removing duplicate column.\n del df1['project']\n # Joining the two datasets.\n df = df1.join(df2)\n # Uniformity in non-capitalization of column names.\n df.rename(columns={'Kingdom': 'kingdom', 'Phylum': 'phylum',\n 'Class': 'class', 'Order': 'order',\n 'Family': 'family', 'Genus': 'genus',\n 'Length': 'length'}, inplace=True)\n df.index.names = ['sampleID']\n # Rearranging columns so that abundance is the last column.\n df = df[['kingdom',\t'phylum', 'class', 'order',\n 'family', 'genus', 'length', 'oxygen',\n 'replicate', 'week', 'abundance']]\n assert isinstance(df, pd.DataFrame)\n return df", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def get_datasets(load_key=None, maven=False):\n ds_names = {}\n if load_key == 'R2349': \n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_names['batsrus_multi_species'] = model_dir+'R2349/batsrus_3d_multi_species.h5'\n ds_names['batsrus_electron_pressure'] = model_dir+'R2349/batsrus_3d_pe.h5'\n ds_names['heliosares'] ='/Volumes/triton/Data/ModelChallenge/R2349/heliosares_multi.h5'\n #ds_names['rhybrid'] ='/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5'\n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'multi_fluid' in key],\n 'batsrus2':[key for key in ds_names.keys() if 'multi_species' in key],\n 'batsrus3':[key for key in ds_names.keys() if 'electron_pressure' in key],\n 'batsrus4':[key for key in ds_names.keys() if 'mf_lr' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key],\n 'rhybrid_helio':[key for key in ds_names.keys() if 'rhybrid' in key ]}\n if maven or True:\n ds_names['maven']=orbit_dir+'orbit_2349.csv'\n #ds_names['maven'] = orbit_dir+'orbit_plume_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'batsrus_mf_lowres':\n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_types = {'batsrus_mf_lr' : ['batsrus_mf_lr']}\n\n\n elif load_key == 'helio_multi':\n ds_names['t00550'] = model_dir+'R2349/Heliosares_Multi/t00550.h5'\n ds_names['t00560'] = model_dir+'R2349/Heliosares_Multi/t00560.h5'\n ds_names['t00570'] = model_dir+'R2349/Heliosares_Multi/t00570.h5'\n ds_names['t00580'] = model_dir+'R2349/Heliosares_Multi/t00580.h5'\n ds_names['t00590'] = model_dir+'R2349/Heliosares_Multi/t00590.h5'\n ds_names['t00600'] = model_dir+'R2349/Heliosares_Multi/t00600.h5'\n ds_names['t00610'] = model_dir+'R2349/Heliosares_Multi/t00610.h5'\n ds_names['t00620'] = model_dir+'R2349/Heliosares_Multi/t00620.h5'\n ds_names['t00630'] = model_dir+'R2349/Heliosares_Multi/t00630.h5'\n ds_names['t00640'] = model_dir+'R2349/Heliosares_Multi/t00640.h5'\n ds_names['t00650'] = model_dir+'R2349/Heliosares_Multi/t00650.h5'\n\n ds_types = 
{'heliosares':[key for key in ds_names.keys()]}\n if maven:\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'SDC_BATS':\n ds_names['LS180_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_max.h5'\n ds_names['LS270_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_max.h5'\n ds_names['LS090_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_max.h5'\n ds_names['LS180_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_max.h5'\n ds_names['LS270_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_max.h5'\n ds_names['LS090_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_max.h5'\n ds_names['LS180_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_max.h5'\n ds_names['LS270_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_max.h5'\n ds_names['LS090_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_max.h5'\n ds_names['LS180_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_min.h5'\n ds_names['LS270_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_min.h5'\n ds_names['LS090_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_min.h5'\n ds_names['LS180_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_min.h5'\n ds_names['LS270_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_min.h5'\n ds_names['LS090_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_min.h5'\n ds_names['LS180_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_min.h5'\n ds_names['LS270_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_min.h5'\n ds_names['LS090_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_min.h5'\n\n ds_types = {'batsrus':[key for key in ds_names.keys()]}\n\n elif load_key == 'SDC_G1':\n #BATSRUS\n ds_names['bats_min_LS270_SSL0'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG0.h5'\n ds_names['bats_min_LS270_SSL180'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG180.h5'\n ds_names['bats_min_LS270_SSL270'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG270.h5' \n \n #HELIOSARES\n #ds_names['helio_1'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_1.h5'\n \n #ds_names['helio_2'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_2.h5'\n \n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'bats' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key]}\n if maven:\n pass\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n #ds_types['maven']=['maven']\n\n elif load_key == 'rhybrid_res':\n ds_names = {'rhybrid240':'/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5',\n 'rhybrid120':'/Volumes/triton/Data/ModelChallenge/R2349/HYB/state00030000.h5'}\n ds_types = {'rhybrid1':['rhybrid240'], 'rhybrid2':['rhybrid120']}\n elif load_key == 'batsrus_tseries':\n ds_names = {'batsrus_mf':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_mf/3d__ful_4_n00040000.h5',\n 'batsrus_ms':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_ms/3d__mhd_6_n0050000.h5'}\n ds_types = {'batsrus_mf':['batsrus_mf'], 'batsrus_ms':['batsrus_ms']}\n\n elif load_key == 'maven':\n ds_names, ds_types = {},{}\n ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'exo_2349':\n keys = ['2349_1RM_225km','2349_1RM_450km', '2349_2RM_450km',\n '2349_2RM_900km','2349_4RM_900km'] \n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif 
load_key == 'exo_comparisonA':\n keys = ['2349_1RM_225km', '2349_2RM_450km',\n '2349_1.5RM_338km'] \n ds_names = {k:exo_dir+'/ComparisonA/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonB':\n keys = ['2349_1RM_225km', 'T0_1RM_225km', 'T1_1RM_225km', \"T2_1RM_225km\"] \n ds_names = {k:exo_dir+'/ComparisonB/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n elif load_key == 'exo_t1':\n keys = ['T1_1RM_112km', 'T1_1RM_225km', #'T1_1RM_450km',\n 'T1_2RM_225km', 'T1_2RM_450km', #'T1_2RM_900km',\n 'T1_4RM_900km']\n\n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n else:\n print('No datasets selected')\n \n\n return (ds_names, ds_types)", "def load_sdf_dataset(path,\n label_col,\n data_dir=None,\n explicit_H=True,\n use_chirality=False,\n use_molecular_attributes=False,\n all_pair_features=True,\n graph_distance=False):\n label_csv = path + '.csv'\n label_df = pd.read_csv(label_csv)\n labels = np.array(label_df[label_col])\n \n MolSupp = Chem.SDMolSupplier(path, \n sanitize=False,\n removeHs=not explicit_H,\n strictParsing=False)\n dataset = MolDataset(root=data_dir)\n batch_graphs = []\n for i, (mol, l) in enumerate(zip(MolSupp, labels)):\n if i > 0 and i % 1000 == 0:\n print(\"Featurized %d molecules\" % i)\n dataset.add_graph_batch(batch_graphs)\n batch_graphs = []\n if mol is None:\n print(\"W Error loading molecule %d\" % i)\n g = mol_to_graph(mol,\n explicit_H=explicit_H,\n use_chirality=use_chirality,\n use_molecular_attributes=use_molecular_attributes,\n all_pair_features=all_pair_features,\n graph_distance=graph_distance)\n try:\n g.smi = Chem.MolToSmiles(mol)\n except Exception as e:\n print(e)\n print(\"W Error generating SMILES for molecule %d\" % i)\n g.smi = ''\n w = (l==l) * 1\n y = copy.deepcopy(l)\n y[np.where(y != y)] = 0.\n g.y = t.from_numpy(y).long()\n g.w = t.from_numpy(w).float()\n batch_graphs.append(g)\n dataset.add_graph_batch(batch_graphs)\n return dataset", "def load_data_raw(source='local'):\n import os\n\n if source == 'kaggle':\n # Download the Ames Housing Dataset\n # Set the enviroment variables\n import zipfile\n os.environ['KAGGLE_USERNAME'] = \"lubomrstraka\"\n os.environ['KAGGLE_KEY'] = \"c7347462ef834e6645ce238c2f2fa561\"\n \n # Import dependencies\n os.system(\"pip install kaggle --upgrade --quiet\")\n\n # Download datasets\n os.system(\"kaggle competitions download -c house-prices-advanced-regression-techniques --quiet\")\n \n # Extract data\n with zipfile.ZipFile('house-prices-advanced-regression-techniques.zip', 'r') as archive:\n archive.extractall()\n\n # Read Train & Test Baseline Data\n train_bl = pd.read_csv('train.csv', index_col='Id')\n test_bl = pd.read_csv('test.csv', index_col='Id')\n \n # Using local data used while registering dataset\n # prevents \"Too many requests\" response from Kaggle\n elif source == 'local': \n train_bl = pd.read_csv('train.csv', index_col='Id')\n test_bl = pd.read_csv('test.csv', index_col='Id')\n \n else:\n print('Missing data!')\n train_bl = pd.DataFrame()\n test_bl = pd.DataFrame()\n\n\n return train_bl, test_bl" ]
[ "0.5910811", "0.58030725", "0.5660123", "0.5597031", "0.5595254", "0.5586176", "0.5569032", "0.55044585", "0.5473611", "0.54603606", "0.5439093", "0.54336035", "0.5362078", "0.53447014", "0.5343428", "0.5316086", "0.5292347", "0.52869546", "0.52805156", "0.5272248", "0.527162", "0.52686495", "0.52670616", "0.5265953", "0.5238661", "0.5232401", "0.52250683", "0.5224032", "0.5217263", "0.5198975" ]
0.64931226
0
Test adding a class to widget without preexisting class attribute
def test_add_class_to_empty_widget():
    widget = forms.CheckboxInput(attrs={})
    new_class = "new-class"
    expected = {"attrs": {"class": "new-class"}}
    result = add_class(widget.__dict__, new_class)
    assert result["attrs"] == expected["attrs"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_class_to_widget_with_class():\n widget = forms.CheckboxInput(attrs={\"class\": \"old-class\"})\n new_class = \"new-class\"\n expected = {\"attrs\": {\"class\": \"old-class new-class\"}}\n result = add_class(widget.__dict__, new_class)\n assert result[\"attrs\"] == expected[\"attrs\"]", "def test_add_widget_classes_simple():\n form = ExampleForm({\"text\": \"some text here\"})\n field = form[\"text\"]\n result = add_widget_classes(field)\n expected = '<input type=\"text\" name=\"text\" value=\"some text here\" class=\"nhsuk-input\" required id=\"id_text\">'\n assert result == expected", "def test_add_widget_classes_error_on_field():\n form = ExampleForm({\"text\": \"\"})\n field = form[\"text\"]\n result = add_widget_classes(field)\n expected = '<input type=\"text\" name=\"text\" class=\"nhsuk-input nhsuk-input--error\" required id=\"id_text\">'\n assert result == expected", "def test_html_attr_class_settable(self):\n CUSTOM_HTML_CLASS = 'myowncss'\n attrs = {'class': CUSTOM_HTML_CLASS}\n w = SelectMultipleField()\n tag = w.render('test', self.choices[1][0], attrs, self.choices)\n self.assertEqual(tag.count(CUSTOM_HTML_CLASS), 1)\n self.assertEqual(tag.count(HTML_ATTR_CLASS), 0)", "def XPGetWidgetClassFunc(inWidgetClass):\n pass", "def test_class_attributes():\n\n string_class_dict = {\"class\": \"spam\"}\n string_class = hr.Element(\"this is some text\", **string_class_dict)\n assert get_opening_line(string_class) == '<html class=\"spam\">'\n\n clas = hr.Element(\"this is some text\", clas=\"spam\") # cspell:disable-line\n assert get_opening_line(clas) == '<html class=\"spam\">' # cspell:disable-line\n\n _clas = hr.Element(\"this is some text\", _clas=\"spam\") # cspell:disable-line\n assert get_opening_line(_clas) == '<html class=\"spam\">' # cspell:disable-line\n\n _class = hr.Element(\"this is some text\", _class=\"spam\") # cspell:disable-line\n assert get_opening_line(_class) == '<html class=\"spam\">' # cspell:disable-line", "def test_constructor(self):\n f = ListingForm()\n self.assertEqual('form-control', f.fields['comments'].widget.attrs['class'])\n self.assertEqual('form-control', f.fields['asking_price'].widget.attrs['class'])", "def setup_class(klass):", "def setup_class(klass):", "def test_classes(self):\r\n css_classes = [\r\n ('unsubmitted', 'unanswered'),\r\n ('incomplete', 'incorrect'),\r\n ('queued', 'processing'),\r\n ('correct', 'correct'),\r\n ('test', 'test'),\r\n ]\r\n for status, classname in css_classes:\r\n statobj = inputtypes.Status(status)\r\n self.assertEqual(statobj.classname, classname)", "def add_class_to_widget(widget, *css_classes):\n css_string = \" \".join(css_classes)\n if 'class' in widget.attrs:\n widget.attrs['class'] += ' {} '.format(css_string)\n else:\n widget.attrs['class'] = css_string", "def test_contains_when_class_init_requires_arguments(self):\n registry = ClassRegistry(attr_name='element')\n\n @registry.register\n class Butterfree(Pokemon):\n element = 'bug'\n\n def __init__(self, name):\n super(Butterfree, self).__init__(name)\n\n self.assertTrue('bug' in registry)", "def test_class_no_class(self):\n html = '<span><div class=\"pink\">test</div><div>t2</div></span>'\n css = '.pink { font-size: 1em; }'\n expected = '<span><div class=\"pink\" style=\"font-size: 1em;\">test</div><div>t2</div></span>'\n result = inline_css(html, css, pretty_print=False)\n self.assertEqual(expected, result)", "def identify_class(self, cls):", "def testClassNotMutable(self):\n self.assertRaises(AttributeError,\n setattr,\n Color,\n 
'something_new',\n 10)", "def test_class_attribute() -> None:\n assert get_type_hints(lmp.tknzr._bpe.BPETknzr) == {'tknzr_name': ClassVar[str]}\n assert lmp.tknzr._bpe.BPETknzr.tknzr_name == 'BPE'", "def test_class_method(self):\n self.assertEqual(self.Test.unscoped.im_self.__name__, 'Test')", "def test_all_no_class(self):", "def test_all_no_class(self):", "def test_has_select_multiple_class(self):\n w = SelectMultipleField()\n tag = w.render('test', self.choices[1][0], choices=self.choices)\n self.assertEqual(tag.count(HTML_ATTR_CLASS), 1)", "def test_recipe_nutrition_label_widget(self):\n pass", "def test_constructor(self):\n f = TransactionRequestForm()\n self.assertEqual('form-control', f.fields['text'].widget.attrs['class'])\n self.assertEqual(5, f.fields['text'].widget.attrs['rows'])\n self.assertEqual('form-control', f.fields['price'].widget.attrs['class'])", "def test_standard_unit_class_not_set(self) -> None:\n # Arrange.\n MyType.clear_interning_cache()\n # Make it look like we have no standard unit for this type.\n MyType._STANDARD_UNIT_CLASS = None\n\n # Create a new instance.\n my_type = MyType.decorate(MyUnit)\n\n # Act and assert.\n with pytest.raises(UnitError, match=\"no standard\"):\n my_type.standard_unit_class()", "def test_only_correct_widget_classes(self):\n original_setting = self.form.adjust_label_width\n self.form.adjust_label_width = True\n allowed = self.get_allowed_width_fields()\n reject_fields = {name: field for name, field in self.form.fields.items() if name not in allowed}\n expected = {}\n actual = self.form.determine_label_width(reject_fields)\n self.assertEqual(expected, actual)\n self.form.adjust_label_width = original_setting", "def test_chain_method(qtbot, attr, dayu_type):\n widget = MPushButton()\n if attr:\n getattr(widget, attr)()\n # widget.set_dayu_type(dayu_type)\n qtbot.addWidget(widget)\n assert widget.property('dayu_type') == dayu_type\n assert widget.property('dayu_size') == dayu_theme.default_size", "def setup_class(cls):\n # ns.assert_true(False, \"setup_class run\")\n print('setup_class\\n')", "def test_class_ended(self, cls):", "def assert_is_static_class(cls, key):\n assert key in flags\n assert flags[key]['has_init_run']\n assert flags[key]['was_self_none_during_init']\n\n with pytest.raises(NotImplementedError):\n _ = cls()", "def test_invalid_tag(tag: str) -> None:\n with pytest.raises(Exception):\n class InvalidClass1(Base, yaml_tag=tag):\n ...", "def test_class_exists(self):\n\n self.assertTrue(hasattr(Account, self.klass_name))" ]
[ "0.741316", "0.6779076", "0.67461246", "0.638963", "0.63019997", "0.61572343", "0.6032029", "0.6026303", "0.6026303", "0.59760284", "0.5943312", "0.59246916", "0.59152126", "0.5820595", "0.57375777", "0.57242674", "0.570726", "0.5699915", "0.5699915", "0.5669875", "0.5648169", "0.5642842", "0.56406605", "0.56027997", "0.55536664", "0.5548596", "0.55081403", "0.54891247", "0.5485438", "0.54805946" ]
0.7792497
0
Test adding a class to widget with preexisting class attribute
def test_add_class_to_widget_with_class():
    widget = forms.CheckboxInput(attrs={"class": "old-class"})
    new_class = "new-class"
    expected = {"attrs": {"class": "old-class new-class"}}
    result = add_class(widget.__dict__, new_class)
    assert result["attrs"] == expected["attrs"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_class_to_empty_widget():\n widget = forms.CheckboxInput(attrs={})\n new_class = \"new-class\"\n expected = {\"attrs\": {\"class\": \"new-class\"}}\n result = add_class(widget.__dict__, new_class)\n assert result[\"attrs\"] == expected[\"attrs\"]", "def test_add_widget_classes_simple():\n form = ExampleForm({\"text\": \"some text here\"})\n field = form[\"text\"]\n result = add_widget_classes(field)\n expected = '<input type=\"text\" name=\"text\" value=\"some text here\" class=\"nhsuk-input\" required id=\"id_text\">'\n assert result == expected", "def test_html_attr_class_settable(self):\n CUSTOM_HTML_CLASS = 'myowncss'\n attrs = {'class': CUSTOM_HTML_CLASS}\n w = SelectMultipleField()\n tag = w.render('test', self.choices[1][0], attrs, self.choices)\n self.assertEqual(tag.count(CUSTOM_HTML_CLASS), 1)\n self.assertEqual(tag.count(HTML_ATTR_CLASS), 0)", "def test_add_widget_classes_error_on_field():\n form = ExampleForm({\"text\": \"\"})\n field = form[\"text\"]\n result = add_widget_classes(field)\n expected = '<input type=\"text\" name=\"text\" class=\"nhsuk-input nhsuk-input--error\" required id=\"id_text\">'\n assert result == expected", "def add_class_to_widget(widget, *css_classes):\n css_string = \" \".join(css_classes)\n if 'class' in widget.attrs:\n widget.attrs['class'] += ' {} '.format(css_string)\n else:\n widget.attrs['class'] = css_string", "def test_classes(self):\r\n css_classes = [\r\n ('unsubmitted', 'unanswered'),\r\n ('incomplete', 'incorrect'),\r\n ('queued', 'processing'),\r\n ('correct', 'correct'),\r\n ('test', 'test'),\r\n ]\r\n for status, classname in css_classes:\r\n statobj = inputtypes.Status(status)\r\n self.assertEqual(statobj.classname, classname)", "def test_class_attributes():\n\n string_class_dict = {\"class\": \"spam\"}\n string_class = hr.Element(\"this is some text\", **string_class_dict)\n assert get_opening_line(string_class) == '<html class=\"spam\">'\n\n clas = hr.Element(\"this is some text\", clas=\"spam\") # cspell:disable-line\n assert get_opening_line(clas) == '<html class=\"spam\">' # cspell:disable-line\n\n _clas = hr.Element(\"this is some text\", _clas=\"spam\") # cspell:disable-line\n assert get_opening_line(_clas) == '<html class=\"spam\">' # cspell:disable-line\n\n _class = hr.Element(\"this is some text\", _class=\"spam\") # cspell:disable-line\n assert get_opening_line(_class) == '<html class=\"spam\">' # cspell:disable-line", "def setup_class(klass):", "def setup_class(klass):", "def XPGetWidgetClassFunc(inWidgetClass):\n pass", "def test_constructor(self):\n f = ListingForm()\n self.assertEqual('form-control', f.fields['comments'].widget.attrs['class'])\n self.assertEqual('form-control', f.fields['asking_price'].widget.attrs['class'])", "def update_css_class(kwargs, class_name):\n if \"className\" in kwargs:\n kwargs[\"className\"] += f\" {class_name}\"\n else:\n kwargs[\"className\"] = class_name", "def test_contains_when_class_init_requires_arguments(self):\n registry = ClassRegistry(attr_name='element')\n\n @registry.register\n class Butterfree(Pokemon):\n element = 'bug'\n\n def __init__(self, name):\n super(Butterfree, self).__init__(name)\n\n self.assertTrue('bug' in registry)", "def identify_class(self, cls):", "def test_class_attribute() -> None:\n assert get_type_hints(lmp.tknzr._bpe.BPETknzr) == {'tknzr_name': ClassVar[str]}\n assert lmp.tknzr._bpe.BPETknzr.tknzr_name == 'BPE'", "def test_has_select_multiple_class(self):\n w = SelectMultipleField()\n tag = w.render('test', self.choices[1][0], 
choices=self.choices)\n self.assertEqual(tag.count(HTML_ATTR_CLASS), 1)", "def test_classproperty_with_class(self):\n self.assertEqual(ClassPropertyTest.class_property, ClassPropertyTest)", "def test_constructor(self):\n f = TransactionRequestForm()\n self.assertEqual('form-control', f.fields['text'].widget.attrs['class'])\n self.assertEqual(5, f.fields['text'].widget.attrs['rows'])\n self.assertEqual('form-control', f.fields['price'].widget.attrs['class'])", "def test_chain_method(qtbot, attr, dayu_type):\n widget = MPushButton()\n if attr:\n getattr(widget, attr)()\n # widget.set_dayu_type(dayu_type)\n qtbot.addWidget(widget)\n assert widget.property('dayu_type') == dayu_type\n assert widget.property('dayu_size') == dayu_theme.default_size", "def check_class_in_element():\n nonlocal class_not_expected\n result = []\n expected_class_ls = expected_class.split(\" \")\n actual_class = element.get_attribute(\"class\")\n for class_ in expected_class_ls:\n for element_class_ in actual_class.split(\" \"):\n if element_class_ == class_:\n result.append(element)\n if len(result) == len(expected_class_ls):\n return element\n if class_not_expected is None:\n class_not_expected = actual_class\n return False", "def test_class_attribute_pattern(content, expected):\n match = champollion.parser.js_class._CLASS_ATTRIBUTE_PATTERN.search(\n content\n )\n if expected is None:\n assert match is None\n else:\n assert match.groupdict() == expected", "def test_classproperty(self):\n self.assertEqual(MyClass.foo, \"Foo\")", "def CSSClasses(self):", "def testClassNotMutable(self):\n self.assertRaises(AttributeError,\n setattr,\n Color,\n 'something_new',\n 10)", "def setup_class(cls):", "def setup_class(cls):", "def test_01_CheckClassTool(self):\n portal = self.portal\n self.assertNotEqual(None,getattr(portal,'portal_classes',None))\n self.commit()\n # check if web UI works\n portal_classes = portal.portal_classes\n portal_classes.manage_viewDocumentList()\n portal_classes.manage_viewPropertySheetList()\n portal_classes.manage_viewConstraintList()\n portal_classes.manage_viewExtensionList()\n portal_classes.manage_viewTestList()", "def choose_class(self, *args, **kwargs):", "def test_class_started(self, cls):", "def test_class(self):\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"basic.html\",\n ),\n \"MyKlass\",\n )\n content = self._get_fake_project_class()\n\n expected = textwrap.dedent(\n '''\\\n class MyKlass(object):\n \"\"\"A class that does something.\n\n Multi-line information here.\n\n Attributes:\n attribute_value (str):\n Some string.\n\n \"\"\"\n\n attribute_value = \"asdfasdf\"\n\n def __init__(self, value):\n \"\"\"Create this instance.\"\"\"\n # A comment that should show up in the unittest's results\n super(MyKlass, self).__init__()\n\n @staticmethod\n def get_staticmethod():\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n @classmethod\n def get_classmethod(cls):\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 8'''\n )\n\n self._test(data, content, expected) # pylint: disable=no-value-for-parameter" ]
[ "0.77393425", "0.6972124", "0.6752947", "0.67016345", "0.6452122", "0.6351119", "0.6323454", "0.62827134", "0.62827134", "0.616877", "0.6088557", "0.59887946", "0.59715176", "0.5937744", "0.5933207", "0.59048957", "0.5755267", "0.57037884", "0.569965", "0.56985956", "0.5690546", "0.5688317", "0.56115466", "0.5593561", "0.5591832", "0.5591832", "0.55846506", "0.5569766", "0.55600154", "0.55518264" ]
0.7889995
0
Test widget is not a checkbox
def test_widget_is_not_checkbox():
    form = ExampleForm()
    field = form["text"]
    assert is_checkbox(field) is False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_widget_is_checkbox():\n form = ExampleForm()\n field = form[\"checkbox\"]\n assert is_checkbox(field) is True", "def is_checkbox(field):\n return isinstance(field.field.widget, forms.CheckboxInput)", "def _create_boolean_widget(self,frame,name,widget_options):\n # CB: might be necessary to pass actions to command option of Checkbutton;\n # could be cause of test pattern boolean not working?\n return T.Checkbutton(frame,variable=self._tkvars[name],**widget_options)", "def create_type_widget(self):\n self._chb_bool = QtWidgets.QCheckBox()\n return self._chb_bool", "def test_checkboxtextgroup(self):\r\n self.check_group('checkboxtextgroup', 'choice', 'checkbox')", "def is_checkboxes(field):\n return isinstance(field.field.widget, forms.CheckboxSelectMultiple)", "def _onCheckBox(self, widget):\n widget.setStateCheck(not widget.getStateCheck())", "def is_checked(self):\n\treturn self._Widget__w['isChecked'] == 'true'", "def test_recipe_nutrition_label_widget(self):\n pass", "def test_filter_not_available_plugins(plugin_dialog_constructor):\n item = plugin_dialog_constructor.available_list.item(0)\n widget = plugin_dialog_constructor.available_list.itemWidget(item)\n if widget:\n assert not widget.action_button.isEnabled()\n assert widget.warning_tooltip.isVisible()\n\n item = plugin_dialog_constructor.available_list.item(1)\n widget = plugin_dialog_constructor.available_list.itemWidget(item)\n assert widget.action_button.isEnabled()\n assert not widget.warning_tooltip.isVisible()", "def check_alive(cw: CustomWidget) -> NoReturn:\r\n ...", "def checkMyWorkBox(self):\n self.util.waitForElementToBePresent(self.element.my_work_checkbox)\n checkbox = self.util.driver.find_element_by_xpath(self.element.my_work_checkbox)\n if not checkbox.is_selected():\n self.util.clickOn(self.element.my_work_checkbox)", "def test_add_class_to_empty_widget():\n widget = forms.CheckboxInput(attrs={})\n new_class = \"new-class\"\n expected = {\"attrs\": {\"class\": \"new-class\"}}\n result = add_class(widget.__dict__, new_class)\n assert result[\"attrs\"] == expected[\"attrs\"]", "def checkbox(self):\r\n return self._checkbox", "def test_bool_field():", "def bool_checkbox(init: bool = False, descr: str = '', data_type: type[Data] = Data):\n\n class StdInpWidget_BoolCheckBox(StdInputWidgetBase, QCheckBox):\n def __init__(self, params):\n StdInputWidgetBase.__init__(self, params)\n QCheckBox.__init__(self)\n\n # tooltip\n self.setToolTip(self.__doc__)\n\n self.stateChanged.connect(self.state_changed)\n\n # initial value\n with self._prevent_update:\n self.setChecked(init)\n\n @property\n def val(self) -> data_type:\n return data_type(self.isChecked())\n\n def load_from(self, val: Data):\n with self._prevent_update:\n self.setChecked(val.payload)\n\n def state_changed(self, _):\n self.on_widget_val_changed(self.val)\n\n def val_update_event(self, val: Data):\n if isinstance(val.payload, bool):\n with self._prevent_update:\n self.setChecked(val.payload)\n\n StdInpWidget_BoolCheckBox.__doc__ = descr\n\n return StdInpWidget_BoolCheckBox", "def uiCheckboxChecked(checkbox):\n\n return clibui.uiCheckboxChecked(checkbox)", "def trivialFieldChecker(self, event):\n\n self.widget_insert_page_item[event.widget].is_good_flag = True", "def test_constructor_visible_widgets(plugin_dialog_constructor):\n assert not plugin_dialog_constructor.direct_entry_edit.isVisible()\n assert not plugin_dialog_constructor.direct_entry_btn.isVisible()", "def is_button(widget):\n # CEBALERT: document why try/except is needed\n try:\n button 
= 'command' in widget.config() and not hasattr(widget,'toggle')\n except T.TclError:\n button = False\n return button", "def test_radioselect_field():", "def test_visible_widgets(plugin_dialog):\n\n assert plugin_dialog.direct_entry_edit.isVisible()\n assert plugin_dialog.direct_entry_btn.isVisible()", "def uncheckMyWorkBox(self):\n self.util.waitForElementToBePresent(self.element.my_work_checkbox)\n checkbox = self.util.driver.find_element_by_xpath(self.element.my_work_checkbox)\n if checkbox.is_selected():\n self.util.clickOn(self.element.my_work_checkbox)", "def test_empty_ui(self):", "def create_false_widget(measure, event):\n if measure:\n return FalseObject(parent=MeasureEditorDockItem(measure=measure),\n event=event)\n else:\n return FalseObject(parent=Object(), event=event)", "def isWidgetSelected(self, QWidget): # real signature unknown; restored from __doc__\n return False", "def new_varEnabledWidget():\n newWidget = QtGui.QCheckBox()\n newWidget.setChecked(True)\n return newWidget", "def XPIsWidgetVisible(inWidget):\n pass", "def checkbox_should_not_be_selected(self, locator):\n self._info(\"Verifying checkbox '%s' is not selected.\" % locator)\n if self._selenium.is_checked(self._parse_locator(locator)):\n raise AssertionError(\"Checkbox '%s' should not have been selected\"\n % locator)", "def _onTypeClick(self, widget):\n old_value = self.selection\n self.selection = widget.getUserString(\"value\")\n \n if not self.event_update():\n self.selection = old_value\n return\n \n for _widg in self.widgets:\n _widg.setStateCheck(self.selection == _widg.getUserString(\"value\"))" ]
[ "0.78761363", "0.6998184", "0.66463333", "0.6511762", "0.63926727", "0.6291336", "0.6278951", "0.6221354", "0.61232686", "0.60545945", "0.6017455", "0.6001514", "0.5926528", "0.59030956", "0.59027416", "0.590179", "0.5807385", "0.5779362", "0.5761654", "0.575948", "0.5743314", "0.5731614", "0.57193774", "0.57184184", "0.5714573", "0.57052433", "0.56622577", "0.56196433", "0.5589931", "0.5538823" ]
0.82168114
0
Test widget is a checkbox
def test_widget_is_checkbox():
    form = ExampleForm()
    field = form["checkbox"]
    assert is_checkbox(field) is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_widget_is_not_checkbox():\n form = ExampleForm()\n field = form[\"text\"]\n assert is_checkbox(field) is False", "def is_checkbox(field):\n return isinstance(field.field.widget, forms.CheckboxInput)", "def _create_boolean_widget(self,frame,name,widget_options):\n # CB: might be necessary to pass actions to command option of Checkbutton;\n # could be cause of test pattern boolean not working?\n return T.Checkbutton(frame,variable=self._tkvars[name],**widget_options)", "def is_checked(self):\n\treturn self._Widget__w['isChecked'] == 'true'", "def test_checkboxtextgroup(self):\r\n self.check_group('checkboxtextgroup', 'choice', 'checkbox')", "def uiCheckboxChecked(checkbox):\n\n return clibui.uiCheckboxChecked(checkbox)", "def create_type_widget(self):\n self._chb_bool = QtWidgets.QCheckBox()\n return self._chb_bool", "def bool_checkbox(init: bool = False, descr: str = '', data_type: type[Data] = Data):\n\n class StdInpWidget_BoolCheckBox(StdInputWidgetBase, QCheckBox):\n def __init__(self, params):\n StdInputWidgetBase.__init__(self, params)\n QCheckBox.__init__(self)\n\n # tooltip\n self.setToolTip(self.__doc__)\n\n self.stateChanged.connect(self.state_changed)\n\n # initial value\n with self._prevent_update:\n self.setChecked(init)\n\n @property\n def val(self) -> data_type:\n return data_type(self.isChecked())\n\n def load_from(self, val: Data):\n with self._prevent_update:\n self.setChecked(val.payload)\n\n def state_changed(self, _):\n self.on_widget_val_changed(self.val)\n\n def val_update_event(self, val: Data):\n if isinstance(val.payload, bool):\n with self._prevent_update:\n self.setChecked(val.payload)\n\n StdInpWidget_BoolCheckBox.__doc__ = descr\n\n return StdInpWidget_BoolCheckBox", "def is_checkboxes(field):\n return isinstance(field.field.widget, forms.CheckboxSelectMultiple)", "def checkbox(self):\r\n return self._checkbox", "def checkMyWorkBox(self):\n self.util.waitForElementToBePresent(self.element.my_work_checkbox)\n checkbox = self.util.driver.find_element_by_xpath(self.element.my_work_checkbox)\n if not checkbox.is_selected():\n self.util.clickOn(self.element.my_work_checkbox)", "def check_box(self, grid: object, name: str, xposition: int, yposition: int,\n synchronize: bool = False, xspan: int = 1, yspan: int = 1) -> QtWidgets.QCheckBox:\n label = QtWidgets.QLabel()\n label.setText(TR().tr(name) + ':')\n grid.addWidget(label, yposition, xposition, 1, 1)\n\n input = QtWidgets.QCheckBox()\n input.setObjectName(name)\n if synchronize:\n self.synchronize(input)\n grid.addWidget(input, yposition, xposition + 1, yspan, xspan)\n input.stateChanged.connect(self.data_changed)\n\n return input", "def _onCheckBox(self, widget):\n widget.setStateCheck(not widget.getStateCheck())", "def IsItemChecked(self, item):\r\n\r\n return item.IsChecked()", "def GridCheck(Parent,DefaultSelected,Row,Column):\r\n dummyvar = IntVar()\r\n C = Checkbutton(Parent,var=dummyvar)\r\n if DefaultSelected == 1:\r\n C.select()\r\n C.grid(row=Row,column=Column)\r\n C.isChecked = dummyvar\r\n return C", "def test_bool_field():", "def checkbox(self, label, initial=False, handler=None, **kwargs):\n handler = self._changed_handler(handler)\n cb = wx.CheckBox(self, label=label)\n #cb.span = 2\n cb.SetValue(initial)\n cb.Bind(wx.EVT_CHECKBOX, handler)\n self.pack(\"\", cb, **kwargs)\n return cb", "def new_varEnabledWidget():\n newWidget = QtGui.QCheckBox()\n newWidget.setChecked(True)\n return newWidget", "def check(self, element_tuple, *, wrapper_element_tuple=None):\n 
self.log_info(f\"Browser.check: Setting {element_tuple} checkbox to checked\")\n checkbox = self.CORE.find_element(*self.format_element(element_tuple))\n if not checkbox.is_selected():\n if wrapper_element_tuple is not None:\n self.log_info(f\"Browser.check: Wrapper element was provided, clicking {wrapper_element_tuple} instead\")\n self.click(wrapper_element_tuple)\n else:\n self.click(element_tuple)\n else:\n self.log_info(f\"Browser.check: Skipping action as {element_tuple} is already checked\")\n return", "def uiCheckboxPointer(obj):\n\n return ctypes.cast(obj, ctypes.POINTER(uiCheckbox))", "def uiNewCheckbox(text):\n\n # Set return type\n clibui.uiNewCheckbox.restype = ctypes.POINTER(uiCheckbox)\n\n return clibui.uiNewCheckbox(bytes(text, 'utf-8'))", "def click_guarantee_cli_checkbox(self):\n self.click_element(self.guarantee_cli_checkbox_locator)", "def checkBox(parent,label='',pos=defPos,size=defSize,style=0,val=defVal,\r\n name='checkBox',id=defId,onCheck=None,tip=None):\r\n gCheckBox = wx.CheckBox(parent,id,label,pos,size,style,val,name)\r\n if onCheck: gCheckBox.Bind(wx.EVT_CHECKBOX,onCheck)\r\n if tip: gCheckBox.SetToolTip(tooltip(tip))\r\n return gCheckBox", "def click_include_cli_checkbox(self):\n self.click_element(self.include_cli_checkbox_locator)", "def htmlCheckbox(labelText, parName, args, labelAttr='', attr=''):\n snippet = htmlLabel(labelText,parName,labelAttr)\n checked = 'checked=\"checked\"' if parName in args else ''\n snippet += '<input type=\"checkbox\" name=\"%s\"%s%s/>\\n' % (parName,sep(checked),sep(attr))\n return snippet", "def _add_checkbox(self, text, state_changed, tooltip, checked=True,\n enabled=True, button_label=True):\n cbox = QtWidgets.QCheckBox('' if button_label else text, self)\n self.control.layout().addWidget(cbox)\n btn = None\n if button_label:\n btn = QtWidgets.QPushButton(text, self)\n self.control.layout().addWidget(btn)\n\n def cb(checked, cbox=cbox, state_changed=state_changed):\n state_changed(cbox.isChecked(), one_shot=True)\n\n btn.clicked.connect(cb)\n btn.setToolTip(tooltip)\n cbox.setChecked(checked)\n cbox.setEnabled(enabled)\n cbox.stateChanged.connect(state_changed)\n cbox.setToolTip(tooltip)\n self.control.layout().addItem(QtWidgets.QSpacerItem(20, 0))\n return cbox", "def CheckBoxClicked(self,chkb):\r\n\r\n print(\"{} Selecionado.\", format(chkb.text()))", "def click_automate_generation_checkbox(self):\n self.click_element(self.automate_generation_checkbox_locator)", "def _check_state(self):\n if (self.stock_checker.isChecked() or self.future_checker.isChecked()) and self.name.buddy.text():\n self.btn_ok.setEnabled(True)\n self.btn_ok.setDefault(True)\n else:\n self.btn_ok.setEnabled(False)", "def test_add_class_to_empty_widget():\n widget = forms.CheckboxInput(attrs={})\n new_class = \"new-class\"\n expected = {\"attrs\": {\"class\": \"new-class\"}}\n result = add_class(widget.__dict__, new_class)\n assert result[\"attrs\"] == expected[\"attrs\"]" ]
[ "0.77814716", "0.7657677", "0.71951646", "0.7108259", "0.69604677", "0.6959242", "0.69066095", "0.6904737", "0.6780216", "0.6775661", "0.6465918", "0.63346624", "0.6301999", "0.62279457", "0.6089126", "0.60698724", "0.60291535", "0.6010922", "0.59529585", "0.59443384", "0.59322935", "0.5846721", "0.58438975", "0.5835619", "0.5809671", "0.5799273", "0.5793484", "0.5763057", "0.57526994", "0.5736325" ]
0.84474516
0
Remove trailing company labels. Rule
def eliminate_company_labels(coname):
    company_eliminations = ('llp', 'llc', 'plc', 'limited', 'bv', 'gmbh', 'sa', 'inc')
    regex = re.compile('\W(' + '|'.join(company_eliminations) + ')\W?$')
    if len(coname.split(' ')) > 2:
        coname = regex.sub('', coname)
    return coname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_rule_for_company(text: str) -> str:\n match = re.search(LABEL_SPECIFICATION[\"RE_COMPANY_PRIMARY\"], text)\n if not match:\n match = re.search(LABEL_SPECIFICATION[\"RE_COMPANY_SECONDARY\"], text)\n if match:\n text = match.group(\"label\").strip()\n if len(text) > 1:\n return text\n else:\n return \"\"\n return \"\"", "def label_rule_for_company(text: str):\n return re.findall(LABEL_SPECIFICATION[\"RE_COMPANY\"], text)", "def consolidate_label(label):\n return label.split(\"-\")[0] if label.startswith(\"O\") else label", "def consolidate_label(label):\n return label.split(\"-\")[0] if label.startswith(\"O\") else label", "def cleanLabel(label):\n if label.startswith('^'):\n label = label[1:] + '_prexisting'\n label = label.replace('?', 'Q') # happens with mRnaCompare filter labels\n return label", "def deCopIfy(text):\n\tif text == \"\":\n\t\treturn text\n\n\tfor lingo in coplingo:\n\t\ttext = re.sub(lingo['regex'], lingo['str'], text)\n\n\treturn text[0].upper() + text[1:]", "def get_company_domain(self) -> str:\n lower_comp_name = self.company_name.lower()\n domain_prefix = re.sub(\"[^0-9a-zA-Z]+\", \"\", lower_comp_name)\n return domain_prefix + self.top_level_domain", "def _sanitize(label):\n return re.sub(r'(\\W+| )', '', label)", "def label_rule_for_others(text: str, label_type: str) -> str:\n match = re.search(LABEL_SPECIFICATION[f\"RE_{label_type.upper()}\"], text)\n if match:\n return match.group(\"label\").strip()\n return \"\"", "def sanitize_label(label: str) -> str:\n if \"-\" in label:\n prefix, suffix = label.split(\"-\", 1)\n suffix = suffix.split(\"(\")[-1]\n return f\"{prefix}-{suffix}\"\n else:\n return label", "def _remove_blank_graph_labels(data):\n # TODO: parse properly\n processed = []\n for line in data.split('\\n'):\n fields = line.split()\n if len(fields) >= 4 and _is_blank(fields[3]):\n fields = fields[:3] + fields[4:]\n processed.append(' '.join(fields))\n return '\\n'.join(processed)", "def remove_common_terms(company_names):\n # this is a mostly automatic compiled list of terms\n # using company_feature_extraction._find_top_idf_words\n # manually removed obvious company names which we do not want to include\n common_terms = ['ltd', 'recruitment', 'limited', 'group', 'services', 'the', 'solutions',\n 'school', 'uk', 'and', 'care', 'of', 'associates', 'consulting', 'resourcing',\n 'personnel', 'search', 'primary', 'council', 'college', 'people', 'it',\n 'selection', 'cleaning', 'london', 'university', 'management', 'international',\n 'home', 'trust', 'plc', 'education', 'resources', 'centre', 'st', 'healthcare',\n 'technical', 'consultancy', 'support', 'hotel', 'street', 'academy',\n 'consultants', 'house', 'company', 'for', 'housing', 'employment',\n 'resource', 'engineering', 'recruit', 'bureau', 'partnership', 'co', 'security',\n 'training', 'health', 'technology', 'global', 'brook', 'business', 'executive',\n 'legal', 'appointments', 'media', 'first', 'service', 'marketing',\n 'association', 'community', 'agency', 'park', 'network', 'financial', 'hr',\n 'sales', 'direct', 'foundation', 'retail', 'professional', 'partners', 'jobs',\n 'nursing', 'society', 'construction', 'staff', 'llp', '.co.uk']\n # watch out for word boundaries \\b\n regex_remove_common_terms = r\"\\b(\" + '|'.join(common_terms) + r\")\\b\"\n return remove_sub_string(regex_remove_common_terms, company_names)", "def _no_comp_suffix(s):\n return re.sub('__(eq|ne|gt|lt|ge|le)$', '', s)", "def _common_label_scope(labels: list[str]) -> str:\n if not labels:\n return ''\n for i in 
range(len(labels[0]) - 1, -1, -1):\n if labels[0][i] == '/' and all(s.startswith(labels[0][:i + 1]) for s in labels):\n return labels[0][:i + 1]\n return ''", "def compute_company(row):\n global unknown_company_count_global\n associated_company = []\n\n # Identify the target company using known patterns in the tweet text.\n tweet = row['text_derived'].lower()\n author = row['user_screen_name'].lower()\n for company_pattern in PTN_companies:\n if re.compile(author).fullmatch(company_pattern[2]):\n associated_company.append(company_pattern[0])\n break\n if company_pattern[1].search(tweet):\n associated_company.append(company_pattern[0])\n\n if len(associated_company) > 0:\n return '|'.join(associated_company)\n\n # No company pattern applies, so it's unclear how this tweet was selected.\n unknown_company_count_global += 1\n log.warning(f\"\\t\\t\\tunrecognized company (will be dropped): \"\n f\"\\n\\t\\t\\t\\tid: {row['tweet_id']}\"\n f\"\\n\\t\\t\\t\\ttweet: {row['text_derived']}\"\n f\"\\n\\t\\t\\t\\thashtags: {row['tweet_entities_hashtags']}\")\n return 'none'", "def consolidate_labels(labels):\n return list(map(RNNOIE_model.consolidate_label , labels))", "def _analyze_company_name(n, *args, **kwds):\n return analyze_company_name(n, stripNotes=True)", "def cleanup(name):\n cleaned_name = name.rstrip(\".\")\n return cleaned_name", "def reduce_labels(line: str) -> str:\n labels = [\"none\", \"favor\", \"against\"]\n label = line.split(\":\")[1]\n return label if label in labels else \"none\"", "def normalize_label(label: str) -> str:\n label = re.sub(r\"['\\\"`]+\", \"\", label) # remove apostrophes\n label = re.sub(r\"[-/\\\\ \\t_]+\", \" \", label) # normalize separators\n lower_count = sum(map(str.islower, label))\n upper_count = sum(map(str.isupper, label))\n if \" \" not in label and lower_count > 0 and upper_count > 0:\n # camel case to \"normal case\"\n label = re.sub(r\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", label)\n label = re.sub(r\"(^[Tt]he |^[Aa] )\", \"\", label) # drop determiner\n return label.lower()", "def regex_strip_legalname(raw_names):\n \n pattern = r\"(\\s|\\.|\\,|\\&)*(\\.com|Enterprise|Worldwide|Int\\'l|N\\.V\\.|LLC|Co\\b|Inc\\b|Corp\\w*|Group\\sInc|Group|Company|Holdings\\sInc|\\WCo(\\s|\\.)|plc|Ltd|Int'l\\.|Holdings|\\(?Class\\s\\w+\\)?)\\.?\\W?\"\n stripped_names = [re.sub(pattern,'', n) for n in raw_names]\n \n return stripped_names", "def _clean_labels(self, data, label_col='user_labels'):\n\n def clean_up(lbl, tag):\n if not lbl:\n if not tag:\n return 'Unidentified'\n else:\n return 'Non-Prorocentrum'\n elif 'False Prorocentrum' in lbl or \\\n 'Prorocentrum_false_positiveal' in lbl:\n return 'Non-Prorocentrum'\n elif lbl[0] in ['Prorocentrum', 'False Non-Prorocentrum']:\n return lbl[0]\n else:\n return 'Non-Prorocentrum'\n\n df = data.copy()\n df[label_col] = df.apply(lambda x: clean_up(x[label_col],\n x['tags']), axis=1)\n df['label'] = df[label_col].map(self.classes)\n return df", "def return_only_top_level_labels(label_list):\n to_return = []\n for label_name in label_list:\n if label_name.count('/') == 1:\n to_return.append(label_name)\n return to_return", "def removeLabel(edge):\n return edge[:-2]", "def removeLabel(edge):\n return edge[:-2]", "def parse_category_label(label: str) -> str:\n return number_first_regex.sub(\n '_',\n space_regex.sub(\n '_',\n label.strip().lower().replace('*', '').replace('(', '').replace(\n ')', '').replace('.', '')))", "def removeLabels(str2: str):\n str2_arr = []\n last_seen_bracket = []\n for char in str2:\n if char == 
\"(\" or char == \"[\":\n last_seen_bracket.append(char)\n str2_arr.append(\"-\")\n elif char == \")\" or char == \"]\":\n if len(last_seen_bracket) >= 1:\n last_seen_bracket.pop()\n else:\n continue\n elif char == \"-\" or char == '$':\n continue\n elif len(last_seen_bracket) >= 1:\n continue\n else:\n str2_arr.append(char)\n\n if len(str2_arr) > 1:\n for i in range(len(str2_arr)):\n try:\n if str2_arr[i] == \"-\" and str2_arr[i - 1] == \"-\":\n str2_arr.pop(i - 1)\n # Some segments have dual purpose, so this removes dual dashes that result from this\n except IndexError:\n continue\n\n if str2_arr[len(str2_arr) - 1] == \"\\n\":\n str2_arr.pop()\n\n return \"\".join(str2_arr).rstrip(\"-\").lstrip(\"-\")", "def test_issue_remove_label(self):\n pass", "def removesuffix(self, x) -> String:\n pass", "def consolidate_labels(labels):\n return map(RNN_model.consolidate_label , labels)" ]
[ "0.7212325", "0.6618307", "0.6285967", "0.6285967", "0.592417", "0.5615679", "0.55322164", "0.5510219", "0.5489993", "0.5487208", "0.548587", "0.5438026", "0.5347555", "0.53363866", "0.53179765", "0.53119785", "0.52540267", "0.51799643", "0.51637065", "0.5151782", "0.5139857", "0.5139525", "0.512812", "0.5105666", "0.5105666", "0.50952226", "0.509324", "0.50810957", "0.5023813", "0.49986848" ]
0.791277
0
Returns a squared loss function for dnn to tree distillation.
def create_dnn_to_tree_squared_loss_fn(n_classes):

  def _dnn_to_tree_squared_loss(dnn_logits, tree_logits, example_weights):
    return head_lib._mean_squared_loss(  # pylint: disable=protected-access
        labels=_logits_to_label_for_tree(dnn_logits, n_classes),
        logits=_logits_to_label_for_tree(tree_logits, n_classes),
        weights=example_weights)[0]

  return _dnn_to_tree_squared_loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l2_loss(embedding, tree, dist_function): \n # split tensor shape = (num_vertices, dim) into num_vertices number of tensors shape = (dim).\n embedding_tuple = torch.split(embedding, 1) \n \n # loss function is the sum of l2 norm (no sqrt) between the space distance and tree distance \n loss = Variable(torch.FloatTensor(torch.zeros(1)))\n\n # calculate the distance between embedding vectors and minus the tree distance\n dist_tensor = []\n for i_idx, i in enumerate(embedding_tuple):\n for j_idx, j in enumerate(embedding_tuple):\n if i_idx <= j_idx: # when i_idx==j_idx (dist=0) as it will lead to NaN loss in backprop\n continue\n dist_tensor.append((dist_function(i,j) - tree[i_idx][j_idx]).pow(2))\n\n # stack the list of calculated distance\n dist_tensor = torch.stack(dist_tensor)\n\n # loss = L2 loss between space distance tensor and tree distance tensor\n loss = dist_tensor.sum()\n \n return loss", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 2 ***\"\n return nn.SquareLoss(self.run(x), y)", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def tree_l2_norm(tree_x, squared=False):\n squared_tree = tree_map(jnp.square, tree_x)\n sqnorm = tree_sum(squared_tree)\n if squared:\n return sqnorm\n else:\n return jnp.sqrt(sqnorm)", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(x)\n return nn.SquareLoss(predictedY, y)", "def create_dnn_to_tree_cross_entropy_loss_fn(n_classes):\n\n def _dnn_to_tree_cross_entropy_loss(dnn_logits, tree_logits, example_weights):\n if n_classes == 2:\n return head_lib._log_loss_with_two_classes( # pylint: disable=protected-access\n labels=_logits_to_label_for_tree(dnn_logits, n_classes),\n logits=tree_logits,\n weights=example_weights)[0]\n else:\n return head_lib._softmax_cross_entropy_loss( # pylint: disable=protected-access\n labels=_logits_to_label_for_tree(dnn_logits, n_classes),\n logits=tree_logits,\n weights=example_weights)[0]\n\n return _dnn_to_tree_cross_entropy_loss", "def loss_func(coefs: np.ndarray, X: pd.DataFrame, y: pd.Series, n: int) -> float:\n est = np.dot(X, coefs)\n err = np.sum(np.power(est - y, 2))\n rmse = np.sqrt(err/n)\n return rmse", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def compute_square_loss(X, y, theta):\n N = np.shape(X)[0]\n e = y - X.dot(theta)\n loss = (1/(2*np.float(N)))*e.dot(e)\n return loss", "def dloss(self, output, labels):\n return 2*(output - labels)/labels.shape[1]", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def get_loss(self, states, 
Q_target):\n return nn.SquareLoss(self.run(states), Q_target)", "def loss(y, y_pred):\n return 0.5 * np.linalg.norm(y_pred - y) ** 2", "def loss(self):\n return la.norm(self.resids) / self.normX", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n #make your predictions using run\n #compute loss nn.squareloss\n y_pred = self.run(x)\n return nn.SquareLoss(y_pred,y)", "def __D_loss(self, D, real, fake):\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \\\n tf.reduce_mean(tf.square(D(fake))))\n\n return loss", "def l2_loss(obs, actual):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n return tf.reduce_sum(tf.square(obs - actual), 1)", "def loss(Y, T):\n lossFunction = -(T*np.log(Y)).sum(axis=1).mean(axis=0)\n return lossFunction", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def dnn_loss_calculation(self, labeled_examples, labels):\n predicted_labels, _ = self.DNN(labeled_examples)\n labeled_loss = self.labeled_loss_function(predicted_labels, labels, order=self.settings.labeled_loss_order)\n labeled_loss *= self.settings.labeled_loss_multiplier\n return labeled_loss", "def square_loss(X, Y, W):\n return 1.0 / X.shape[0] * np.linalg.norm(np.dot(X, W) - Y, ord='fro') ** 2", "def do_loss(logits, labels):\n return tf.reduce_sum(tf.square(logits - labels))", "def compute_regularized_square_loss_gradient(X, y, theta, lambda_reg):\n return compute_square_loss_gradient(X,y,theta) + 2*lambda_reg*theta", "def get_loss_fn(params):\r\n i = importlib.import_module(\"dlex.utils.losses\")\r\n return getattr(i, params.loss)", "def loss(self, X, Y, lmd):\n P, _ = self.forward(X)\n loss = np.mean(-np.log(np.einsum('ij,ji->i', Y.T, P)))\n\n reg = 0 # Regularization term\n for w in self.W:\n reg += np.sum(np.square(w))\n\n reg *= lmd\n\n cost = loss + reg\n\n return cost", "def compute_regularized_square_loss_gradient(X, y, theta, lambda_reg):\n #TODO\n P = (np.dot(X, theta)-y)\n m = X.shape[0]\n\n return (2/m)*np.dot(X.T, P)+(2*lambda_reg*theta)", "def compute_loss_lasso(y, tx, w, lambda_):\n e = y - tx.dot(w)\n\n return e.dot(e)/(2 * len(e)) + lambda_ * sum(abs(w))", "def compute_square_loss_gradient(X, y, theta):\n #TODO\n P = (np.dot(X, theta)-y)\n m = X.shape[0]\n\n return (2/m)*np.dot(X.T, P)", "def KD(T=8):\n\n def KD_loss(input, teacher_logits):\n return nn.KLDivLoss()(F.log_softmax(input / T, dim=1), F.softmax(teacher_logits / T, dim=1))\n\n return KD_loss", "def regularization_loss(params: hk.Params) -> jnp.ndarray:\r\n\r\n # L1 Loss\r\n sum_in_layer = lambda p: jnp.sum(jnp.abs(p))\r\n sum_p_layers = [sum_in_layer(p) for p in jax.tree_leaves(params)]\r\n l1_loss = sum(sum_p_layers)\r\n\r\n # L2 Loss\r\n l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))\r\n\r\n return l2_coef * l2_loss + l1_coef * l1_loss" ]
[ "0.6345418", "0.6089642", "0.604723", "0.6034086", "0.6028389", "0.599274", "0.5869035", "0.5813661", "0.5807285", "0.5803496", "0.5777334", "0.5717182", "0.5707835", "0.5707232", "0.5666614", "0.56508774", "0.564901", "0.56327903", "0.55825174", "0.5579805", "0.5578603", "0.5572245", "0.55692255", "0.55645686", "0.5556854", "0.5540079", "0.5534246", "0.5532882", "0.5506454", "0.55049944" ]
0.7447281
0
Returns a cross entropy loss function for dnn to tree distillation.
def create_dnn_to_tree_cross_entropy_loss_fn(n_classes):

  def _dnn_to_tree_cross_entropy_loss(dnn_logits, tree_logits, example_weights):
    if n_classes == 2:
      return head_lib._log_loss_with_two_classes(  # pylint: disable=protected-access
          labels=_logits_to_label_for_tree(dnn_logits, n_classes),
          logits=tree_logits,
          weights=example_weights)[0]
    else:
      return head_lib._softmax_cross_entropy_loss(  # pylint: disable=protected-access
          labels=_logits_to_label_for_tree(dnn_logits, n_classes),
          logits=tree_logits,
          weights=example_weights)[0]

  return _dnn_to_tree_cross_entropy_loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross_entropy_loss():\n return nn.CrossEntropyLoss()", "def loss_fn(outputs, labels):\n return nn.CrossEntropyLoss()(outputs, labels)", "def loss_fn(pred: torch.Tensor, truth: torch.Tensor):\n\n if truth is None or pred is None:\n return None\n\n return CrossEntropyLoss()(pred, truth)", "def l2_loss(embedding, tree, dist_function): \n # split tensor shape = (num_vertices, dim) into num_vertices number of tensors shape = (dim).\n embedding_tuple = torch.split(embedding, 1) \n \n # loss function is the sum of l2 norm (no sqrt) between the space distance and tree distance \n loss = Variable(torch.FloatTensor(torch.zeros(1)))\n\n # calculate the distance between embedding vectors and minus the tree distance\n dist_tensor = []\n for i_idx, i in enumerate(embedding_tuple):\n for j_idx, j in enumerate(embedding_tuple):\n if i_idx <= j_idx: # when i_idx==j_idx (dist=0) as it will lead to NaN loss in backprop\n continue\n dist_tensor.append((dist_function(i,j) - tree[i_idx][j_idx]).pow(2))\n\n # stack the list of calculated distance\n dist_tensor = torch.stack(dist_tensor)\n\n # loss = L2 loss between space distance tensor and tree distance tensor\n loss = dist_tensor.sum()\n \n return loss", "def cross_entropy(self):\n return self._cross_entropy_func", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def create_dnn_to_tree_squared_loss_fn(n_classes):\n\n def _dnn_to_tree_squared_loss(dnn_logits, tree_logits, example_weights):\n return head_lib._mean_squared_loss( # pylint: disable=protected-access\n labels=_logits_to_label_for_tree(dnn_logits, n_classes),\n logits=_logits_to_label_for_tree(tree_logits, n_classes),\n weights=example_weights)[0]\n\n return _dnn_to_tree_squared_loss", "def crossentropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return -torch.log2(probability_fn(args))", "def loss_fn(outputs, labels, wts):\n\n # reshape labels to give a flat vector of length batch_size*seq_len\n loss_noreduce = nn.BCEWithLogitsLoss(reduce=False)\n loss = torch.mean(loss_noreduce(outputs, labels)*wts)\n\t\n # compute cross entropy loss for all tokens\n return loss", "def crossentropy_loss(y_true, y_pred):\n ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True) \n return ce", "def cross_entropy_loss(outputs, labels): \n# works properly\n \n m = labels.shape[0]\n p = outputs\n log_likelihood = -1*torch.log(p[range(m),labels])\n loss = torch.sum(log_likelihood) / m\n return loss.item()", "def Weighted_Cross_Entropy(y_true, y_pred, eps = 1e-10):\n y_pred = tf.cast(y_pred, 'float64')\n y_true = tf.cast(y_true, 'float64')\n # deduce weights based on true pixel value\n class_weights = weights * y_true\n # compute your (unweighted) softmax cross entropy loss\n unweighted_losses = y_true*tf.math.log(y_pred + eps)\n ##print(unweighted_losses.dtype, weights.dtype)\n weighted_losses = unweighted_losses * class_weights\n # reduce the result to get your final loss\n loss = -tf.reduce_sum(weighted_losses)\n return loss", "def criterion_kd(helper, outputs, targets, teacher_outputs):\n alpha = helper.alpha\n T = helper.temperature\n KD_loss = torch.nn.KLDivLoss()(torch.nn.functional.log_softmax(outputs/T, dim=1),\n torch.nn.functional.softmax(teacher_outputs/T, dim=1)) * (alpha * T * T) + \\\n torch.nn.functional.cross_entropy(outputs, targets) * (1. 
- alpha)\n return KD_loss", "def KD(T=8):\n\n def KD_loss(input, teacher_logits):\n return nn.KLDivLoss()(F.log_softmax(input / T, dim=1), F.softmax(teacher_logits / T, dim=1))\n\n return KD_loss", "def CE():\n def CE_loss(input,target):\n return nn.CrossEntropyLoss()(input.squeeze(), target)\n\n return CE_loss", "def cross_entropy_loss(self, logits, labels):\n return F.cross_entropy(logits, labels)", "def _graph_fn_entropy(distribution):\n return distribution.entropy()", "def cross_entropy_fn(fc8, ground_truth_input):\n with tf.name_scope('cross_entropy') as scope:\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=fc8, labels=ground_truth_input)\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n\n tf.summary.scalar(scope + '/loss', cross_entropy_mean)\n\n return cross_entropy_mean", "def set_loss_function(self):\r\n self.criterion = nn.CrossEntropyLoss().cuda()", "def cross_entropy(y_observed, p):\n\n pass", "def cross_entropy_loss(batch_out, batch_gt):\r\n criterion = torch.nn.CrossEntropyLoss()\r\n target = torch.argmax(batch_gt, 1)\r\n loss = criterion(batch_out, target)\r\n\r\n return loss", "def loss_calc(pred, label, device):\n # out shape batch_size x channels x h x w -> batch_size x channels x h x w\n # label shape h x w x 1 x batch_size -> batch_size x 1 x h x w\n label = Variable(label.long()).to(device)\n criterion = CrossEntropy2d().to(device)\n\n return criterion(pred, label)", "def loss(params: hk.Params, batch, label) -> jnp.ndarray:\r\n logits = net.apply(params, batch)\r\n labels = jax.nn.one_hot(label, n_classes)\r\n\r\n # Cross Entropy Loss\r\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))\r\n softmax_xent /= labels.shape[0]\r\n return softmax_xent", "def _initLoss(self):\n\n return torch.nn.CrossEntropyLoss()", "def loss_function(self, targets, outputs):\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=outputs)\n return tf.reduce_mean(cross_entropy)", "def get_cross_entropy(self):\n assert (self.dataset is not None) and (self.labels is not None), 'Logistic Regression requires a dataset and labels.'\n potential = 0.0\n logits = self.dataset @ self.parameters[:self.dataset.shape[1]]\n max_logits = torch.max(torch.zeros(logits.shape[0]),logits)\n potential = (-logits @ self.labels.t() + torch.sum(max_logits) + torch.sum(\n torch.log(torch.exp(-max_logits)+torch.exp(logits - max_logits))))# * n.reciprocal())\n return potential", "def loss_function(recon_x, x, mu, logvar, input_size):\n BCE = F.binary_cross_entropy(recon_x, x.view(-1, input_size), size_average=False)\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. 
ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n\n return BCE + KLD", "def decision_tree_clf():\n\tclf_entropy = DecisionTreeClassifier(\n\t\tcriterion = \"entropy\", random_state = seed,\n\t\tmax_depth = 3, min_samples_leaf = 5\n\t\t)\n\treturn clf_entropy", "def loss(output, y):\n #Computes softmax cross entropy between logits and labels.\n xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)\n loss = tf.reduce_mean(xentropy)\n\n return loss", "def KD_CE(alpha=0.5, T=8):\n\n def KD_loss(input, teacher_logits):\n return nn.KLDivLoss()(F.log_softmax(input / T, dim=1),\n F.softmax(teacher_logits / T, dim=1))\n\n def CE_loss(input,target):\n return nn.CrossEntropyLoss()(input.squeeze(), target)\n\n def KD_CE_loss(input, teacher_logits, target):\n\n return CE_loss(input,target) * alpha * T * T + KD_loss(input, teacher_logits) * (1 - alpha)\n\n return KD_CE_loss" ]
[ "0.72981083", "0.66527283", "0.63461035", "0.6343372", "0.62758875", "0.6175301", "0.61639184", "0.61602235", "0.6094912", "0.60944366", "0.6080397", "0.6054565", "0.60013235", "0.6001276", "0.5999888", "0.59844524", "0.5914654", "0.58692825", "0.58543164", "0.58519185", "0.5804028", "0.5779248", "0.57700247", "0.5752408", "0.57402664", "0.5736904", "0.5735856", "0.57355034", "0.57343674", "0.57059366" ]
0.72756386
1
Create a basic tree with a battery-to-blackboard writer and a battery check that flashes the LEDs on the mock robot if the battery level goes low.
def tutorial_create_root() -> py_trees.behaviour.Behaviour: root = py_trees.composites.Parallel( name="Tutorial Two", policy=py_trees.common.ParallelPolicy.SuccessOnAll( synchronise=False ) ) topics2bb = py_trees.composites.Sequence(name="Topics2BB", memory=True) battery2bb = py_trees_ros.battery.ToBlackboard( name="Battery2BB", topic_name="/battery/state", qos_profile=py_trees_ros.utilities.qos_profile_unlatched(), threshold=30.0 ) tasks = py_trees.composites.Selector("Tasks", memory=False) flash_led_strip = behaviours.FlashLedStrip( name="FlashLEDs", colour="red" ) def check_battery_low_on_blackboard(blackboard: py_trees.blackboard.Blackboard) -> bool: return blackboard.battery_low_warning battery_emergency = py_trees.decorators.EternalGuard( name="Battery Low?", condition=check_battery_low_on_blackboard, blackboard_keys={"battery_low_warning"}, child=flash_led_strip ) idle = py_trees.behaviours.Running(name="Idle") root.add_child(topics2bb) topics2bb.add_child(battery2bb) root.add_child(tasks) tasks.add_children([battery_emergency, idle]) return root
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, battery_size=75):\n self.battery_size = battery_size", "def __init__(self, battery_size=75):\n self.battery_size = battery_size", "def __init__(self, battery_size= 75):\n self.battery_size = battery_size", "def __init__(self, battery_size=70):\r\n\t\tself.battery_size = battery_size", "def __init__(self, battery_size=70):\n self.battery_size = battery_size", "def __init__(self, battery_size=70):\n self.battery_size = battery_size", "def __init__(self, battery_size=70):\n self.battery_size = battery_size", "def __init__(self, battery_size=40):\n self.battery_size = battery_size", "def create_tree(self, tree):\n # print(self)\n if len(self.available_combinations()) > 1:\n comb1 = random.choice(self.available_combinations())\n comb2 = random.choice(self.available_combinations())\n\n if self.last_move == 5:\n next_move = 7\n else:\n next_move = 5\n\n # print(next_move)\n\n board1 = copy.deepcopy(self)\n board2 = copy.deepcopy(self)\n\n board1.board[comb1[0]][comb1[1]] = next_move\n board1.last_move = 7\n tree.insert_left(board1)\n board2.board[comb2[0]][comb2[1]] = next_move\n board2.last_move = 7\n tree.insert_right(board2)\n\n board1.create_tree(tree.get_left_child())\n board2.create_tree(tree.get_left_child())", "def create_indicator_lq_battery(self):\n self.settings.New('pumping', dtype=bool, initial=False, ro=True)\n self.settings.New('predeposition', dtype=bool, initial=False, ro=True)\n self.settings.New('deposition', dtype=bool, initial=False, ro=True)\n self.settings.New('vent', dtype=bool, initial=False, ro=True)\n self.settings.New('pumped', dtype=bool, initial=False, ro=True)\n self.settings.New('gases_ready', dtype=bool, initial=False, ro=True)\n self.settings.New('substrate_hot', dtype=bool, initial=False, ro=True)\n self.settings.New('recipe_running', dtype=bool, initial=False, ro=True)\n self.settings.New('recipe_completed', dtype=bool, initial=False, ro=True)", "def test_battery_status_1() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/battery-status.1.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\n \"timestamp\": \"2020-11-17T09:06:48+01:00\",\n \"batteryLevel\": 50,\n \"batteryAutonomy\": 128,\n \"batteryCapacity\": 0,\n \"batteryAvailableEnergy\": 0,\n \"plugStatus\": 0,\n \"chargingStatus\": -1.0,\n }\n\n vehicle_data = cast(\n models.KamereonVehicleBatteryStatusData,\n response.get_attributes(schemas.KamereonVehicleBatteryStatusDataSchema),\n )\n\n assert vehicle_data.timestamp == \"2020-11-17T09:06:48+01:00\"\n assert vehicle_data.batteryLevel == 50\n assert vehicle_data.batteryTemperature is None\n assert vehicle_data.batteryAutonomy == 128\n assert vehicle_data.batteryCapacity == 0\n assert vehicle_data.batteryAvailableEnergy == 0\n assert vehicle_data.plugStatus == 0\n assert vehicle_data.chargingStatus == -1.0\n assert vehicle_data.chargingRemainingTime is None\n assert vehicle_data.chargingInstantaneousPower is None\n assert vehicle_data.get_plug_status() == enums.PlugState.UNPLUGGED\n assert vehicle_data.get_charging_status() == enums.ChargeState.CHARGE_ERROR", "def __init__(self,make,model,year):\r\n\t\tsuper().__init__(make,model,year)\r\n\t\tself.battery = Battery()", "async def test_battery(client, sensor_entities) -> None:\n body = await generate_latest_metrics(client)\n\n assert (\n 
'battery_level_percent{domain=\"sensor\",'\n 'entity=\"sensor.outside_temperature\",'\n 'friendly_name=\"Outside Temperature\"} 12.0' in body\n )", "def __init__(self,make,model,year):\n super().__init__(make,model,year)\n self.battery = Battery()", "def createNotification(self): \n if self.power_plugged == True and self.battery_percent >= 80:\n Notify.init(\"Charger Notifier\")\n notification = Notify.Notification.new(\n \"BATTERY\",\n \"Battery level is over 80%! Please unplug the charger.\",\n \"dialog-information\"\n )\n notification.show()\n elif self.power_plugged == False and self.battery_percent <= 40:\n Notify.init(\"Charger Notifier\")\n notification = Notify.Notification.new(\n \"BATTERY\",\n \"Battery level is less than 40%! Please plug in the charger.\",\n \"dialog-information\"\n )\n notification.show()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def design(self):\n if os.path.isdir('monoblock') is False:\n os.mkdir('monoblock')\n\n self.barr_bore.build_graphs()\n choice = input('Show indicator lines? (+/-): ')\n if choice == '+':\n self.barr_bore.show_graphs()\n\n while True:\n self.steel = materials.Steel(self.__choose_material())\n self.__calc_thickness()\n choice = ''\n if self.__is_hardenability() is True:\n print('\\tSelected steel is suitable.')\n else:\n print(f'\\tSelected steel is not suitable. r2, m: {self.r2}')\n choice = input('>>> your choice (ok, new, exit): ')\n if choice == 'ok':\n break\n elif choice == 'new':\n continue\n else:\n self.steel = materials.Steel([0, 0, 0])\n break\n if self.steel.sigma != 0:\n print(f'Your selected steel: {self.steel}')\n else:\n print('It is impossible to design monoblock barrel.\\n'\n 'You can use module for designing multi-layered barrel...')", "def makeTree(node,baseName,baseAddress,nodes,parentNode,vars,isGenerated):\n \n if (isGenerated == None or isGenerated == False) and node.get('generate') is not None and node.get('generate') == 'true':\n generateSize = parseInt(node.get('generate_size'))\n generateAddressStep = parseInt(node.get('generate_address_step'))\n generateIdxVar = node.get('generate_idx_var')\n for i in range(0, generateSize):\n vars[generateIdxVar] = i\n makeTree(node, baseName, baseAddress + generateAddressStep * i, nodes, parentNode, vars, True)\n return\n newNode = Node()\n name = baseName\n if baseName != '': name += '.'\n if node.get('id') is not None:\n name += node.get('id')\n name = substituteVars(name, vars)\n newNode.name = name\n if node.get('description') is not None:\n newNode.description = node.get('description')\n address = baseAddress\n if node.get('address') is not None:\n address = baseAddress + parseInt(node.get('address'))\n newNode.address = address\n newNode.real_address = (address<<2)+0x64000000\n newNode.permission = node.get('permission')\n newNode.mask = parseInt(node.get('mask'))\n newNode.isModule = node.get('fw_is_module') is not None and node.get('fw_is_module') == 'true'\n if node.get('sw_monitor_warn_min_threshold') is not None:\n newNode.warn_min_value = node.get('sw_monitor_warn_min_threshold') \n if node.get('sw_monitor_error_min_threshold') is not None:\n newNode.error_min_value = node.get('sw_monitor_error_min_threshold') \n nodes[name] = newNode\n if parentNode is not None:\n parentNode.addChild(newNode)\n newNode.parent = parentNode\n newNode.level = parentNode.level+1\n for child in node:\n makeTree(child,name,address,nodes,newNode,vars,False)", "def __init__(self,battery_size=85):\n self.battery_size = 
battery_size", "def __init__(self, battery_size=75): #Note that battery_size is optional parameter if no value is provided.\n self.battery_size = battery_size", "def setup_level_1() -> object:\n #create level object\n level = Level()\n\n #create vertical walls for level\n create_and_add_vertical_walls_to_list(4, 39, 4, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 25, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 54, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 25, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 54, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 25, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 44, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 45, 74, level.wall_list)\n create_and_add_vertical_walls_to_list(54, settings.HEIGHT, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(54, settings.HEIGHT, 30, level.wall_list)\n\n #create horizontal walls for level\n create_and_add_horiontal_walls_to_list(4, 34, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 9, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(15, 24, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 54, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 74, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 24, 39, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 54, 39, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 74, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(19, 24, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 35, 54, level.wall_list)\n\n #create knight character for level\n create_and_add_character_to_list(\"pics\\prison_guard.png\", 0.2, 270, 470, level.character_list)\n\n #knight asks for bribe\n guard_convo = Dialogue(300, 500, 150, 50, \"I know who you are...\\n if you pay me,\\n I'll turn a blind eye.\")\n level.dialogue_list.append(guard_convo)\n\n #create coin item to bribe knight character\n create_and_add_item_to_list(\"pics\\gold_1.png\", 0.5, 400, 250, level.item_list)\n\n #create prompts and info for rooms for object\n cell = RoomInfo(120, 100, \"Dungeon cell. There's a note and key. Someone's waiting for you in the garden.\")\n level.room_info_list.append(cell)\n guard_room = RoomInfo(450, 280, \"Guardroom. There's the unconconsious bodies of the guards. Your saviours must've gone to great lengths...\")\n level.room_info_list.append(guard_room)\n torture_chamber = RoomInfo(120, 280, \"Torture chamber. You've been here before. They were questioning you, but you didn't answer.\")\n level.room_info_list.append(torture_chamber)\n battle_room = RoomInfo(650, 280, \"Battle room. You see that your captors are fighting revolutionaries- those who seek to bring back a lost king.\")\n level.room_info_list.append(battle_room)\n stairwell = RoomInfo(220, 520, \"Stairwell. 
There's a lone guard who doesn't look surprised to see you\")\n level.room_info_list.append(stairwell)\n\n return level", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, battery_size=70):\n self.battery_size = battery_size\n self._range = 0", "def test_battery_status_2() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/battery-status.2.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\n \"timestamp\": \"2020-01-12T21:40:16Z\",\n \"batteryLevel\": 60,\n \"batteryTemperature\": 20,\n \"batteryAutonomy\": 141,\n \"batteryCapacity\": 0,\n \"batteryAvailableEnergy\": 31,\n \"plugStatus\": 1,\n \"chargingStatus\": 1.0,\n \"chargingRemainingTime\": 145,\n \"chargingInstantaneousPower\": 27.0,\n }\n\n vehicle_data = cast(\n models.KamereonVehicleBatteryStatusData,\n response.get_attributes(schemas.KamereonVehicleBatteryStatusDataSchema),\n )\n\n assert vehicle_data.timestamp == \"2020-01-12T21:40:16Z\"\n assert vehicle_data.batteryLevel == 60\n assert vehicle_data.batteryTemperature == 20\n assert vehicle_data.batteryAutonomy == 141\n assert vehicle_data.batteryCapacity == 0\n assert vehicle_data.batteryAvailableEnergy == 31\n assert vehicle_data.plugStatus == 1\n assert vehicle_data.chargingStatus == 1.0\n assert vehicle_data.chargingRemainingTime == 145\n assert vehicle_data.chargingInstantaneousPower == 27.0\n assert vehicle_data.get_plug_status() == enums.PlugState.PLUGGED\n assert vehicle_data.get_charging_status() == enums.ChargeState.CHARGE_IN_PROGRESS", "def __init__(self, manufacturer, model, year):\n super().__init__(manufacturer, model, year)\n self.battery = Battery()" ]
[ "0.5643367", "0.5643367", "0.5608296", "0.5592562", "0.5592101", "0.5592101", "0.5592101", "0.55572146", "0.55461454", "0.5498106", "0.54959106", "0.54520214", "0.5431685", "0.5410982", "0.54064524", "0.5403356", "0.53872925", "0.5385211", "0.5373222", "0.5351197", "0.53187335", "0.5300676", "0.5300676", "0.5300676", "0.5300676", "0.5300676", "0.5300676", "0.528835", "0.5274178", "0.5260299" ]
0.6854869
0
Read the path of the local database specified in the config file. An error will be raised if there is no such database, or more than one. This function is used by the "caldb" script and the set_local_database() function here.
def get_db_path_from_config(): if not globalConf.sections(): raise OSError("Cannot read config file.") databases = parse_databases() db_path = None for db in databases: if db[0] == LocalDB: if db_path is None: db_path = db[1] else: raise ValueError("Multiple local database files are listed " "in the config file.") if db_path is None: raise ValueError("No local database file is listed in the config file.") return db_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDBPath():\n return os.path.join(CONFIG_DIR, CONFIG_DICT['common']['local_db'])", "def read_db():\n # read config file\n config = configparser.ConfigParser()\n config.read_file(open(\"options.cfg\"))\n\n return config['DEFAULT']['DatabaseFilename']", "def get_path_db():\n\taiqc_config = get_config()\n\tif aiqc_config is None:\n\t\t# get_config() will print a null condition.\n\t\tpass\n\telse:\n\t\tdb_path = aiqc_config['db_path']\n\t\treturn db_path", "def get_db_path():\n \n return(db_run.db_abs_path)", "def settings_db_read_settings(db_name=SETTINGS_DB_NAME):\n paths = local = None\n if os.path.isfile(db_name):\n with lite.connect(db_name) as con:\n cur = con.cursor()\n result = cur.execute(\"SELECT LOGISIM_HOME, GRADING_PATH, IMPORT_PATH, GRADES_DB\\\n FROM PATHS\")\n paths = result.fetchone()\n result = cur.execute(\"SELECT GRADER_NAME, YEAR, SEMESTER, USE_STYLE, SYNC_COMMAND\\\n FROM LOCAL\")\n local = result.fetchone()\n\n return paths, local", "def get_database_path():\n\treturn _paths[_DATABASE_PATH_KEY]", "def get_db_file(config):\n db_file = None\n\n db = config.get_config(\"db\")\n db_config = config.get_config(\"db_config\")\n if db == \"sqlalchemy\":\n db_file = db_config.get(\"uri\")\n\n return db_file", "def get_db_file(config):\n db_file = None\n\n db = config.get_config(\"db\")\n db_config = config.get_config(\"db_config\")\n if db == \"sqlalchemy\":\n db_file = db_config.get(\"uri\")\n\n return db_file", "def db_file():\n return abspath('vmchecker.db')", "def database():\n return conf().database", "def set_local_database():\n load_config()\n db_path = get_db_path_from_config()\n db = LocalDB(db_path, log=None)\n return db", "def database_file(file):\r\n fpath = path.join('databases', '{0}'.format(file))\r\n db_path = path.join(mod_path, fpath)\r\n return db_path", "def get_database_url(self):\n return self.config['dbase_path']", "def set_db_file():\n\n return os.path.join(db_path, db_file)", "def get_db_path():\n return os.path.join(sys.path[0], \"my_db.db\")", "def currentDatabasePath(self):\n logger.debug(\"Func: currentDatabasePath/getter\")\n\n if not self._currentSceneInfo:\n msg = \"no current info\"\n # logger.error(msg)\n # raise Exception([101, msg])\n self._exception(101, msg)\n return\n\n if self._currentSceneInfo[\"SubProject\"] == \"None\":\n subP = \"\"\n else:\n subP = self._currentSceneInfo[\"SubProject\"]\n\n dbFile = os.path.join (self.projectDir,\n self._pathsDict[\"databaseDir\"],\n self._currentSceneInfo[\"Category\"],\n subP, \"%s.json\" %self._currentSceneInfo[\"Name\"])\n return dbFile", "def db_path(self, host: str) -> str:\n app_path = os.path.abspath(os.getcwd())\n folder = 'data'\n path = os.path.join(app_path, folder)\n return os.path.normpath(os.path.join(path, host))", "def get_database_filename() -> str:\n config_dir = get_config_dir()\n return os.path.join(config_dir, DATABASE_FILENAME)", "def read_db():\n\n # Look for database in the same folder as this script\n script_dir = os.path.dirname(os.path.realpath(__file__))\n db_filepath = os.path.join(script_dir, 'cn_loads_database.dat')\n\n db = None\n if os.path.isfile(db_filepath):\n with open(db_filepath, 'r') as f:\n db = yaml.load(f.read())\n if db == None:\n db = dict()\n else:\n db = dict()\n\n return db", "def default_bug_db(configfile=None):\n\n configs=None\n if not configfile:\n configs=default_configs()\n else:\n configs=configparser.ConfigParser()\n configs.read(configfile)\n\n db_file=os.path.normpath(configs.get(\"bug_db\",\"db_file\"))\n return db_file", "def 
get_database_connection(dbfile='../data/localities.db'):\n\n global connection\n if not connection:\n connection = sqlite3.connect(dbfile)\n \n return connection", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def get_default_config_path():\n if os.name == 'posix':\n config_path = os.path.join(os.path.expanduser(\"~\"), '.fpdb')\n elif os.name == 'nt':\n config_path = os.path.join(os.environ[\"APPDATA\"], 'fpdb')\n else: config_path = False\n return config_path", "def read_db_config(config_path, section='database'):\n\n parser = ConfigParser()\n parser.read(config_path)\n db = {}\n\n if parser.has_section(section):\n items = parser.items(section)\n for item in items:\n db[item[0]] = item[1]\n else:\n raise FileNotFoundError('{} not found in the {} file'.format(section, config_path))\n\n return db", "def get_kraken_db_path(wildcards):\n\n db_name= wildcards.db_name\n assert 'kraken_db' in config, 'Expect a directory named \"kraken_db\" in the config file'\n assert db_name in config['kraken_db'], f'The name \"{db_name}\" is not in the config file under \"kraken_db\"'\n\n kraken_db_folder = config['kraken_db'][db_name]\n\n if not os.path.exists(kraken_db_folder):\n raise IOError(f\"{kraken_db_folder} doesn't exist\")\n\n if not all( os.path.exists(os.path.join(kraken_db_folder,file)) for file in kraken_db_files):\n raise IOError(f\"Expect {kraken_db_files} in {kraken_db_folder}\")\n\n\n return kraken_db_folder", "def get_database_directory(self):\n pass", "def default_db_config():\n return read_json_file(db_config_file)", "def database(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database\")", "def db_python_only():\n return os.path.join(_here, 'fixtures/databases/db-python-only/database')" ]
[ "0.7601635", "0.75734", "0.72934604", "0.71561885", "0.7063485", "0.6992215", "0.6971256", "0.6971256", "0.67331797", "0.663469", "0.6604691", "0.6503781", "0.64978737", "0.64587384", "0.6458058", "0.64289236", "0.638937", "0.63717663", "0.6280685", "0.62532043", "0.6071763", "0.6021615", "0.6021615", "0.60070395", "0.5938749", "0.5934786", "0.5919558", "0.590942", "0.5905502", "0.5903148" ]
0.8613067
0
Initialize the calibration databases for a PrimitivesBASE object.
def init_calibration_databases(inst_lookups=None, procmode=None, ucals=None, upload=None): # Read the mdf_dict file and create an actual dict with the complete # paths to each of the MDF files try: masks = import_module('.maskdb', inst_lookups) mdf_dict = getattr(masks, 'mdf_dict') mdf_key = getattr(masks, 'mdf_key') except (ImportError, TypeError, AttributeError): mdf_dict = None mdf_key = None else: for k, v in mdf_dict.items(): mdf_dict[k] = path.join(path.dirname(masks.__file__), 'MDF', v) caldb = UserDB(name="manual calibrations", mdf_dict=mdf_dict, mdf_key=mdf_key, user_cals=ucals) upload_calibs = upload is not None and "calibs" in upload upload_science = upload is not None and "science" in upload for cls, db, kwargs in parse_databases(): kwargs["procmode"] = procmode if cls == RemoteDB: # Actually storing to a remote DB requires that "store" is set in # the config *and* the appropriate type is in upload kwargs["store_science"] = kwargs["store_cal"] and upload_science kwargs["store_cal"] &= upload_calibs elif cls == LocalDB: kwargs["force_init"] = False database = cls(db, name=db, **kwargs) caldb.add_database(database) return caldb
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def full_initialization_process():\n\n db1 = Database('TOBACCO_RAW;')\n con1, cur1 = db1.connect()\n cur1.execute('create index idl_doc_field_id_idx on idl_doc_field(id);')\n cur1.execute('create index idl_doc_id_idx on idl_doc(id);')\n add_timestamp_to_idl_doc()\n\n create_utf_text_files()\n\n initialize_tables()\n fill_tables()", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def _init_dataset():\n global _residues\n if _residues is not None:\n # Database is already initialized\n return\n\n # Residuue data is taken from\n # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif\n # (2019/01/27)\n _info_dir = dirname(realpath(__file__))\n with open(join(_info_dir, \"residues.msgpack\"), \"rb\") as file:\n _residues = msgpack.unpack(\n file, use_list=False, raw=False\n )", "def init():\n try:\n database.CONN\n except Exception:\n database.init()\n print('Database connection established.')\n inputtools.init()\n outputtools.init()\n\n global _CONN\n global _NAME\n global _TEMP_NAME\n global _SIMPLECOUNT_COLUMNS\n global _UCR_INDICATOR_DICT\n \n _CONN = database.CONN\n _NAME = 'SimpleCount'\n _TEMP_NAME = f'Temp{_NAME}' \n _SIMPLECOUNT_COLUMNS = ['fk_simplecount_indicator', 'fk_simplecount_county', 'year', 'value']\n _UCR_INDICATOR_DICT = {\n 'domestic':1100,\n 'school':1120,\n 'hate':1130,\n 'acca': 1400,\n 'acsa':1401,\n 'ahsna':1402,\n 'adpa':1403,\n 'ameth':1404,\n 'ch':1410,\n 'rape':1411,\n 'rob':1412,\n 'aggba':1413,\n 'ach':1414,\n 'arape':1415,\n 'arob':1416,\n 'aaggba':1417,\n 'theft':1420,\n 'burg':1421,\n 'mvt':1422,\n 'arson':1423,\n 'atheft':1424,\n 'aburg':1425,\n 'amvt':1426,\n 'aarson':1427,\n 'htsex':1430,\n 'htserve':1431,\n 'ahtsex':1440,\n 'ahtserve':1441,\n }", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def startup(self):\n self.load_up_initial_db(TIMESTAMP_PARSE_DICT)\n self.add_numeric_cols()", "def initialize(self):\r\n \r\n self.client = gdata.spreadsheet.text_db.DatabaseClient(username=self.username, password=self.password)\r\n\r\n dbs = self.client.GetDatabases(spreadsheet_key=self.spreadsheet_key,\r\n name=self.spreadsheet_name)\r\n\r\n if len(dbs) < 1:\r\n raise Exception(\"No spreadsheets with key '%s' or name '%s'\" %\r\n (self.spreadsheet_key, self.spreadsheet_key))\r\n\r\n db = dbs[0]\r\n worksheets = db.GetTables(worksheet_id=self.worksheet_id,\r\n name=self.worksheet_name)\r\n\r\n self.worksheet = worksheets[0]\r\n self.worksheet.LookupFields()\r\n\r\n # FIXME: try to determine field types from next row\r\n self._fields = metadata.FieldList(self.worksheet.fields)", "def _real_initialize(self):\n pass", "def __init__(self):\n self.TECRDB_compounds_data_dict = {}\n self.TECRDB_compounds_pH7_species_id_dict = {}\n self.TECRDB_compounds_least_H_sid_dict = {}\n self.get_TECRDB_compounds_data()", "def memb_init(self):\n self.initialize()", "def _initialize_geospatial_data(self):\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n bnd_src = driver.Open(self._spatial_filename, 0)\n bnd_lyr = bnd_src.GetLayer()\n (self.spatial_index,\n self.spatial_feats,\n self.bison_spatial_fields\n ) = self._create_spatial_index(bnd_lyr)", "def init_database(self):\n # init_database(self.engine)", "def __init__ (self) :\n self.loadCSPAD2x2CalibParsDefault()", "def _initObjects(self):\n\n print \"DEBUG: Initializing Entities\"\n ObjectType.initializeObjectTypes()", "def __post_init__(self):\n 
self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def initialize_db(self) -> None:\n if not self.check_schema_initialized():\n self._create_genes_table()\n self._create_meta_data_table()", "def init():\n\n conn = r.connect()\n if not DB in r.db_list().run(conn):\n create(conn)\n f_print = fingerprint.get_fingerprint(SAMPLE_PATH)\n insert_reference(\n SAMPLE_NAME,\n f_print,\n SAMPLE_R_REF,\n SAMPLE_L_REF,\n SAMPLE_CLASS\n )\n return True\n else:\n return False", "def __init__(self, db):\n\n # Add database object\n self.db = db\n\n # Initialize a dictionary to store maps of meg data (oscillation bands)\n self.meg_maps = dict()\n self.bands = dict()\n\n # Initialize a dictionary to store exponent map\n self.exponent_map = dict({'Exponents': np.array([])})\n\n # Initialize booleans that keep track of what is loaded\n self.oscs_loaded = False\n self.exponents_loaded = False", "def _init(self):\n self._nfields = 0\n self._converted = {}\n self._heapoffset = 0\n self._heapsize = 0\n self._col_weakrefs = weakref.WeakSet()\n self._coldefs = None\n self._gap = 0\n self._uint = False", "def init_database(self):\n init_database(self.engine)", "def _initialize(self):\n configured_providers = self.domain.config[\"DATABASES\"]\n provider_objects = {}\n\n if configured_providers and isinstance(configured_providers, dict):\n if \"default\" not in configured_providers:\n raise ConfigurationError(\"You must define a 'default' provider\")\n\n for provider_name, conn_info in configured_providers.items():\n provider_full_path = conn_info[\"PROVIDER\"]\n provider_module, provider_class = provider_full_path.rsplit(\n \".\", maxsplit=1\n )\n\n provider_cls = getattr(\n importlib.import_module(provider_module), provider_class\n )\n provider = provider_cls(provider_name, self.domain, conn_info)\n\n provider_objects[provider_name] = provider\n\n self._providers = provider_objects", "def localInitialize(self):\n SVL = self.readFromROM()\n self._generateQuadsAndPolys(SVL)\n #print out the setup for each variable.\n msg = self.printTag+' INTERPOLATION INFO:\\n'\n msg += ' Variable | Distribution | Quadrature | Polynomials\\n'\n for v in self.quadDict:\n msg += ' '+' | '.join([v,self.distDict[v].type,self.quadDict[v].type,self.polyDict[v].type])+'\\n'\n msg += ' Polynomial Set Degree: '+str(self.maxPolyOrder)+'\\n'\n msg += ' Polynomial Set Type : '+str(SVL.indexSetType)+'\\n'\n self.raiseADebug(msg)\n\n self.raiseADebug('Starting index set generation...')\n self.indexSet = IndexSets.factory.returnInstance(SVL.indexSetType)\n self.indexSet.initialize(self.features, self.importanceDict, self.maxPolyOrder)\n if self.indexSet.type=='Custom':\n self.indexSet.setPoints(SVL.indexSetVals)\n\n self.sparseGrid = Quadratures.factory.returnInstance(self.sparseGridType)\n self.raiseADebug(f'Starting {self.sparseGridType} sparse grid generation...')\n self.sparseGrid.initialize(self.features, self.indexSet, self.dists, self.quadDict, self.jobHandler)\n\n if self.writeOut is not None:\n msg = self.sparseGrid.__csv__()\n outFile = open(self.writeOut,'w')\n outFile.writelines(msg)\n outFile.close()\n\n self.limit=len(self.sparseGrid)\n self.raiseADebug(f'Size of Sparse Grid: {self.limit}')\n self.raiseADebug('Finished sampler generation.')\n\n self.raiseADebug('indexset:',self.indexSet)\n for SVL in self.ROM.supervisedContainer:\n SVL.initialize({'SG': self.sparseGrid,\n 'dists': self.dists,\n 'quads': self.quadDict,\n 'polys': 
self.polyDict,\n 'iSet': self.indexSet})", "def __init__(self,db,tables=[]):\n #{{{ Load class and test databases\n self.dbcentral = db\n self.tables = tables\n self.debug = config.debug\n self.null_vals = defaultdict(lambda: defaultdict(dict))\n\n \"\"\"\n Load values from databases\n \"\"\"\n self._get_nulls()", "def __init__(self, *argv, **kwargs):\n self.refs = {}\n self.ref0s = {}\n self.defect_refs = {}\n\n self.initialize(*argv, **kwargs)", "def initialize(self):\n for key in self.parameter_dict:\n self.models[key] = self._create_model(key)", "def initdb(self):\n logger.info(\"Initializing database\")\n self.instances.drop()\n self.instances.create_index([('class_id', pymongo.HASHED)])\n # Creates a unique index\n self.instances.create_index(\n 'name',\n unique=True,\n partialFilterExpression={'deleted' : False}\n )\n start_time = time.time()\n timeout = 60 * 5\n while not self.axops_client.ping():\n if time.time() - start_time > timeout:\n raise AXTimeoutException(\"Timed out ({}s) waiting for axops availability\".format(timeout))\n time.sleep(3)\n\n for fix_doc in self.axdb_client.get_fixture_instances():\n instance = FixtureInstance.deserialize_axdbdoc(fix_doc)\n self.instances.insert_one(instance.mongodoc())\n\n logger.info(\"Database initialized\")", "def initialize(self):\r\n self.bucket_array.initialize()", "def initialise():\n _initialiseGlobals()\n for pop in AnadPartOfPerspectiveDb.Iterator():\n _addToKnowledge(pop)\n return", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def initialize():\n DATABASE.connect()\n DATABASE.drop_tables([Journal], safe=True)\n DATABASE.create_tables([Journal], safe=True)\n DATABASE.close()" ]
[ "0.6023248", "0.60092384", "0.5815747", "0.5811158", "0.578031", "0.5776491", "0.5769026", "0.5710211", "0.5708011", "0.56971985", "0.56527954", "0.5638689", "0.5624701", "0.56140167", "0.5595212", "0.55946684", "0.5583777", "0.55719703", "0.55538756", "0.55204964", "0.55000246", "0.5488975", "0.5487708", "0.54791427", "0.5438799", "0.5430286", "0.54258335", "0.54255944", "0.5422453", "0.5414136" ]
0.61077964
0
Parse the databases listed in the global config file. This returns a list providing information on how to build the cascade of databases, but does not instantiate any CalDB objects, so it can be used by the caldb script efficiently.
def parse_databases(default_dbname="cal_manager.db"): db_list = [] calconf = get_calconf() if not calconf: return db_list upload_cookie = calconf.get("upload_cookie") # Allow old-format file to be read try: databases = calconf["databases"] except KeyError: databases = calconf.get("database_dir") if not databases: return db_list with warnings.catch_warnings(): warnings.simplefilter("always", DeprecationWarning) warnings.warn("Use 'databases' instead of 'database_dir' in " "config file.", DeprecationWarning ) for line in databases.splitlines(): if not line: # handle blank lines continue db, *flags = shlex.split(line) # "get" is default if there are no flags, but if any flags are # specified, then "get" must be there explicitly kwargs = {"get_cal": not bool(flags), "store_cal": False} for flag in flags: kwarg = f"{flag}_cal" if kwarg in kwargs: kwargs[kwarg] = True else: raise ValueError("{}: Unknown flag {!r}".format(db, flag)) expanded_db = path.expanduser(db) if path.isdir(expanded_db): db = path.join(db, default_dbname) cls = LocalDB elif path.isfile(expanded_db): cls = LocalDB elif "/" in expanded_db and "//" not in expanded_db: cls = LocalDB else: # does not check cls = RemoteDB kwargs["upload_cookie"] = upload_cookie db_list.append((cls, db, kwargs)) return db_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def config_parsing(configfile):\n config = ConfigParser.ConfigParser()\n config.read(configfile)\n db_connection = config.get('app:main', 'database_connection')\n db, eng = map_database(db_connection)\n return db, eng", "def get_databases(self):\n pass", "def _get_requested_databases(self):\r\n requested_databases = []\r\n if ((self._requested_namespaces is not None) and\r\n (self._requested_namespaces != [])):\r\n for requested_namespace in self._requested_namespaces:\r\n if requested_namespace[0] is '*':\r\n return []\r\n elif requested_namespace[0] not in IGNORE_DBS:\r\n requested_databases.append(requested_namespace[0])\r\n return requested_databases", "def config_db():", "def read_db():\n # read config file\n config = configparser.ConfigParser()\n config.read_file(open(\"options.cfg\"))\n\n return config['DEFAULT']['DatabaseFilename']", "def read_db():\n f_result = []\n result = execute_query('select sitename, id from {} order by sitename;'.format(TABLES[0]))\n sites = [(x['sitename'], x['id']) for x in result]\n for sitename, site_id in sites:\n sitedict = {'name': sitename}\n querystring = 'select settname, settval from {} order by settname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[1]), (site_id,))\n sitedict['settings'] = {x: y for x, y in cur.fetchall()}\n querystring = 'select dirname, id from {} order by dirname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[2]), (site_id,))\n sitedirs = [(x['dirname'], x['id']) for x in cur.fetchall()]\n sitedict['docs'] = []\n # if we keep the site_id in the docstats table we could restrict this to one db-query\n # and filter the result set inside the loop\n # although this should also be possible with a subselect or something like that\n for dirname, dir_id in sitedirs:\n dirlist = []\n querystring = 'select * from {} order by docname where dir_id = %s;'\n result = execute_query(querystring.format(TABLES[3]), (dir_id,))\n for resultdict in cur:\n resultdict['dirname'] = dirname\n dirlist.append(resultdict)\n sitedict['docs'].append(dirlist)\n f_result.append(sitedict)\n return f_result", "def get_available_databases():\n\n available_databases = dict()\n all_databases = resource_keys('database', strip=[])\n for database in all_databases:\n try:\n database_entry_point = load_resource(database, 'database')\n\n available_databases[database] = dict()\n\n # Checking if the database has data for the ZT normalization\n available_databases[database][\"has_zt\"] = hasattr(database_entry_point, \"zobjects\") and hasattr(database_entry_point, \"tobjects\")\n available_databases[database][\"groups\"] = []\n # Searching for database groups\n try:\n groups = list(database_entry_point.groups()) or [\"dev\"]\n for g in [\"dev\", \"eval\"]:\n available_databases[database][\"groups\"] += [g] if g in groups else []\n except Exception:\n # In case the method groups is not implemented\n available_databases[database][\"groups\"] = [\"dev\"]\n except Exception:\n pass\n return available_databases", "def __init__(self):\n self.db = []\n for line in file(self.conf_file):\n line = line.strip()\n if line and not line.startswith(\"#\"):\n self.db.append(NSSDB(line))", "def setupDatabases(con, options, dbList):\n currentDatabases = dbGetFirstColumnAsMap(con, \"select datname from pg_database where datistemplate = false\")\n currentRolenames = dbGetFirstColumnAsMap(con, \"select 
rolname from pg_roles\")\n trace(\"currentDatabases = \" + str(currentDatabases))\n for dbName in dbList:\n trace(\"dbName='%s'\" % str(dbName))\n setupDatabase(con, options, currentDatabases, currentRolenames, dbName, dbList[dbName])", "def do_list(self, line):\n\t\tx = [i for i in self.client.list_databases() if i['name'] not in ['admin','config','line','local','mongoengine_test','pymongo_test']]\n\t\tfor db in x:\n\t\t\tprint(db['name'])", "def parsedbconfig(self):\n p = xml.parsers.expat.ParserCreate()\n p.StartElementHandler = start_element\n f=open(self.__dbconfig,'r')\n p.ParseFile(f)\n self.fillregistry()", "def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())", "def databases(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"databases\")", "def get_config_db():\n\n datab = {'db_name': 'database_name',\n 'db_url': 'database_url'}\n\n return datab", "def get_database_names(self) -> Iterable[str]:\n custom_database_name = self.service_connection.__dict__.get(\"databaseName\")\n\n database_name = self.service_connection.__dict__.get(\n \"database\", custom_database_name or \"default\"\n )\n # By default, set the inspector on the created engine\n self.inspector = inspect(self.engine)\n yield database_name", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def get_old_db_details():\n db_conf_prelim = {\n 'host': None,\n 'port': None,\n 'database': None,\n 'username': None,\n 'password': None }\n\n db_conf = codecs.open(\"db.conf\", encoding=\"utf-8\", mode=\"r\")\n db_conf_text = db_conf.read()\n db_conf.close()\n\n prev_host = search_config(\"metastore.connection-postgresql.host\", \"host\", db_conf_text)\n prev_port = search_config(\"metastore.connection-postgresql.port\", \"port\", db_conf_text)\n prev_database = search_config(\"metastore.connection-postgresql.database\", \"database\", db_conf_text)\n prev_username = search_config(\"metastore.connection-postgresql.username\", \"username\", db_conf_text)\n prev_password = search_config(\"metastore.connection-postgresql.password\", \"password\", db_conf_text)\n db_conf_from_file = {\n 'host': prev_host if prev_host else None,\n 'port': prev_port if prev_port else None,\n 'database': prev_database if prev_database else None,\n 'username': prev_username if prev_username else None,\n 'password': prev_password if prev_password else None }\n\n db_conf_prelim.update(db_conf_from_file)\n return db_conf_prelim", "def get_databases(self) -> List[Dict]:\n self._check_connection(check_db=False)\n all_dbs = []\n for scope in self._dispatch_json(\"get\", self._api)[\"system:role\"][\n \"system:capability\"\n ][\"system:capability_scope\"]:\n if scope[\"@type\"] == \"system:Database\":\n all_dbs.append(scope)\n return all_dbs", "def get_databases(self):\n query = mssqlqueries.get_databases()\n logger.info(u'Databases query: %s', query)\n for tabular_result in self.execute_query(query):\n return [x[0] for x in tabular_result[0]]", "def list_databases(self):\n r = self.__get_response(settings.LST_DBS)\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])", "def __get_available_databases(self, root):\n\t\tfor i in walk_tree(root):\n\t\t\tif '.sqlite3' in i:\n\t\t\t\tyield os.path.abspath(i)", "def get_databases ():\n 
return _dbobjects[:]", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "def databases(self):\n return self._databases", "def getdbconfig(runmode, dbmodelname):\r\n #print('getdbcofig : ' + runmode + \" \" + dbmodelname)\r\n try:\r\n path = os.path.split(os.path.realpath(__file__))[0]\r\n #print 'db.xml path: ' + path + \"/config/db.xml\"\r\n if (runmode != \"test\"):\r\n dbjsonfile = open(path + '/config/' + runmode + '/db.json', 'r')\r\n dbjson = json.load(dbjsonfile)\r\n #print('config file path : ' + path + \"/config/\" + runmode + \"/db.json\")\r\n else:\r\n dbjsonfile = open(path + '/config/db.json', 'r')\r\n dbjson = json.load(dbjsonfile)\r\n #print('config file path : ' + path + \"/config/db.json\")\r\n \r\n \r\n #print('from dbconfig file get : dbjson' + str(dbjson))\r\n for db in dbjson:\r\n #print db.getAttribute('id')\r\n if db['dbmodelname'] == dbmodelname:\r\n dbuser = db['userid']\r\n dbpasswd = db['password']\r\n dburl = db['dburl']\r\n dburlport = db['dburlport']\r\n dbname = db['dbname']\r\n break\r\n return (dbuser, dbpasswd, dburl, dburlport, dbname)\r\n except Exception as e:\r\n return ('except', str(e), '', '', '')", "def list_databases(self) -> List[Dict]:\n self._check_connection(check_db=False)\n all_data = self.get_databases()\n all_dbs = []\n for data in all_data:\n all_dbs.append(data[\"system:resource_name\"][\"@value\"])\n return all_dbs", "def check_db(self):\n if self.db == 'user':\n db = USERS_LIST\n return db\n elif self.db == 'questions':\n db = QUESTIONS_LIST\n return db\n elif self.db == 'meetups':\n db = MEETUPS_LIST\n return db\n\n elif self.db == 'rsvp':\n db = RSVP_LIST\n return db", "def read_db():\n\n # Look for database in the same folder as this script\n script_dir = os.path.dirname(os.path.realpath(__file__))\n db_filepath = os.path.join(script_dir, 'cn_loads_database.dat')\n\n db = None\n if os.path.isfile(db_filepath):\n with open(db_filepath, 'r') as f:\n db = yaml.load(f.read())\n if db == None:\n db = dict()\n else:\n db = dict()\n\n return db" ]
[ "0.71556365", "0.63972634", "0.6315254", "0.6269479", "0.6201598", "0.61059517", "0.6039825", "0.6018042", "0.5927069", "0.58826655", "0.58598214", "0.58555484", "0.5853016", "0.58385587", "0.5826395", "0.5825476", "0.5823567", "0.5823567", "0.58104885", "0.5800547", "0.5791353", "0.57786435", "0.5766497", "0.57601374", "0.57589287", "0.5751659", "0.57486916", "0.5734329", "0.57218164", "0.5718908" ]
0.7657391
0
User helper function to define a local calibration database based on the "dragonsrc" config file. Returns a LocalDB object.
def set_local_database(): load_config() db_path = get_db_path_from_config() db = LocalDB(db_path, log=None) return db
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_local_database(db_file_path):\n conn = sql.connect(db_file_path)\n cur = conn.cursor()\n\n table = str('CREATE TABLE app_config ('\n 'ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,'\n 'Name TEXT UNIQUE NOT NULL,'\n 'Value TEXT);')\n cur.execute(table)\n\n table = str('CREATE TABLE menu_data ('\n 'ContextId TEXT PRIMARY KEY NOT NULL,'\n 'Value TEXT);')\n cur.execute(table)\n\n table = str('CREATE TABLE profiles ('\n 'ID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,'\n 'Guid TEXT NOT NULL UNIQUE,'\n 'IsActive BOOLEAN DEFAULT (0) NOT NULL,'\n 'SortOrder INTEGER NOT NULL);')\n cur.execute(table)\n\n table = str('CREATE TABLE profiles_config ('\n 'Guid TEXT NOT NULL,'\n 'Name TEXT NOT NULL,'\n 'Value TEXT,'\n 'PRIMARY KEY (Guid, Name ),'\n 'FOREIGN KEY (Guid)'\n 'REFERENCES Profiles (Guid) ON DELETE CASCADE ON UPDATE CASCADE);')\n cur.execute(table)\n\n table = str('CREATE TABLE session ('\n 'Name TEXT PRIMARY KEY NOT NULL,'\n 'Value TEXT);')\n cur.execute(table)\n\n table = str('CREATE TABLE settings_monitor ('\n 'Name TEXT PRIMARY KEY NOT NULL,'\n 'Value TEXT);')\n cur.execute(table)\n\n table = str('CREATE TABLE search ('\n 'ID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,'\n 'Guid TEXT NOT NULL REFERENCES profiles (Guid) ON DELETE CASCADE ON UPDATE CASCADE,'\n 'Type TEXT NOT NULL,'\n 'Value TEXT NOT NULL,'\n 'Parameters TEXT,'\n 'LastAccess TEXT);')\n cur.execute(table)\n\n if conn:\n conn.close()", "def setup(db_name = 'net.db', **extra_params):\n global db_run # Imports the DB from the simulator\n \n# # If the file already exists delete it\n if DEBUG: print \"[ pyNN ] : Opening DB\", os.path.abspath(db_name)\n if os.path.exists(db_name):\n if DEBUG: print \"[ pyNN ] : DB already initialized... cleaning up... removing file %s\" % db_name\n os.remove(db_name)\n db_run = db(db_name) # Creates the DB \n db_run.init_db() # Initializes the DB\n return(db_run)", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def set_db_file():\n\n return os.path.join(db_path, db_file)", "def config_db():", "def database(db):\n if type(db) is str:\n # Database name\n if db.endswith('.py'):\n # Python source, exec it\n globals = {}\n exec(compile(open(db).read(), db, 'exec'), globals)\n if 'DB' in globals:\n db = globals['DB']\n else:\n storage = globals['Storage']\n from ZODB.DB import DB\n db = DB(storage, cache_size=4000)\n elif db.endswith(\".fs\"):\n from ZODB.DB import DB\n from ZODB.FileStorage import FileStorage\n storage = FileStorage(db)\n db = DB(storage, cache_size=4000)\n\n # The following will fail unless the application has been configured.\n from zope.event import notify\n notify(zope.processlifetime.DatabaseOpened(db))\n\n return db", "def db_file():\n return abspath('vmchecker.db')", "def db_assetstore(env):\n\n # try local.config\n for dspace_cfg in env[\"config_dist_relative\"]:\n dspace_cfg = os.path.join( os.getcwd( ), dspace_cfg )\n if os.path.exists( dspace_cfg ):\n env[\"config_dist_relative\"][0] = dspace_cfg\n break\n dspace_cfg = os.path.join( os.getcwd( ), env[\"config_dist_relative\"][0] )\n prefix = \"lr.\"\n if not os.path.exists( dspace_cfg ):\n _logger.info( \"Could not find [%s]\", dspace_cfg )\n dspace_cfg, prefix = os.path.join( os.getcwd( ), env[\"dspace_cfg_relative\"] ), \"\"\n # try dspace.cfg\n if not os.path.exists( dspace_cfg ):\n _logger.info( \"Could not find [%s]\", dspace_cfg )\n dspace_cfg = None\n\n # not found\n if dspace_cfg is None:\n return None\n\n # get the 
variables\n #\n _logger.info( \"Parsing for [%s]\", dspace_cfg )\n db_username = None\n db_pass = None\n db_table = None\n db_port = \"\"\n if os.path.exists( dspace_cfg ):\n lls = open( dspace_cfg, \"r\" ).readlines( )\n for l in lls:\n l = l.strip( )\n if l.startswith( prefix + \"db.username\" ):\n db_username = l.strip( ).split( \"=\" )[1].strip( )\n if l.startswith( prefix + \"db.password\" ):\n db_pass = l.strip( ).split( \"=\" )[1].strip( )\n if db_table is None and l.startswith( prefix + \"db.url\" ):\n db_table = l.strip( ).split( \"/\" )[-1].strip( )\n db_port = l.strip( ).split( \":\" )[-1].split( \"/\" )[0]\n if l.startswith( prefix + \"database \" ) or l.startswith( prefix + \"database=\" ):\n db_table = l.strip( ).split( \"=\" )[1].split( \"/\" )[-1].strip( )\n\n _logger.info( \"Trying to connect to [%s] under [%s] at port [%s]\",\n db_table, db_username, db_port )\n\n # get the db table\n import bpgsql\n try:\n con = bpgsql.connect(\n username=db_username, password=db_pass, host=\"127.0.0.1\", dbname=db_table, port=db_port )\n cursor = con.cursor( )\n cursor.execute( \"\"\"\n\tselect text_value, internal_id from bitstream as b JOIN metadatavalue as md ON md.resource_id = b.bitstream_id NATURAL JOIN metadatafieldregistry NATURAL JOIN metadataschemaregistry where md.resource_type_id = 0 and short_id='dc' and element='title' and qualifier is null; \n\n \"\"\" )\n objs = cursor.fetchall( )\n # better explicitly\n cursor.close( )\n con.close( )\n return dict( [(y, x) for x, y in objs] )\n except Exception, e:\n _logger.exception( \"No connection could be made\" )", "def getDBPath():\n return os.path.join(CONFIG_DIR, CONFIG_DICT['common']['local_db'])", "def default_bug_db(configfile=None):\n\n configs=None\n if not configfile:\n configs=default_configs()\n else:\n configs=configparser.ConfigParser()\n configs.read(configfile)\n\n db_file=os.path.normpath(configs.get(\"bug_db\",\"db_file\"))\n return db_file", "def init_db(configuration):\n db = ZODB.config.databaseFromString(configuration)\n for init in IDBInitializer.subscription(db):\n init(db)\n return db", "def set_db(db):\n global db_run # Imports the DB from the simulator\n db_run=db", "def create_db():\r\n\r\n try:\r\n os.remove(proc_loc + 'SF_Parking.db')\r\n print(\"Legacy DB deleted\")\r\n except:\r\n pass\r\n disk_engine = create_engine('sqlite:///'+ proc_loc +'SF_Parking.db')\r\n return sqlite3.connect(proc_loc + 'SF_Parking.db')", "def read_db():\n # read config file\n config = configparser.ConfigParser()\n config.read_file(open(\"options.cfg\"))\n\n return config['DEFAULT']['DatabaseFilename']", "def init_calibration_databases(inst_lookups=None, procmode=None,\n ucals=None, upload=None):\n # Read the mdf_dict file and create an actual dict with the complete\n # paths to each of the MDF files\n try:\n masks = import_module('.maskdb', inst_lookups)\n mdf_dict = getattr(masks, 'mdf_dict')\n mdf_key = getattr(masks, 'mdf_key')\n except (ImportError, TypeError, AttributeError):\n mdf_dict = None\n mdf_key = None\n else:\n for k, v in mdf_dict.items():\n mdf_dict[k] = path.join(path.dirname(masks.__file__),\n 'MDF', v)\n caldb = UserDB(name=\"manual calibrations\", mdf_dict=mdf_dict,\n mdf_key=mdf_key, user_cals=ucals)\n\n upload_calibs = upload is not None and \"calibs\" in upload\n upload_science = upload is not None and \"science\" in upload\n for cls, db, kwargs in parse_databases():\n kwargs[\"procmode\"] = procmode\n if cls == RemoteDB:\n # Actually storing to a remote DB requires that \"store\" is set in\n # the 
config *and* the appropriate type is in upload\n kwargs[\"store_science\"] = kwargs[\"store_cal\"] and upload_science\n kwargs[\"store_cal\"] &= upload_calibs\n elif cls == LocalDB:\n kwargs[\"force_init\"] = False\n database = cls(db, name=db, **kwargs)\n caldb.add_database(database)\n return caldb", "def db(request):\n controller.util.logger_setup()\n properties.parse_settings()\n if request.config.getoption(\"--realdb\"):\n db = connect(\"baden_test_db\", host=properties.DB_HOST, port=properties.DB_PORT)\n print(\"Connected to local database\")\n else:\n db = connect('mongoenginetest', host='mongomock://localhost')\n print(\"Connected to mock database\")\n return db", "def get_db(file_path):\n db_new = not os.path.isfile(file_path)\n sqlite3_detect_types = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES\n db = sqlite3.connect(file_path, detect_types=sqlite3_detect_types)\n if db_new:\n create_db(db)\n return db", "def connect(filename=DATABASE_FILENAME):\n if not path.exists(filename):\n raise FileNotFoundError(\"Database file not found: \" + filename)\n with open(filename, 'r', encoding=\"utf-8\") as f:\n return Database(json.load(f))", "def gtfsdb_main(ctx, database):\n ctx.obj = dict()\n if not database and os.path.exists(DEFAULT_CONFIG_FILE):\n conf = json.load(open(DEFAULT_CONFIG_FILE, 'r'))\n database = conf['database']\n ctx.obj.update(dict(conf=conf))\n else:\n click.echo(\"No database selected!!\")\n sys.exit(1)\n ctx.obj.update(dict(database=Database(url=database), db_url=database))", "def get_db():\n if \"db\" not in g:\n g.db = sqlite3.connect(current_app.config[\"DATABASE\"], detect_types=sqlite3.PARSE_DECLTYPES)\n g.db.row_factory = sqlite3.Row\n\n return g.db", "def create_new_db():\n global data_base, table\n data_base = asksaveasfilename(title=\"Select file\", filetypes=((\"DATA BASE\", \"*.db\"), (\"all files\", \"*.*\")),\n defaultextension='.db')\n\n if Path(data_base).suffix == '.db':\n create_win_create_table()\n else:\n mistake_db_file()", "def database():\n return conf().database", "def setupDbEnv(baseDirPath=None):\n global gDbEnv, gDbDirPath\n\n if not baseDirPath:\n baseDirPath = DATABASE_DIR_PATH\n\n baseDirPath = os.path.abspath(os.path.expanduser(baseDirPath))\n if not os.path.exists(baseDirPath):\n try:\n os.makedirs(baseDirPath)\n except OSError as ex:\n baseDirPath = ALT_DATABASE_DIR_PATH\n baseDirPath = os.path.abspath(os.path.expanduser(baseDirPath))\n if not os.path.exists(baseDirPath):\n os.makedirs(baseDirPath)\n else:\n if not os.access(baseDirPath, os.R_OK | os.W_OK):\n baseDirPath = ALT_DATABASE_DIR_PATH\n baseDirPath = os.path.abspath(os.path.expanduser(baseDirPath))\n if not os.path.exists(baseDirPath):\n os.makedirs(baseDirPath)\n\n gDbDirPath = baseDirPath # set global\n\n gDbEnv = lmdb.open(gDbDirPath, max_dbs=MAX_DB_COUNT)\n # creates files data.mdb and lock.mdb in dbBaseDirPath\n\n # create named dbs (core and tables)\n gDbEnv.open_db(b'core')\n gDbEnv.open_db(b'hid2did') # table of dids keyed by hids\n gDbEnv.open_db(b'did2offer', dupsort=True) # table of offer expirations keyed by offer relative dids\n gDbEnv.open_db(b'anon', dupsort=True) # anonymous messages\n gDbEnv.open_db(b'expire2uid', dupsort=True) # expiration to uid anon\n\n return gDbEnv", "def init():\n\n # delete existing file\n if os.path.exists(DBFILE):\n os.remove(DBFILE)\n\n db = sqlite3.connect(DBFILE)\n # create tables\n create(db, PARAGRAPH, \"paragraph\")\n create(db, QUESTION, \"question\")\n create(db, ANSWER, \"answer\")\n\n return db", "def 
generate_database_object(**kwargs):\n return app.database.Database(\"test.db\", **kwargs)", "def __init__(self, db_file):\n pass", "def load_db(path_to_db):\n db_run = db(path_to_db) # Instantiates the DB by reading the file\n db_run.import_config_db() # Imports configuration DB\n db_run.conn.row_factory = sqlite3.Row # Better select results\n return(db_run)", "def init_database():\n\n # The current dir should be the script home\n homedir = os.path.normpath(\n os.path.dirname(\n sys.executable if getattr(sys, 'frozen', False) else\n __file__)) # cx_Freeze compatibility\n os.chdir(homedir)\n\n engine = create_engine(\"sqlite:///data.db\")\n BASE.metadata.bind = engine\n BASE.metadata.create_all()\n\n return engine", "def initialize_sqlite_database(sel_wormbase_version,\n strain_only=False):\n start = arrow.utcnow()\n console.log(\"Initializing Database\")\n\n SQLITE_PATH = f\"base/cendr.{DATASET_RELEASE}.{sel_wormbase_version}.db\"\n SQLITE_BASENAME = os.path.basename(SQLITE_PATH)\n\n # Download wormbase files\n if strain_only is False:\n if os.path.exists(SQLITE_PATH):\n os.remove(SQLITE_PATH)\n\n if not os.path.exists(DOWNLOAD_PATH):\n os.makedirs(DOWNLOAD_PATH)\n\n # Parallel URL download\n console.log(\"Downloading Wormbase Data\")\n download([URLS.GENE_GFF_URL,\n URLS.GENE_GTF_URL,\n URLS.GENE_IDS_URL,\n URLS.HOMOLOGENE_URL,\n URLS.ORTHOLOG_URL,\n URLS.TAXON_ID_URL],\n DOWNLOAD_PATH)\n\n gff_fname = download_fname(DOWNLOAD_PATH, URLS.GENE_GFF_URL)\n gtf_fname = download_fname(DOWNLOAD_PATH, URLS.GENE_GTF_URL)\n gene_ids_fname = download_fname(DOWNLOAD_PATH, URLS.GENE_IDS_URL)\n homologene_fname = download_fname(DOWNLOAD_PATH, URLS.HOMOLOGENE_URL)\n ortholog_fname = download_fname(DOWNLOAD_PATH, URLS.ORTHOLOG_URL)\n\n from base.application import create_app\n app = create_app()\n app.config['SQLALCHEMY_DATABASE_URI'] = f\"sqlite:///{SQLITE_BASENAME}\"\n app.app_context().push()\n\n if strain_only is True:\n db.metadata.drop_all(bind=db.engine, checkfirst=True, tables=[Strain.__table__])\n db.metadata.create_all(bind=db.engine, tables=[Strain.__table__])\n else:\n db.create_all(app=app)\n db.session.commit()\n\n console.log(f\"Created {SQLITE_PATH}\")\n\n ################\n # Load Strains #\n ################\n console.log('Loading strains...')\n db.session.bulk_insert_mappings(Strain, fetch_andersen_strains())\n db.session.commit()\n console.log(f\"Inserted {Strain.query.count()} strains\")\n\n if strain_only is True:\n console.log('Finished loading strains')\n return\n\n ################\n # Set metadata #\n ################\n console.log('Inserting metadata')\n metadata = {}\n metadata.update(vars(constants))\n metadata.update({\"CENDR_VERSION\": CENDR_VERSION,\n \"APP_CONFIG\": APP_CONFIG,\n \"DATASET_RELEASE\": DATASET_RELEASE,\n \"WORMBASE_VERSION\": sel_wormbase_version,\n \"RELEASES\": RELEASES,\n \"DATE\": arrow.utcnow()})\n for k, v in metadata.items():\n if not k.startswith(\"_\"):\n # For nested constants:\n if type(v) == type:\n for name in [x for x in dir(v) if not x.startswith(\"_\")]:\n key_val = Metadata(key=\"{}/{}\".format(k, name),\n value=getattr(v, name))\n db.session.add(key_val)\n else:\n key_val = Metadata(key=k, value=str(v))\n db.session.add(key_val)\n\n db.session.commit()\n\n ##############\n # Load Genes #\n ##############\n console.log('Loading summary gene table')\n genes = fetch_gene_gff_summary(gff_fname)\n db.session.bulk_insert_mappings(WormbaseGeneSummary, genes)\n db.session.commit()\n\n console.log('Loading gene table')\n 
db.session.bulk_insert_mappings(WormbaseGene, fetch_gene_gtf(gtf_fname, gene_ids_fname))\n gene_summary = db.session.query(WormbaseGene.feature,\n db.func.count(WormbaseGene.feature)) \\\n .group_by(WormbaseGene.feature) \\\n .all()\n gene_summary = '\\n'.join([f\"{k}: {v}\" for k, v in gene_summary])\n console.log(f\"============\\nGene Summary\\n------------\\n{gene_summary}\\n============\")\n\n ###############################\n # Load homologs and orthologs #\n ###############################\n console.log('Loading homologs from homologene')\n db.session.bulk_insert_mappings(Homologs, fetch_homologene(homologene_fname))\n db.session.commit()\n\n console.log('Loading orthologs from WormBase')\n db.session.bulk_insert_mappings(Homologs, fetch_orthologs(ortholog_fname))\n db.session.commit()\n\n #############\n # Upload DB #\n #############\n\n # Upload the file using todays date for archiving purposes\n console.log(f\"Uploading Database ({SQLITE_BASENAME})\")\n upload_file(f\"db/{SQLITE_BASENAME}\", SQLITE_PATH)\n\n diff = int((arrow.utcnow() - start).total_seconds())\n console.log(f\"{diff} seconds\")\n\n # =========================== #\n # Generate gene id dict #\n # =========================== #\n # Create a gene dictionary to match wormbase IDs to either the locus name\n # or a sequence id\n gene_dict = {x.gene_id: x.locus or x.sequence_name for x in WormbaseGeneSummary.query.all()}\n pickle.dump(gene_dict, open(\"base/static/data/gene_dict.pkl\", 'wb'))", "def db_path_with_improper_files():\n return os.path.join(_here, 'fixtures/databases/db-improper/database')" ]
[ "0.6375822", "0.62055", "0.61780226", "0.6162955", "0.60500777", "0.6020546", "0.5967176", "0.5930668", "0.59141713", "0.5861882", "0.5850293", "0.5849473", "0.5848049", "0.5810362", "0.578116", "0.577531", "0.5756106", "0.57501054", "0.5743103", "0.57386154", "0.573841", "0.5734136", "0.571116", "0.5700737", "0.56949794", "0.56883985", "0.5683408", "0.56793606", "0.5674578", "0.5665663" ]
0.7368509
0
Component cannot be built because its subcomponent remains input-incomplete.
def test_inner_deadlock_of_component(self): a = DummyProducingInputIncompleteBuild(scope="A") try: test = ComponentTest(component=a, input_spaces=dict(input_=float)) except RLGraphBuildError as e: print("Seeing expected RLGraphBuildError ({}). Test ok.".format(e)) else: raise RLGraphError("Not seeing expected RLGraphBuildError with input-incomplete model!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_solution_of_inner_deadlock_of_component_with_must_be_complete_false(self):\n a = DummyProducingInputIncompleteBuild(scope=\"A\")\n deadlock_component = a.sub_components[\"dummy-calling-one-api-from-within-other\"]\n # Manually set the must_be_complete flag to false.\n deadlock_component.api_methods[\"run_inner\"].must_be_complete = False\n test = ComponentTest(component=a, input_spaces=dict(input_=float))\n print(\"Not seeing RLGraphBuildError. Test ok.\")", "def validate_component(self, entry):\n\n if 'type' in entry:\n self.validate_type_entry(entry)\n return\n\n check_fields(entry, ['name', 'edges'])\n\n if 'states' not in entry:\n for edge in entry['edges']:\n edge_value = entry['edges'][edge]\n if 'nodes' not in edge_value or 'teq' not in edge_value or len(edge_value) != 2:\n raise exceptions.BadInputError(\n f'invalid single-state component syntax in {entry}')\n\n self.components.append(entry)", "def _build(self):\n raise NotImplementedError()", "def test_badComponentName(self):\n nPins = 12\n fuelDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 0.9, \"id\": 0.0, \"mult\": nPins}\n cladDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 1.1, \"id\": 1.0, \"mult\": nPins}\n fuel = Circle(\"fuel\", \"UZr\", **fuelDims)\n clad = Circle(\"clad_4.2.3\", \"HT9\", **cladDims)\n gapDims = {\n \"Tinput\": 25.0,\n \"Thot\": 430.0,\n \"od\": \"clad_4.2.3.id\",\n \"id\": \"fuel.od\",\n \"mult\": nPins,\n }\n gapDims[\"components\"] = {\"clad_4.2.3\": clad, \"fuel\": fuel}\n with self.assertRaises(ValueError):\n _gap = Circle(\"gap\", \"Void\", **gapDims)", "def skip_sub_components(self, reason):\n pass", "def build (self):\n raise NotImplementedError", "def build(self):\n raise NotImplementedError", "def build_component(component: str) -> None:\n parts: Final = component.split(\"-\", maxsplit=1)\n if len(parts) != 2: # noqa: PLR2004 # this will go away with match/case\n sys.exit(f\"Internal error: build_component() invoked with {component=!r}\")\n kolla_component, kolla_service = parts\n build: Final = prepare.build_dockerfile(cfg, files, kolla_component, kolla_service)\n\n with tempfile.NamedTemporaryFile(\n mode=\"wt\", encoding=\"UTF-8\", prefix=\"Dockerfile.\"\n ) as dockerfile:\n dockerfile.write(build.dockerfile)\n dockerfile.flush()\n subprocess.check_call([\"ls\", \"-l\", \"--\", dockerfile.name])\n subprocess.check_call([\"cat\", \"--\", dockerfile.name])\n\n cmd: Final[list[str | pathlib.Path]] = [\n \"docker\",\n \"build\",\n \"-t\",\n f\"storpool/{build.container_name}{cfg.tag_suffix}\",\n \"--rm\",\n *([\"--no-cache\"] if no_cache else []),\n *([\"--pull\"] if pull else []),\n \"-f\",\n dockerfile.name,\n \"--\",\n datadir,\n ]\n cmd_str: Final = shlex.join(str(word) for word in cmd)\n cfg.diag(lambda: f\"Running `{cmd_str}`\")\n try:\n subprocess.run(cmd, check=True)\n except (OSError, subprocess.CalledProcessError) as err:\n sys.exit(f\"Could not run `{cmd_str}`: {err}\")", "def child_invalid(self):\n raise NotImplementedError(\n \"{} does not have implemented `child_invalid`\".format(self)\n )", "def build(self) -> None:", "def validate(self):\n Component.validate(self)\n kinds = (\"lib\", \"exe\")\n if self.kind not in kinds:\n raise Invalid(\"kind must be one of %s for component %s\" % (kinds,self.name))\n\n if self.kind == \"exe\" :\n if not self.exe_path:\n raise Invalid(\"exe_path must be defined for component %s\" % self.name)", "def _build(self):", "def _build(self):", "def modifyNotValuableComponents(self):\n # Nothing to do\n pass", "def __init__(self, 
component):\r\n self.component = component", "def __init__(self, component):\r\n self.component = component", "def build(self):\n pass", "def build(self):\n pass", "def test_component_without_owner_is_trac_error(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n\n # we purposely forget to add component_owner to config\n # and run the plugin expecting a TracError\n admin_command = TicketFieldConfigCommand(self.env)\n self.assertRaises(TracError,admin_command.set_fields_from_config)", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", "def test_faulty_op_catching(self):\n # Construct some easy component containing a sub-component.\n dense_layer = DenseLayer(units=2, scope=\"dense-layer\")\n string_layer = EmbeddingLookup(embed_dim=3, vocab_size=4, scope=\"embed-layer\")\n container_component = Component(dense_layer, string_layer)\n\n # Add the component's API method.\n @rlgraph_api(component=container_component)\n def test_api(self, a):\n dense_result = self.get_sub_component_by_name(\"dense-layer\").call(a)\n # First call dense to get a vector output, then call embedding, which is expecting an int input.\n # This should fail EmbeddingLookup's input space checking (only during the build phase).\n return self.get_sub_component_by_name(\"embed-layer\").call(dense_result)\n\n # Test graphviz component graph drawing.\n draw_meta_graph(container_component, apis=True)\n\n test = ComponentTest(\n component=container_component,\n input_spaces=dict(a=spaces.FloatBox(shape=(4,), add_batch_rank=True))\n )", "def test_remove_component_invalid():\n\n with pytest.raises(ComponentAttributeError):\n application_services.get_component('missing_component_to_remove')", "def test_component_remove_error_bad_component(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component remove bad_component')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def build(self):\n raise Exception(\" not implemented in base model\")", "def build(self):\n raise NotImplementedError(\"This is an interface method. Implement it in subclass.\")", "def build(self, parent):\n raise NotImplementedError", "def built(self) -> bool:\n raise NotImplementedError()", "def _check_for_incomplete_input(self):\n pass", "def build(_):", "def build(self, input_shape):\n #pylint: disable=useless-super-delegation\n super().build(input_shape)" ]
[ "0.6003138", "0.58036244", "0.57034343", "0.5630467", "0.56166065", "0.5606792", "0.5565328", "0.5564179", "0.5515907", "0.54965544", "0.5484223", "0.53754073", "0.53754073", "0.5374521", "0.5365614", "0.5365614", "0.53653276", "0.53653276", "0.53585935", "0.5328791", "0.52988267", "0.528028", "0.5244058", "0.5233737", "0.5120761", "0.51116717", "0.5086088", "0.5080021", "0.5074832", "0.50544536" ]
0.69534314
0
Component can be built because its subcomponent resolves a deadlock via `must_be_complete`.
def test_solution_of_inner_deadlock_of_component_with_must_be_complete_false(self): a = DummyProducingInputIncompleteBuild(scope="A") deadlock_component = a.sub_components["dummy-calling-one-api-from-within-other"] # Manually set the must_be_complete flag to false. deadlock_component.api_methods["run_inner"].must_be_complete = False test = ComponentTest(component=a, input_spaces=dict(input_=float)) print("Not seeing RLGraphBuildError. Test ok.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inner_deadlock_of_component(self):\n a = DummyProducingInputIncompleteBuild(scope=\"A\")\n try:\n test = ComponentTest(component=a, input_spaces=dict(input_=float))\n except RLGraphBuildError as e:\n print(\"Seeing expected RLGraphBuildError ({}). Test ok.\".format(e))\n else:\n raise RLGraphError(\"Not seeing expected RLGraphBuildError with input-incomplete model!\")", "async def on_building_construction_complete(self, unit: Unit):", "def needsResolution (self):\n return self.__unresolvedComponents is not None", "def built(self) -> bool:\n raise NotImplementedError()", "def build(self):\n self.lock_built = True", "async def on_building_construction_started(self, unit: Unit):", "def build(self):\n\n return True", "def build(self):\n\n return True", "def build(self):\n for component, type in self.__get_data(\"comps\").items():\n self.add_comp(component, type)\n\n self.logger.info('Build of {} finished'.format(self.name))", "def check_component(comp_name: str, comp: defs.Component) -> None:\n if not RE_COMP_NAME.match(comp_name):\n res.append(f\"Invalid component name: {comp_name}\")\n\n for branch_name, branch in sorted(comp.branches.items()):\n check_branch(comp_name, branch_name, branch)", "def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())", "def test_pm_Completeness(self):\n pass", "def __wait_for_components(self, num_comps):\n for _ in range(10):\n num = self.__client.rpc_component_count()\n if num == num_comps:\n break\n time.sleep(1)\n\n num = self.__client.rpc_component_count()\n if num > num_comps:\n print(\"CnCServer still has %d components (expect %d)\" %\n (num, num_comps), file=sys.stderr)", "def busy(self) -> bool:\n return self.state != SubflowState.Available", "def checkBuildStatus(self):\n pass", "def _build(self):\n raise NotImplementedError()", "def build_component(component: str) -> None:\n parts: Final = component.split(\"-\", maxsplit=1)\n if len(parts) != 2: # noqa: PLR2004 # this will go away with match/case\n sys.exit(f\"Internal error: build_component() invoked with {component=!r}\")\n kolla_component, kolla_service = parts\n build: Final = prepare.build_dockerfile(cfg, files, kolla_component, kolla_service)\n\n with tempfile.NamedTemporaryFile(\n mode=\"wt\", encoding=\"UTF-8\", prefix=\"Dockerfile.\"\n ) as dockerfile:\n dockerfile.write(build.dockerfile)\n dockerfile.flush()\n subprocess.check_call([\"ls\", \"-l\", \"--\", dockerfile.name])\n subprocess.check_call([\"cat\", \"--\", dockerfile.name])\n\n cmd: Final[list[str | pathlib.Path]] = [\n \"docker\",\n \"build\",\n \"-t\",\n f\"storpool/{build.container_name}{cfg.tag_suffix}\",\n \"--rm\",\n *([\"--no-cache\"] if no_cache else []),\n *([\"--pull\"] if pull else []),\n \"-f\",\n dockerfile.name,\n \"--\",\n datadir,\n ]\n cmd_str: Final = shlex.join(str(word) for word in cmd)\n cfg.diag(lambda: f\"Running `{cmd_str}`\")\n try:\n subprocess.run(cmd, check=True)\n except (OSError, subprocess.CalledProcessError) as err:\n sys.exit(f\"Could not run `{cmd_str}`: {err}\")", "def has_composed_rpm_bulid_libs(self):\n return self.version_info >= (4, 9, 0)", "def valid_dependency(self, dep):\r\n return True", "def was_successful(self):\n return self._build_proto.status == common.SUCCESS", "def has_cargo(self) -> bool:\n return bool(self.proto.cargo_space_taken)", "def busy(self):\n pass", "def test_job_complete(self):\r\n t = mergeorder(['A', 'B', 'C', 'D', 'E'], 'foo')\r\n self.assertFalse(job_complete(t))\r\n 
self.assertFalse(job_complete(t.Children[0]))\r\n self.assertFalse(job_complete(t.Children[1].Children[1]))\r\n\r\n self.assertRaises(JobError, job_complete, t.Children[0].Children[0])\r\n\r\n f = 'test_parallel_merge_otus_JOB_COMPLETE_TEST.poll'\r\n self.assertFalse(os.path.exists(f))\r\n\r\n testf = open(f, 'w')\r\n testf.write('0\\n')\r\n testf.close()\r\n t.PollPath = f\r\n t.StartTime = 10\r\n\r\n self.assertTrue(job_complete(t))\r\n self.assertNotEqual(t.EndTime, None)\r\n self.assertNotEqual(t.TotalTime, None)\r\n\r\n testf = open(f, 'w')\r\n testf.write('1\\n')\r\n testf.close()\r\n\r\n self.assertRaises(JobError, job_complete, t)\r\n t.Processed = False\r\n self.assertRaises(JobError, job_complete, t)\r\n\r\n os.remove(f)", "def can_build(self, game_map) -> bool:\n if self.is_cart():\n return False\n cell = game_map.get_cell_by_pos(self.pos)\n if not cell.has_resource() and cell.citytile is None and self.can_act() and self.has_enough_resources_to_build:\n return True\n return False", "def is_ready(self) -> bool:\n return self.build_progress == 1.0", "def __is_complete__(self,config,mockdb):\n if GenericProcess.__is_complete__(self):\n return True\n if self.pipelines is None:\n return False\n for pipeline in self.__current_pipeline_list__(mockdb):\n if not pipeline.__is_complete__():\n return False\n return True", "def testDeadlockDetection(self):\n for test in range(10):\n # Make a random DAG for the set of child edges\n nodeNumber = random.choice(range(2, 20))\n childEdges = self.makeRandomDAG(nodeNumber)\n # Get an adjacency list representation and check is acyclic\n adjacencyList = self.getAdjacencyList(nodeNumber, childEdges)\n self.assertTrue(self.isAcyclic(adjacencyList))\n\n # Add in follow-on edges - these are returned as a list, and as a set of augmented\n # edges in the adjacency list\n # edges in the adjacency list\n followOnEdges = self.addRandomFollowOnEdges(adjacencyList)\n self.assertTrue(self.isAcyclic(adjacencyList))\n # Make the job graph\n rootJob = self.makeJobGraph(nodeNumber, childEdges, followOnEdges, None)\n rootJob.checkJobGraphAcylic() # This should not throw an exception\n rootJob.checkJobGraphConnected() # Nor this\n # Check root detection explicitly\n self.assertEqual(rootJob.getRootJobs(), {rootJob})\n\n # Test making multiple roots\n childEdges2 = childEdges.copy()\n childEdges2.add((nodeNumber, 1)) # This creates an extra root at \"nodeNumber\"\n rootJob2 = self.makeJobGraph(nodeNumber + 1, childEdges2, followOnEdges, None, False)\n try:\n rootJob2.checkJobGraphConnected()\n self.assertTrue(False) # Multiple roots were not detected\n except JobGraphDeadlockException:\n pass # This is the expected behaviour\n\n def checkChildEdgeCycleDetection(fNode, tNode):\n childEdges.add((fNode, tNode)) # Create a cycle\n adjacencyList[fNode].add(tNode)\n self.assertTrue(not self.isAcyclic(adjacencyList))\n try:\n self.makeJobGraph(nodeNumber, childEdges,\n followOnEdges, None).checkJobGraphAcylic()\n self.assertTrue(False) # A cycle was not detected\n except JobGraphDeadlockException:\n pass # This is the expected behaviour\n # Remove the edges\n childEdges.remove((fNode, tNode))\n adjacencyList[fNode].remove(tNode)\n # Check is now acyclic again\n self.makeJobGraph(nodeNumber, childEdges,\n followOnEdges, None, False).checkJobGraphAcylic()\n\n def checkFollowOnEdgeCycleDetection(fNode, tNode):\n followOnEdges.add((fNode, tNode)) # Create a cycle\n try:\n self.makeJobGraph(nodeNumber, childEdges,\n followOnEdges, None, False).checkJobGraphAcylic()\n # 
self.assertTrue(False) #The cycle was not detected\n except JobGraphDeadlockException:\n pass # This is the expected behaviour\n # Remove the edges\n followOnEdges.remove((fNode, tNode))\n # Check is now acyclic again\n self.makeJobGraph(nodeNumber, childEdges,\n followOnEdges, None, False).checkJobGraphAcylic()\n\n # Now try adding edges that create a cycle\n\n # Pick a random existing order relationship\n fNode, tNode = self.getRandomEdge(nodeNumber)\n while tNode not in self.reachable(fNode, adjacencyList):\n fNode, tNode = self.getRandomEdge(nodeNumber)\n\n # Try creating a cycle of child edges\n checkChildEdgeCycleDetection(tNode, fNode)\n\n # Try adding a self child edge\n node = random.choice(range(nodeNumber))\n checkChildEdgeCycleDetection(node, node)\n\n # Try adding a follow on edge from a descendant to an ancestor\n checkFollowOnEdgeCycleDetection(tNode, fNode)\n\n # Try adding a self follow on edge\n checkFollowOnEdgeCycleDetection(node, node)\n\n # Try adding a follow on edge between two nodes with shared descendants\n fNode, tNode = self.getRandomEdge(nodeNumber)\n if (len(self.reachable(tNode, adjacencyList)\n .intersection(self.reachable(fNode, adjacencyList))) > 0\n and (fNode, tNode) not in childEdges and (fNode, tNode) not in followOnEdges):\n checkFollowOnEdgeCycleDetection(fNode, tNode)", "def get_compatible_parts_for_component(current_rig, target_component):\n \n \n \"\"\"\n Method idea:\n \n - send request to target_component specific rules...\n - should I first \n \n \"\"\"\n \n \n if target_component == Component.CPU:\n \n \n pass", "def test__get_component_version_empty(self):\n self._ucr({'repository/online/component/a/version': ''})\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH): '',\n })\n ver = U.UCS_Version((MAJOR, MINOR, 0)) # comonent.erratalevel!\n comp_ver = self.u._get_component_versions('a', start=ver, end=ver)\n self.assertEqual(set((ver,)), comp_ver)", "def is_complete(self):\n pass" ]
[ "0.6828113", "0.58759904", "0.5640557", "0.5590499", "0.556272", "0.54919034", "0.5406992", "0.5406992", "0.54033756", "0.5342669", "0.52576536", "0.5213345", "0.5185308", "0.5156774", "0.51279163", "0.51243275", "0.51208884", "0.5092463", "0.5070662", "0.5057657", "0.50547624", "0.5028718", "0.50189865", "0.5011386", "0.4992176", "0.49554265", "0.49276355", "0.49089688", "0.4903732", "0.4897261" ]
0.7436549
0
Get the uploaded file, execute the dss in the background (multiple executions of the model)
async def exec_dss(req, resp): logger.info("got a request for executing a dss") media = await req.media('files') logger.debug("%s", str(media)) params = json.loads(media['input']['content']) if 'model_name' in media: params['model_run']['model_name'] = media['model_name'].decode() exec_id = wqdss.processing.get_exec_id() @api.background.task def dss_task(loop): logger.info("Going to execute dss!") loop.create_task(wqdss.processing.execute_dss(exec_id, params)) dss_task(asyncio.get_running_loop()) logger.info("created task %s", exec_id) resp.media = {"id": exec_id}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reaper(self):\n if not self.superuser_request:\n self.abort(402, 'uploads must be from an authorized drone')\n with tempfile.TemporaryDirectory(prefix='.tmp', dir=config.get_item('persistent', 'data_path')) as tempdir_path:\n try:\n file_store = files.FileStore(self.request, tempdir_path)\n except files.FileStoreException as e:\n self.abort(400, str(e))\n now = datetime.datetime.utcnow()\n fileinfo = dict(\n name=file_store.filename,\n created=now,\n modified=now,\n size=file_store.size,\n hash=file_store.hash,\n tags=file_store.tags,\n metadata=file_store.metadata\n )\n container = reaperutil.create_container_hierarchy(file_store.metadata)\n f = container.find(file_store.filename)\n target_path = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))\n if not f:\n file_store.move_file(target_path)\n container.add_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n elif not file_store.identical(util.path_from_hash(fileinfo['hash']), f['hash']):\n file_store.move_file(target_path)\n container.update_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n throughput = file_store.size / file_store.duration.total_seconds()\n log.info('Received %s [%s, %s/s] from %s' % (file_store.filename, util.hrsize(file_store.size), util.hrsize(throughput), self.request.client_addr))", "def run(self):\n\t\tlogger.info(\"Uploading data... @ %f, PID: %d\" % (time.time(), os.getpid()))\n\n\t\tself.dump_db()", "def load(self) -> FileHandle:\n with self.service() as api:\n return api.runs().get_result_file(\n run_id=self.run_id,\n file_id=self.file_id\n )", "def do_start(self, action):\n StaticFile = Pool().get('nereid.static.file')\n\n static_file = StaticFile(Transaction().context.get('active_id'))\n static_file.is_large_file = True\n static_file.save()\n\n post_args = static_file.get_post_form_args()\n\n action['url'] = self.base_url + '?data=' + \\\n base64.b64encode(json.dumps(post_args))\n\n return action, {}", "def post(self):\n filekey = self.request.get(\"filekey\")\n blob_key = self.request.get(\"blobkey\")\n\n if self.request.get(\"daily_speed_sum\"):\n logging.info(\"Starting daily speed sum...\")\n pipeline = DailySpeedSumPipeline(filekey, blob_key)\n pipeline.start()\n self.redirect(pipeline.base_path + \"/status?root=\" + pipeline.pipeline_id)\n else:\n\t logging.info(\"Unrecognized operation.\")", "def fpupload(request, dataset_id):\n\n dataset = Dataset.objects.get(id=dataset_id)\n logger.debug('called fpupload')\n\n if request.method == 'POST':\n logger.debug('got POST')\n for key, val in request.POST.items():\n splits = val.split(\",\")\n for url in splits:\n try:\n fp = FilepickerFile(url)\n except ValueError:\n pass\n else:\n picked_file = fp.get_file()\n filepath = write_uploaded_file_to_dataset(dataset,\n picked_file)\n datafile = Dataset_File(dataset=dataset,\n filename=picked_file.name,\n size=picked_file.size)\n replica = Replica(datafile=datafile,\n url=filepath,\n protocol='',\n location=Location.get_default_location())\n replica.verify(allowEmptyChecksums=True)\n datafile.save()\n replica.datafile = datafile\n replica.save()\n\n return HttpResponse(json.dumps({\"result\": True}))", "def run(self):\n download(self.attempt)", "def post(self):\n source = 'uploaded by user'\n upload_files = self.get_uploads('file')\n blob_key = upload_files[0].key()\n name = self.request.get('name')\n\n user = users.get_current_user()\n\n username = 'admin'\n date = 
datetime.datetime.now()\n str_blob_key = str(blob_key)\n key = FileMetadata.get_key_name(username, date, str_blob_key)\n\n ctx = ndb.get_context()\n meta = FileMetadata(key_name=key, parent=_PARENT)\n meta.owner = user\n meta.filename = name\n meta.uploaded_on = date\n meta.source = source\n meta.blobkey = str_blob_key\n meta.put()\n ctx.clear_cache()\n self.redirect('/admin')", "def handleDQMFileSaver(self):\n\n runIsComplete = getattr(self.jobBag, \"runIsComplete\", False)\n multiRun = getattr(self.jobBag, \"multiRun\", False)\n runLimits = getattr(self.jobBag, \"runLimits\", \"\")\n self.logger.info(\"DQMFileSaver set to multiRun: %s, runIsComplete: %s, runLimits: %s\",\n multiRun, runIsComplete, runLimits)\n\n procScript = \"cmssw_handle_dqm_filesaver.py\"\n\n cmd = \"%s --input_pkl %s --output_pkl %s\" % (\n procScript,\n os.path.join(self.stepSpace.location, self.configPickle),\n os.path.join(self.stepSpace.location, self.configPickle))\n\n if hasattr(self.step.data.application.configuration, \"pickledarguments\"):\n pklArgs = encodeUnicodeToBytes(self.step.data.application.configuration.pickledarguments)\n args = pickle.loads(pklArgs)\n datasetName = args.get('datasetName', None)\n if datasetName:\n cmd += \" --datasetName %s\" % (datasetName)\n if multiRun and runLimits:\n cmd += \" --multiRun --runLimits=%s\" % (runLimits)\n if runIsComplete:\n cmd += \" --runIsComplete\"\n self.scramRun(cmd)\n\n return", "def get_current_file(self):\n#-----------on attend la fin de creation du fichier Nexus\n \n while self._ismoving():\n self.logger.debug(\"DataRecorder creat Nexus file\") \n time.sleep(1.0)\n return self.dp.currentFiles[0]", "def get_processed_file_record(session: Session, url: str) -> ProcessedModelRunUrl:\n processed_file = session.query(ProcessedModelRunUrl).\\\n filter(ProcessedModelRunUrl.url == url).first()\n return processed_file", "def execute (self, context):\n # call the main import function. 
This function should work\n # independent of this context-manager/operator logic.\n category = context.scene.dsf_category\n filepath = self.properties.filepath\n (libdir, srpath) = self.split_scene_filepath (filepath)\n drpath = self.construct_data_path (srpath, category)\n data_rpath = os.path.join (\"/\", drpath)\n scene_rpath = os.path.join (\"/\", srpath)\n log.info (\"libdir: %s\", libdir)\n log.info (\"scene_rpath: %s\", scene_rpath)\n log.info (\"data_rpath: %s\", data_rpath)\n scale = context.scene.dsf_scale\n bpy.ops.dsf.export_props (scene_path = scene_rpath,\\\n data_path = data_rpath, base_dir = libdir, scale = scale)\n return { 'FINISHED' }", "def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))", "def do_work(self):", "def post(self):\n filename = str(time.time())\n filepath = os.path.join(\n os.path.join(current_app.config['UPLOAD_FOLDER'], filename))\n with open(filepath, 'bw') as uploadfile:\n chunk_size = 1024\n while True:\n chunk = request.stream.read(chunk_size)\n if len(chunk) == 0:\n break\n uploadfile.write(chunk)\n current_app.logger.info('file %s upload successfully', filename)\n return {'timestamp': filename}, http.HTTPStatus.CREATED", "def upload_process(self, rec_id: str): # pylint: disable=unused-variable\n\n # Process the uploaded file\n if self.connector == \"overtime\":\n importer = action_process_overtime_data(\n get_record_path(rec_id), output=print, show_status=True\n )\n action_commit_overtime_data(importer, output=print)\n else:\n flash(\"Unknown upload file type :(\", \"error\")\n\n flash(\"Data successfully uploaded!\", \"info\")\n\n return redirect(url_for(f\"{self.endpoint}.upload\"))", "def upload_submission(request, learner, trigger, no_thumbnail=True):\n base_dir_for_file_uploads = settings.MEDIA_ROOT\n thumbnail_file_name_django = ''\n entry_point = trigger.entry_point\n\n files = request.FILES.getlist('file_upload', None)\n if files is None:\n return None\n\n # Is the storage space reachable?\n deepest_dir = base_dir_for_file_uploads + 'uploads/{0}/tmp/'.format(\n entry_point.id)\n\n try:\n os.makedirs(deepest_dir)\n except OSError:\n if not os.path.isdir(deepest_dir):\n logger.error('Cannot create directory for upload: {0}'.format(\n deepest_dir))\n raise\n\n if len(files) == 1:\n filename = files[0].name\n extension = filename.split('.')[-1].lower()\n submitted_file_name_django = 'uploads/{0}/{1}'.format(entry_point.id,\n generate_random_token(token_length=16) + '.' 
+ extension)\n full_path = base_dir_for_file_uploads + submitted_file_name_django\n with open(full_path, 'wb+') as dst:\n for chunk in files[0].chunks():\n dst.write(chunk)\n\n\n f_size = os.path.getsize(full_path)\n if f_size > trigger.max_file_upload_size_MB * 1024 * 1024:\n logger.warning('File too large {0}'.format(\n submitted_file_name_django))\n return None, ('File too large ({0} MB); it must be less than '\n '{1} MB.'.format(round(float(f_size/1024.0/1024.0), 1),\n trigger.max_file_upload_size_MB))\n\n\n else: #if trigger.allow_multiple_files: this is removed for now\n filename = ''\n extension = ''\n submitted_file_name_django = ''\n full_path = ''\n\n\n # Check that the file format is PDF, if that is required.\n strike1 = False\n if 'pdf' in trigger.accepted_file_types_comma_separated.lower() and \\\n extension in ('pdf',):\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'application/pdf' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid PDF upload: {0} [{1}]'.format(mime,\n full_path))\n #return None, 'Invalid file uploaded. Uploaded file must be a PDF.'\n\n doc = PdfFileReader(full_path)\n if doc.isEncrypted:\n logger.debug('Encrypted PDF upload: {0}'.format(full_path))\n return None, ('An encrypted PDF cannot be uploaded. Please remove '\n 'the encryption and try again.')\n\n\n strike1 = False\n if (('jpeg' in trigger.accepted_file_types_comma_separated.lower()) or \\\n ('jpg' in trigger.accepted_file_types_comma_separated.lower())) and \\\n extension in ('jpg', 'jpeg'):\n\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'image/jpeg' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid JPG upload: {0} [{1}]'.format(mime,\n full_path))\n return None, ('Invalid file. Uploaded image should be a valid '\n 'and readable JPEG file.')\n\n\n strike1 = False\n if ('png' in trigger.accepted_file_types_comma_separated.lower()) and \\\n extension in ('png',):\n\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'image/png' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid PNG upload: {0} [{1}]'.format(mime,\n full_path))\n return None, ('Invalid file. Uploaded image should be a valid '\n 'and readable PNG file.')\n\n\n strike2 = False\n if extension.lower() not in \\\n trigger.accepted_file_types_comma_separated.lower():\n logger.debug('Invalid file type upload: received \".{0}\"; [{1}]'.format(\\\n extension, full_path))\n return None, ('Invalid file uploaded. 
Uploaded file must be: {}'.format(\\\n trigger.accepted_file_types_comma_separated))\n\n\n if trigger == entry_point:\n # In some instances we don't use triggers, just entry_points\n prior = Submission.objects.filter(status='S',\n submitted_by=learner,\n entry_point=entry_point,\n is_valid=True\n )\n else:\n prior_indiv = Q(status='S', submitted_by=learner, entry_point=entry_point,\n trigger=trigger, is_valid=True)\n\n # We need this here, but also for the code later in the next\n # if (trigger==entry_point) part\n\n # Default returned by this function is ``None`` if the user is not\n # enrolled in a group, or if this course simply does not use groups.\n group_submitted = is_group_submission(learner, entry_point)\n if is_group_submission(learner, entry_point):\n group_submitted = group_submitted.group\n\n prior_group = Q(status='S', group_submitted=group_submitted,\n entry_point=entry_point, trigger=trigger,\n is_valid=True)\n else:\n prior_group = Q()\n\n prior = Submission.objects.filter(prior_indiv | prior_group)\n\n\n for item in prior:\n logger.debug(('Setting prior submission to False: {0} and name '\n '\"{1}\"'.format(str(item), item.submitted_file_name)))\n item.is_valid = False\n item.save()\n\n\n if trigger == entry_point:\n # In some instances we don't use triggers, just entry_points\n sub = Submission(submitted_by=learner,\n group_submitted=None,\n status='S',\n entry_point=entry_point,\n is_valid=True,\n file_upload=submitted_file_name_django,\n thumbnail=thumbnail_file_name_django,\n submitted_file_name=filename,\n ip_address=get_IP_address(request),\n )\n sub.save()\n else:\n\n sub = Submission(submitted_by=learner,\n group_submitted=group_submitted,\n status='S',\n entry_point=entry_point,\n trigger=trigger,\n is_valid=True,\n file_upload=submitted_file_name_django,\n thumbnail=thumbnail_file_name_django,\n submitted_file_name=filename,\n ip_address=get_IP_address(request),\n )\n sub.save()\n\n if 'pdf' in trigger.accepted_file_types_comma_separated.lower() and \\\n extension in ('pdf',):\n clean_PDF(sub)\n\n return sub", "def job_generation(self):\n # Map a file type to a DAO\n if self.file_type == SmsFileTypes.N1_MC1:\n self.file_dao = SmsN1Mc1Dao()\n\n else:\n self.file_dao = None\n\n if self.file_dao:\n source_data = self.file_dao.source_data(recipient=self.recipient)\n\n if source_data:\n\n self.file_transfer_def = self.file_dao.get_transfer_def(recipient=self.recipient)\n\n self.export_data_to_cloud(source_data)\n\n self.write_data_to_manifest_table(source_data)", "def upload_single_file(request):\n message, success, title = \"\", 0, \"error\"\n is_data_ok = False\n\n if request.method == 'POST':\n data_in_post = [\"id_campaign\", \"field_name\"]\n # defined in utils.py\n is_data_in_post = check_all_data_available_in_post(\n data_in_post, request.POST)\n\n if is_data_in_post['success']:\n is_data_ok = True\n else:\n message = is_data_in_post['message']\n\n if is_data_ok:\n for filename, file in request.FILES.items():\n name = request.FILES[filename].name\n print(\"filename : \", name)\n\n # myfile = request.FILES['abm_company_list_file']\n myfile = request.FILES[filename]\n fs = FileSystemStorage()\n filename = fs.save(\"campaign/\" + myfile.name, myfile)\n print(filename)\n\n # get campaign id\n id_campaign = request.POST.get(\"id_campaign\")\n\n # django get campaign object from model\n campaign = Campaign.objects.filter(id=id_campaign).first()\n\n if campaign:\n # get specification record\n specification = Specification.objects.filter(campaign=campaign).first()\n 
if specification:\n # get field name to save\n field_name = request.POST.get(\"field_name\")\n\n # check object has property with field name\n if hasattr(specification, field_name):\n # nested_setattr(object, 'pet.name', 'Sparky')\n model_field_name = str(field_name) + \".name\"\n model_field_name = model_field_name.replace(\" \", \"\")\n print(model_field_name)\n\n # set nested attribute\n # ex. form.name\n nested_setattr(specification, model_field_name, filename)\n\n specification.save()\n print(nested_getattr(specification, model_field_name, 'default')) # will print string similar to filename\n\n success = 1\n title = 'success'\n message = \"specification updated successfully\"\n else:\n message += \"Error... Specification table has no field '\" + field_name + \"'\"\n\n else:\n message += \"Specification not exists with campaign: '\", str(campaign), \"'\"\n else:\n message += \"Campaign not exist with id : '\", id_campaign, \"'\"\n\n # uploaded_file_url = fs.url(filename)\n success = 1\n else:\n message = \"Please post data using post method\"\n\n jsonresponse = {\n \"success\": 1,\n \"title\": request.POST,\n \"message\": message,\n }\n return JsonResponse(jsonresponse, safe=False)", "async def create_upload_file( background_tasks: BackgroundTasks, file: UploadFile = File(...), db : Session = Depends(get_db)):\n background_tasks.add_task(process_acti, file)\n return {\"status\": \"success\"}", "def sendRequest(event, context):\n file = event\n print(f\"Processing file: {file['name']}.\")\n\n filename = file['name']\n\n url = 'http://34.123.136.112:5000'\n myobj = {'filename': filename}\n\n x = requests.post(url, data = myobj)\n\n print(x.text)", "def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()", "def upload_file(request):\n if request.method == \"POST\":\n form = UploadFileForm(request.POST, request.FILES)\n form.fields['blade'].initial = [0]\n if form.is_valid():\n handle_uploaded_file(request.FILES[\"file\"])\n file_name = request.FILES[\"file\"].name\n\n # Select daq based on blade\n blade = request.POST['blade']\n if blade == 0 or blade == 1 or blade == 4:\n daq = 0\n else:\n daq = 2\n\n # Run the executable\n parameters = request.POST['parameters']\n start = time.time()\n s = ssh.Connection('ac' + blade, username=account_info.username, password=account_info.password)\n results = s.execute('time ~/PowerViz_Executables/executable ' + parameters)\n end = time.time()\n s.close()\n\n elapsed = str(end - start) + \" s\"\n cache.set('start_time', start, 7200)\n cache.set('end_time', end, 7200)\n print \"Just set start_time to: \" + str(cache.get('start_time'))\n print \"Just set end_time to: \" + str(cache.get('end_time'))\n\n # Record results to the dictionary\n c = {\"results\":results[0], \"time\": elapsed}\n\n return render_to_response(\"upload_success.html\", {'c': c})\n\n # Create the form\n c = RequestContext(request)\n\n # Create the form for uploading the file\n form = UploadFileForm()\n c[\"form\"] = form\n\n return render_to_response(\"upload.html\", c)", "def call_file_submission(self):\n if not self.filesSubmitted:\n if CONFIG_BROKER[\"use_aws\"]:\n self.filenames = {\"appropriations\": \"test1.csv\",\n \"award_financial\": \"test2.csv\",\n \"program_activity\": \"test4.csv\",\n \"cgac_code\": \"SYS\", 
\"frec_code\": None,\n \"reporting_period_start_date\": \"01/2001\",\n \"reporting_period_end_date\": \"03/2001\", \"is_quarter\": True}\n else:\n # If local must use full destination path\n file_path = CONFIG_BROKER[\"broker_files\"]\n self.filenames = {\"appropriations\": os.path.join(file_path, \"test1.csv\"),\n \"award_financial\": os.path.join(file_path, \"test2.csv\"),\n \"program_activity\": os.path.join(file_path, \"test4.csv\"),\n \"cgac_code\": \"SYS\", \"frec_code\": None,\n \"reporting_period_start_date\": \"01/2001\",\n \"reporting_period_end_date\": \"03/2001\", \"is_quarter\": True}\n self.submitFilesResponse = self.app.post_json(\"/v1/submit_files/\", self.filenames,\n headers={\"x-session-id\": self.session_id})\n self.updateSubmissionId = self.submitFilesResponse.json[\"submission_id\"]\n return self.submitFilesResponse", "def _execute(self):\n print(\"[ -ENGINE- ] Executing FTP Download ..\")\n # self.time_point(tag = 'execution')\n main = self.import_engine_as_python_function()\n downloaded_files = main(\n ftp_url=self.params.get(\"ftp_url\", None),\n folder=self.params.get(\"ftp_folder\", None),\n login=self.params.get(\"ftp_login\", None),\n password=self.params.get(\"ftp_password\", None),\n include_ext=self.params.get(\"ftp_include_ext\", None),\n output_folder=self.params.get(\"ftp_output_folder\", None),\n max_number_of_files=self.params.get(\"ftp_max_number_of_files\", None),\n blocksize=self.params.get(\"ftp_blocksize\", None),\n )\n # self.print_execution_time(tag='execution')\n self.io[\"output\"][\"finfo\"][\"dir\"] = os.path.dirname(downloaded_files[-1])\n self.io[\"output\"][\"finfo\"][\"file\"] = os.path.basename(downloaded_files[-1])\n return", "def upload(request):\n gi = GalaxyInstance(url=request.session.get('server'), email=request.session.get('galaxyemail'), password=request.session.get(\"galaxypass\"))\n selected = request.POST.get('selected')\n selectedmeta = request.POST.get('meta')\n filetype = request.POST.get('filetype')\n dbkey = request.POST.get('dbkey')\n workflowid = request.POST.get('workflowid')\n pid = request.POST.get('data_id')\n onlydata = request.POST.get('onlydata')\n makecol = request.POST.get('col')\n data_ids = []\n control = request.POST.get('samples')\n test = request.POST.get('samplesb')\n new_hist = request.POST.get('historyname')\n group = request.POST.get('group')\n investigation = request.POST.get('investigation')\n date = strftime(\"%d_%b_%Y_%H:%M:%S\", gmtime())\n select = selected.split(',')\n mselect = selectedmeta.split(',')\n gselect = group.split(',')\n iselect = investigation.split(',')\n files = get_selection(iselect, gselect, select, mselect)[0]\n mfiles = get_selection(iselect, gselect, select, mselect)[1]\n groups = get_selection(iselect, gselect, select, mselect)[2]\n investigations = get_selection(iselect, gselect, select, mselect)[3]\n history_id = create_new_hist(gi, request.session.get('galaxyemail'), request.session.get(\"galaxypass\"),\n request.session.get('server'), workflowid, files, new_hist)\n inputs = {}\n if len(filter(None, files)) <= 0:\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n if onlydata == \"true\":\n make_data_files(gi, files, request.session.get('username'), request.session.get('password'), request.session.get('galaxyemail'),\n request.session.get('galaxypass'), control, test, history_id, filetype, dbkey)\n else:\n make_data_files(gi, files, request.session.get('username'), request.session.get('password'), request.session.get('galaxyemail'),\n 
request.session.get('galaxypass'), control, test, history_id, filetype, dbkey)\n make_meta_files(gi, mfiles, request.session.get('username'), request.session.get('password'), request.session.get('galaxyemail'),\n request.session.get('galaxypass'), control, test, history_id)\n if workflowid != \"0\":\n in_count = 0\n resultid = uuid.uuid1()\n datamap = dict()\n mydict = {}\n jsonwf = gi.workflows.export_workflow_json(workflowid)\n for i in range(len(jsonwf[\"steps\"])):\n if jsonwf[\"steps\"][str(i)][\"name\"] == \"Input dataset\":\n try:\n label = jsonwf[\"steps\"][str(i)][\"inputs\"][0][\"name\"]\n except IndexError:\n label = jsonwf[\"steps\"][str(i)][\"label\"]\n mydict[\"in%s\" % (str(i + 1))] = gi.workflows.get_workflow_inputs(workflowid, label=label)[0]\n for k, v in mydict.items():\n datamap[v] = {'src': \"hda\", 'id': get_input_data(request.session.get('galaxyemail'), request.session.get('galaxypass'),\n request.session.get('server'))[0][in_count]}\n data_ids.append(get_input_data(request.session.get('galaxyemail'), request.session.get('galaxypass'),\n request.session.get('server'))[0][in_count])\n in_count += 1\n if makecol == \"true\":\n gi.histories.create_dataset_collection(history_id, make_collection(data_ids))\n gi.workflows.invoke_workflow(workflowid, datamap, history_id=history_id)\n gi.workflows.export_workflow_to_local_path(workflowid, request.session.get('username'), True)\n datafiles = get_output(request.session.get('galaxyemail'), request.session.get('galaxypass'), request.session.get('server'))\n store_results(1, datafiles, request.session.get('server'), request.session.get('username'),\n request.session.get('password'), request.session.get('storage'),\n groups, resultid, investigations, date)\n store_results(3, datafiles, request.session.get('server'), request.session.get('username'),\n request.session.get('password'), request.session.get('storage'),\n groups, resultid, investigations, date)\n ga_store_results(request.session.get('username'), request.session.get('password'), workflowid,\n request.session.get('storage'), resultid, groups, investigations)\n call([\"rm\", request.session.get('username') + \"/input_test\"])\n return render_to_response('results.html', context={'workflowid': workflowid, 'inputs': inputs, 'pid': pid,\n 'server': request.session.get('server')})\n else:\n if makecol == \"true\":\n history_data = gi.histories.show_history(history_id, contents=True)\n for c in range(0, len(history_data)):\n data_ids.append(history_data[c]['id'])\n gi.histories.create_dataset_collection(history_id, make_collection(data_ids))\n ug_store_results(\n request.session.get('galaxyemail'), request.session.get('galaxypass'), request.session.get('server'), workflowid,\n request.session.get('username'), request.session.get('password'), request.session.get('storage'), groups, investigations, date)\n return HttpResponseRedirect(reverse(\"index\"))", "def post(self, request):\n\t\t# TODO: form validation\n\t\t# extract attribute\n\t\tpublicized = False if request.POST['is_publicized'] == \"false\" else True\n\t\t# grab the request data and create dataset\n\t\tdataset = Dataset(title = request.POST['title'], \\\n\t\t\t\t\t\tpublicized = publicized, \\\n\t\t\t\t\t\traw_data_file = request.FILES['file'], \\\n\t\t\t\t\t\tuploader = request.user,)\n\t\t# save the metadata for the dataset \n\t\t# along with file as Django internally saved it\n\t\tdataset.save()\n\t\t# TODO: Notify spark with dataset raw data path\n\t\t# and the dataset id so that later Spark callback\n\t\t# can 
retrieve the user related to this dataset\n\t\treturn JsonResponse({\n\t\t\t\t'status' : \"success\",\n\t\t\t\t'message' : \"Your dataset has been uploaded and is being processed. \\\n\t\t\t\t\t\t\twe will send you an email once it's done!\"\n\t\t\t})", "def _execute(self, skip_permission_check=False):\n\n # Setup the user credentials and logger\n self._setup()\n\n # Create and check the resource directory\n self.storage_interface.setup()\n\n EphemeralProcessing._execute(self)\n\n # Export all resources and generate the finish response\n self._export_resources()", "def load_batch(self, request, *args, **kwargs):\n try:\n # get a list of the files in the associated path\n base_path = self.request.user.profile.VideoExperiment_path\n file_list = listdir(base_path)\n # include only csv files\n file_list = [el[:-4] for el in file_list if ('.csv' in el) and ('sync' not in el)]\n # get a list of the existing file names (bonsai)\n existing_rows = [el[0] for el in VideoExperiment.objects.values_list('slug')]\n # for all the files\n for file in file_list:\n # check if the entry already exists\n if file.lower() in existing_rows:\n # if so, skip making a new one\n continue\n # get the data for the entry\n data_dict = parse_path_experiment(file, self, 'VideoExperiment_path')\n # get rid of the animal2 entry\n del data_dict['animal2']\n # of the screen one\n del data_dict['screen_path']\n # and of the motive one\n del data_dict['track_path']\n # check the paths in the filesystem, otherwise leave the entry empty\n for key, value in data_dict.items():\n # if the entry is already empty, don't check\n if data_dict[key] == '':\n continue\n if (isinstance(value, str)) and ('path' in key) and (not exists(value)):\n # print a warning\n print('Path not found for key %s and value %s' % (key, value))\n # clear the path\n data_dict[key] = ''\n\n # # if the tif file exists but the calcium_data file doesn't, log it in the notes\n # This is for when we didn't have calcium signal extraction as part of snakemake\n # if (data_dict['fluo_path'] == '') and (data_dict['tif_path'] != ''):\n # data_dict['imaging'] = 'no'\n # data_dict['notes'] += 'norois'\n # create the model instance with the data\n model_instance = VideoExperiment.objects.create(**data_dict)\n # get the model for the experiment type to use\n experiment_type = ExperimentType.objects.filter(experiment_name='Free_behavior')\n # add the experiment type to the model instance (must use set() cause m2m)\n model_instance.experiment_type.set(experiment_type)\n # save the model instance\n model_instance.save()\n\n return HttpResponseRedirect('/loggers/video_experiment/')\n except:\n print('Problem file:' + file)\n return HttpResponseBadRequest('loading file %s failed, check file names' % file)", "def handle_uploaded_file(f):\n path = settings.ABS_PATH + \"Server_data_visualization/uploads/executable\"\n destination = open(path, \"wb+\")\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n # os.chmod(path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)" ]
[ "0.5591472", "0.5509878", "0.5504769", "0.54786956", "0.547423", "0.54169196", "0.5415467", "0.53959715", "0.53637743", "0.5355542", "0.53186345", "0.5317722", "0.5310746", "0.5300013", "0.5291285", "0.52872956", "0.5278462", "0.5226595", "0.5223359", "0.52202976", "0.52200496", "0.52180284", "0.52032465", "0.5174217", "0.5173603", "0.51735204", "0.5171732", "0.5167241", "0.51613855", "0.51577026" ]
0.6811003
0
Plots how additional peaks are imputed in input_wavelist from reference_wavelist by WaveCrossValidator
def plot_cross_validator(input_wavelist: WaveList, reference_wavelist: WaveList, results: DataFrame, filename: str, plot_path: str): fig, axs = plt.subplots(nrows=2, ncols=2) # plot peaks after sub_c axs[0, 0].set_title('Peaks in Original Series') axs[0, 0].plot(input_wavelist.raw_data.values) axs[0, 0].scatter(input_wavelist.peaks_sub_c['location'].values, input_wavelist.raw_data.values[ input_wavelist.peaks_sub_c['location'].values.astype(int)], color='red', marker='o') # plot peaks from sub_e axs[0, 1].set_title('After Cross-Validation') axs[0, 1].plot(input_wavelist.raw_data.values) axs[0, 1].scatter(results['location'].values, input_wavelist.raw_data.values[ results['location'].values.astype(int)], color='red', marker='o') # plot peaks from reference series axs[1, 1].set_title('Peaks in Reference Series') axs[1, 1].plot(reference_wavelist.raw_data.values) axs[1, 1].scatter(reference_wavelist.peaks_sub_c['location'].values, reference_wavelist.raw_data.values[ reference_wavelist.peaks_sub_c['location'].values.astype(int)], color='red', marker='o') fig.tight_layout() plt.savefig(os.path.join(plot_path, filename + '_algorithm_e.png')) plt.close('all')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_waveform(self, peaks=[]):\n if peaks is None:\n peaks = []\n data = self.amplitude\n x_axis = range(0, len(data))\n x_axis = [x / self.fs for x in x_axis]\n plt.plot(x_axis, data)\n plt.axhline(self.height)\n for p in peaks:\n plt.axvline(p / self.fs, color=\"red\", alpha=0.2)\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time (seconds)\")\n plt.title(\"Waveform\")\n plt.show()", "def plot_mean_hfo(evlist,color='blue', xlim =[-1,1], figure_size=(10,10),dpi=600,saveplot = None):\n f = plt.figure(figsize=figure_size,dpi=dpi)\n \n \n \n \n raw = np.array([]) # creating a empty array \n filt = np.array([]) # creating a empty array\n pxx = np.array([]) # creating a empty array\n nwave, a = evlist[0].waveform.shape\n time_v = np.linspace(-1,1,nwave,endpoint=True)\n npw, = evlist[0].spectrum.nPxx.shape\n F = evlist[0].spectrum.F\n for hfo in evlist:\n raw = np.append(raw, hfo.waveform[:,0])\n #ax1.plot(time_v,hfo.waveform[:,0],lw=.5)\n filt = np.append(filt, hfo.waveform[:,1])\n #ax2.plot(time_v,hfo.waveform[:,1],lw=.5)\n pxx = np.append(pxx, hfo.spectrum.nPxx)\n \n raw = raw.reshape(len(evlist),nwave)\n filt = filt.reshape(len(evlist),nwave)\n pxx = pxx.reshape(len(evlist),npw)\n\n \n \n \n ax1 = plt.subplot(311)\n m = np.mean(raw,0)\n s = np.std(raw,0)/np.sqrt(raw.shape[0])\n plt.plot(time_v,m,'k',lw=2)\n #ax1.fill_between(time_v,m+s,m-s, facecolor=color, alpha=0.1)\n ax1.set_xlim(xlim)\n adjust_spines(ax1, ['left'])\n \n ax2 = plt.subplot(312)\n m = np.mean(filt,0)\n s = np.std(filt,0)/np.sqrt(filt.shape[0])\n plt.plot(time_v,m,'k',lw=2)\n #ax2.fill_between(time_v,m+s,m-s, facecolor=color, alpha=0.1)\n ax2.set_xlim(xlim)\n adjust_spines(ax2, ['left', 'bottom'])\n \n ax3 = plt.subplot(313)\n m = np.mean(pxx,0)\n s = np.std(pxx,0)/np.sqrt(pxx.shape[0])\n plt.plot(F,m,'k',lw=2)\n ax3.fill_between(F,m+s,m-s, facecolor=color, alpha=0.1)\n adjust_spines(ax3, ['left', 'bottom'])\n \n if saveplot != None:\n if type(saveplot) == str: \n plt.savefig(saveplot, bbox_inches='tight')\n else:\n raise Exception('saveplot should be a string')", "def analyze_wfs_no_png(self, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001):\n\n print(\"---------------------------------\")\n print(\"Analyzing waveforms to get maxima\")\n print(\"---------------------------------\")\n\n # Creo una progress bar per rendere piu' fruibile visivamente il programma\n bar = progressbar.ProgressBar(maxval=self.number_of_events,\n widgets=[progressbar.Bar(\"=\", \"[\", \"]\"), \" \", progressbar.Percentage()])\n bar.start()\n counter = 0\n peaks_temp = pd.DataFrame()\n num_fig = 0\n print(\"Events: \"+str(len(self.table_sipm_time['ev'])))\n # Ora faccio un loop sugli eventi..\n for event in self.table_sipm_time['ev']:\n\n # Creo un np.array con gli indici della singola waveform..\n wf_idx = [event*self.points_per_wf, event *\n self.points_per_wf+self.points_per_wf]\n # ..i tempi di ciascun punto..\n wf_time = self.table_sipm_time['t'].iloc[event] + \\\n self.table_sipm_wf['TIME'][int(wf_idx[0]):int(wf_idx[1])]\n # ..e i valori del segnale di ciascun ppunto\n wf_ch = - \\\n self.table_sipm_wf['CH1'][int(wf_idx[0]):int(wf_idx[1])]\n\n # Per trovare la baseline, faccio un fit polinomiale di grado 0..\n # ..su un numero finito di punti iniziali, specificato dall'utente..\n # ..poi la salvo internamente alla classe\n self.baseline = np.polyfit(\n wf_time[0:n_bsl], wf_ch[0:n_bsl], 0)[0]\n # Voglio anche disegnarla sui plot, quindi mi creo una lista di x e di y..\n # ..nello spazio della waveform\n bsl_time = 
wf_time[0:n_bsl]\n bsl_ch = [self.baseline] * n_bsl\n\n # Per trovre i picchi, uso la funzione find_peaks di scipy.signal\n # I valori di height e prominence sono specificati dall'utente..\n # ..e scalti per selezionare tutti i picchi senza prendere rumore\n peaks, _ = sp.find_peaks(\n wf_ch, height=peak_height, prominence=peak_prominences)\n\n peaks_temp = pd.concat([peaks_temp, pd.DataFrame(\n {'t': wf_time.iloc[peaks], 'A': wf_ch.iloc[peaks]-self.baseline})], ignore_index=True)\n bar.update(counter+1)\n counter += 1\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat(\n [self.wf_peaks, peaks_temp], ignore_index=True)\n\n bar.finish()\n print(\"---------------------------------\")\n print(\"Waveform analysis completed!\")\n # Devo ora ricavare di nuovo i Dt dai tempi assoluti, utilizzando la funzione diff()..\n self.wf_peaks['dt'] = self.wf_peaks['t'].diff()\n # ..e scartando il primo valore (che non ha un Dt)\n self.wf_peaks = self.wf_peaks.iloc[1:]\n print('Found {:d} peaks in waveforms\\n'.format(len(self.wf_peaks)))", "def analyze_ev_wf_compact(self, event, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001):\n\n fig, ax = plt.subplots(nrows=3, ncols=3)\n peaks_temp = pd.DataFrame()\n\n for i in range(0, 9):\n if event < len(self.table_sipm_time):\n # Creo un np.array con gli indici della singola waveform..\n wf_idx = [event*self.points_per_wf, event *\n self.points_per_wf+self.points_per_wf]\n # ..i tempi di ciascun punto..\n wf_time = self.table_sipm_time['t'].iloc[event] + \\\n self.table_sipm_wf['TIME'][int(wf_idx[0]):int(wf_idx[1])]\n # ..e i valori del segnale di ciascun ppunto\n wf_ch = - \\\n self.table_sipm_wf['CH1'][int(wf_idx[0]):int(wf_idx[1])]\n\n # Per trovare la baseline, faccio un fit polinomiale di grado 0..\n # ..su un numero finito di punti iniziali, specificato dall'utente..\n # ..poi la salvo internamente alla classe\n self.baseline = np.polyfit(\n wf_time[0:n_bsl], wf_ch[0:n_bsl], 0)[0]\n # Voglio anche disegnarla sui plot, quindi mi creo una lista di x e di y..\n # ..nello spazio della waveform\n bsl_time = wf_time[0:n_bsl]\n bsl_ch = [self.baseline] * n_bsl\n\n # Per trovre i picchi, uso la funzione find_peaks di scipy.signal\n # I valori di height e prominence sono specificati dall'utente..\n # ..e scalti per selezionare tutti i picchi senza prendere rumore\n peaks, _ = sp.find_peaks(\n wf_ch, height=peak_height, prominence=peak_prominences)\n\n # Ora posso plottare tutto:\n plt.ticklabel_format(axis='x', style='sci', scilimits=(0, 0))\n # la waveform..\n ax[int(i / 3)][i % 3].plot(wf_time,\n wf_ch, linestyle='-', linewidth=1)\n # ..la baseline..\n ax[int(i / 3)][i % 3].plot(bsl_time, bsl_ch, linestyle='-',\n linewidth=1, c='darkgreen')\n # ..e i picchi (se ci sono)\n if len(peaks) > 0:\n ax[int(i / 3)][i % 3].scatter(wf_time.iloc[peaks],\n wf_ch.iloc[peaks], c='darkred')\n\n # Set common labels\n fig.text(0.5, 0.01, 'Time (s)', ha='center', va='center')\n fig.text(0.02, 0.5, 'Amplitude (V)', ha='center', va='center', rotation='vertical')\n \n \n # plt.show()\n peaks_temp = pd.concat([peaks_temp, pd.DataFrame(\n {'t': wf_time.iloc[peaks], 'A': wf_ch.iloc[peaks]-self.baseline})], ignore_index=True)\n event += 1\n\n # ..e salvo il plot in una cartella a parte\n folder_name = 'plot'\n plot_name = '{0}/{1}_ev{2}.png'.format(\n folder_name, pic_name, event)\n fig.savefig(plot_name)\n plt.close(fig)\n\n # La funzione restituisce i valori di tempo e ampiezza (ottenuta come Ch1-baseline)..\n # 
..agli indici dei massimi trovati da find_peaks\n return peaks_temp", "def plot_solution(self, identlist, aperture_lst, plot_ax1=False, **kwargs):\n coeff = kwargs.pop('coeff')\n k = kwargs.pop('k')\n offset = kwargs.pop('offset')\n npixel = kwargs.pop('npixel')\n std = kwargs.pop('std')\n nuse = kwargs.pop('nuse')\n ntot = kwargs.pop('ntot')\n xorder = kwargs.pop('xorder')\n yorder = kwargs.pop('yorder')\n clipping = kwargs.pop('clipping')\n maxiter = kwargs.pop('maxiter')\n\n label_size = 13 # fontsize for x, y labels\n tick_size = 12 # fontsize for x, y ticks\n\n #wave_scale = 'linear'\n wave_scale = 'reciprocal'\n\n #colors = 'rgbcmyk'\n\n self._ax2.cla()\n self._ax3.cla()\n\n if plot_ax1:\n self._ax1.cla()\n x = np.linspace(0, npixel-1, 100, dtype=np.float64)\n\n # find the maximum and minimum wavelength\n wl_min, wl_max = 1e9,0\n allwave_lst = {}\n for aperture in aperture_lst:\n order = k*aperture + offset\n wave = get_wavelength(coeff, npixel, x, np.repeat(order, x.size))\n allwave_lst[aperture] = wave\n wl_max = max(wl_max, wave.max())\n wl_min = min(wl_min, wave.min())\n # plot maximum and minimum wavelength, to determine the display\n # range of this axes, and the tick positions\n self._ax1.plot([0, 0],[wl_min, wl_max], color='none')\n yticks = self._ax1.get_yticks()\n self._ax1.cla()\n\n\n for aperture in aperture_lst:\n order = k*aperture + offset\n color = 'C{}'.format(order%10)\n\n # plot pixel vs. wavelength\n if plot_ax1:\n wave = allwave_lst[aperture]\n if wave_scale=='reciprocal':\n self._ax1.plot(x, 1/wave,\n color=color, ls='-', alpha=0.8, lw=0.8)\n else:\n self._ax1.plot(x, wave,\n color=color, ls='-', alpha=0.8, lw=0.8)\n\n # plot identified lines\n if aperture in identlist:\n list1 = identlist[aperture]\n pix_lst = list1['pixel']\n wav_lst = list1['wavelength']\n mask = list1['mask'].astype(bool)\n res_lst = list1['residual']\n\n if plot_ax1:\n if wave_scale=='reciprocal':\n self._ax1.scatter(pix_lst[mask], 1/wav_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax1.scatter(pix_lst[~mask], 1/wav_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8,\n edgecolor=color)\n else:\n self._ax1.scatter(pix_lst[mask], wav_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax1.scatter(pix_lst[~mask], wav_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8,\n edgecolor=color)\n\n repeat_aper_lst = np.repeat(aperture, pix_lst.size)\n self._ax2.scatter(repeat_aper_lst[mask], res_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax2.scatter(repeat_aper_lst[~mask], res_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8, ec=color)\n self._ax3.scatter(pix_lst[mask], res_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax3.scatter(pix_lst[~mask], res_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8, ec=color)\n\n # refresh texts in the residual panels\n text = 'R.M.S. 
= {:.5f}, N = {}/{}'.format(std, nuse, ntot)\n self._ax3._residual_text.set_text(text)\n text = u'Xorder = {}, Yorder = {}, clipping = \\xb1{:g}, Niter = {}'.format(\n xorder, yorder, clipping, maxiter)\n self._ax2._fitpar_text.set_text(text)\n\n # adjust layout for ax1\n if plot_ax1:\n self._ax1.set_xlim(0, npixel-1)\n if wave_scale == 'reciprocal':\n _y11, _y22 = self._ax1.get_ylim()\n newtick_lst, newticklabel_lst = [], []\n for tick in yticks:\n if _y11 < 1/tick < _y22:\n newtick_lst.append(1/tick)\n newticklabel_lst.append(tick)\n self._ax1.set_yticks(newtick_lst)\n self._ax1.set_yticklabels(newticklabel_lst)\n self._ax1.set_ylim(_y22, _y11)\n self._ax1.set_xlabel('Pixel', fontsize=label_size)\n self._ax1.set_ylabel(u'\\u03bb (\\xc5)', fontsize=label_size)\n self._ax1.grid(True, ls=':', color='gray', alpha=1, lw=0.5)\n self._ax1.set_axisbelow(True)\n self._ax1._aperture_text.set_text('')\n for tick in self._ax1.xaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n for tick in self._ax1.yaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n\n # adjust axis layout for ax2 (residual on aperture space)\n self._ax2.axhline(y=0, color='k', ls='--', lw=0.5)\n for i in np.arange(-3,3+0.1):\n self._ax2.axhline(y=i*std, color='k', ls=':', lw=0.5)\n x1, x2 = self._ax2.get_xlim()\n x1 = max(x1,aperture_lst.min())\n x2 = min(x2,aperture_lst.max())\n self._ax2.set_xlim(x1, x2)\n self._ax2.set_ylim(-6*std, 6*std)\n self._ax2.set_xlabel('Aperture', fontsize=label_size)\n self._ax2.set_ylabel(u'Residual on \\u03bb (\\xc5)', fontsize=label_size)\n for tick in self._ax2.xaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n for tick in self._ax2.yaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n\n ## adjust axis layout for ax3 (residual on pixel space)\n self._ax3.axhline(y=0, color='k', ls='--', lw=0.5)\n for i in np.arange(-3,3+0.1):\n self._ax3.axhline(y=i*std, color='k', ls=':', lw=0.5)\n self._ax3.set_xlim(0, npixel-1)\n self._ax3.set_ylim(-6*std, 6*std)\n self._ax3.set_xlabel('Pixel', fontsize=label_size)\n self._ax3.set_ylabel(u'Residual on \\u03bb (\\xc5)', fontsize=label_size)\n for tick in self._ax3.xaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n for tick in self._ax3.yaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)", "def update_overlaid_plot(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n waveforms = [trigger, trace]\n\n first_peak, second_peak = self.get_windowed_data(waveforms[0], waveforms[1])\n self.overlaid_lines[0].set_ydata(first_peak)\n self.overlaid_lines[0].set_xdata(range(len(first_peak)))\n self.overlaid_lines[1].set_ydata(second_peak)\n self.overlaid_lines[1].set_xdata(range(len(second_peak)))\n\n areas = [integ.simps(first_peak), integ.simps(second_peak)]\n labels = ['%.1f' % areas[0], '%.1f' % areas[1]]\n\n# for area in areas:\n# if area < 0.1:\n# raise RangeError # calculation warning error for example\n self.ax2.legend([self.overlaid_lines[0], self.overlaid_lines[1]],\n labels)\n\n self.draw()", "def stack_plot(spec_list, offset = False, alpha=1.):\r\n\r\n import matplotlib.pyplot as plt\r\n\r\n offset_val = 0.\r\n for spec in spec_list:\r\n dat, errdat = read_spectrum(spec)\r\n plt.plot(dat['wave'], dat['flux']+offset_val, label = spec, alpha=alpha)\r\n if offset:\r\n offset_val -= np.median(dat['flux'])\r\n print spec\r\n plt.legend()\r\n 
plt.show()", "def spectral_check(self, ):\r\n a, b = self.dfa, self.dfm.copy()\r\n b['ts_a']=a.ts\r\n b['flux_a'] = a.flux\r\n b['dflux'] = (b.flux-b.flux_a)/b.flux_unc\r\n b['eflux100_a'] = a.eflux100\r\n b['deflux'] = (b.eflux100-b.eflux100_a)/b.eflux100_unc\r\n b['pindex_a'] = a.pindex\r\n b['gdelta'] = (b.pindex-b.pindex_a)/b.pindex_unc\r\n self.dfm = b # since copy\r\n\r\n fig,axx = plt.subplots(1,2, figsize=(10,5), sharey=True)\r\n hkw = dict(bins=np.linspace(-5,5,51), histtype='step', lw=2, density=True)\r\n\r\n cut = (b.ts>50) & ~pd.isnull(b.deflux) & ~pd.isnull(b.gdelta) &\\\r\n (b.modelname==\"LogParabola\") & (b.pindex<3) & (b.pindex>0.5) &\\\r\n (b.e0>500) &(b.eflux100_unc>0) &(b.pindex_unc>0)\r\n self.check_total = sum(cut)\r\n for ax, title, val in zip(axx.flatten(), ['Energy Flux', 'Spectral index'], [b.deflux, b.gdelta]): \r\n\r\n df=val[cut]\r\n ax.hist(df.clip(-5,5), label='mean {:5.2f}\\nstd {:5.2f}'.format(df.mean(),df.std()), **hkw);\r\n ax.grid(alpha=0.5); \r\n x=np.linspace(-4,4)\r\n ax.plot(x, stats.norm.pdf(x), '--g' );\r\n ax.set(xlabel='normalized fit deviation', title=title, )\r\n ax.legend(loc='upper left',prop=dict(family='monospace'))\r\n fig.suptitle('Normalized devations of fit from model', fontsize=16);\r\n\r\n return fig", "def plot_overscan_variation(t_lst, overscan_lst, figfile):\n \n # Quality check plot of the mean overscan value over time \n fig = plt.figure(figsize=(8,6), dpi=150)\n ax2 = fig.add_axes([0.1,0.60,0.85,0.35])\n ax1 = fig.add_axes([0.1,0.15,0.85,0.35])\n #conversion of the DATE-string to a number\n date_lst = [dateutil.parser.parse(t) for t in t_lst]\n datenums = mdates.date2num(date_lst)\n\n ax1.plot_date(datenums, overscan_lst, 'r-', label='mean')\n ax2.plot(overscan_lst, 'r-', label='mean')\n for ax in fig.get_axes():\n leg = ax.legend(loc='upper right')\n leg.get_frame().set_alpha(0.1)\n ax1.set_xlabel('Time')\n ax2.set_xlabel('Frame')\n ax1.set_ylabel('Overscan mean ADU')\n ax2.set_ylabel('Overscan mean ADU')\n # adjust x and y limit\n y11,y12 = ax1.get_ylim()\n y21,y22 = ax2.get_ylim()\n z1 = min(y11,y21)\n z2 = max(y21,y22)\n ax1.set_ylim(z1,z2)\n ax2.set_ylim(z1,z2)\n ax2.set_xlim(0, len(overscan_lst)-1)\n # adjust rotation angle of ticks in time axis\n plt.setp(ax1.get_xticklabels(),rotation=30)\n\n # save figure\n fig.savefig(figfile)\n plt.close(fig)", "def plot_ave(results_list):\n x_range = range(len(results_list[0]))\n err_x, err_y, std_list = [], [], []\n\n for i in x_range:\n if i % 10 == 0:\n #get average for each generation\n column = [] \n for result in results_list:\n column.append(result[i])\n average = np.average(column)\n \n std_dev = np.std(column)\n err_x.append(i)\n err_y.append(average)\n std_list.append(std_dev)\n\n pylab.errorbar(err_x, err_y, yerr=std_list)\n pylab.show()", "def sample_list3(data_list, rows=15, cols=4, start_with=0, show_every=2, scale=4, fig_name=None, start_inx=0):\n\n n_batch = len(data_list)\n _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])\n\n for ind in range(n_batch):\n # read data and calculate average precision\n input1 = data_list[ind]['slice1']\n input2 = data_list[ind]['slice2']\n label = data_list[ind]['label']\n hu0050 = data_list[ind]['hu0050']\n overlap = data_list[ind]['overlap']\n f_score = data_list[ind]['f1']\n mix_overlap = data_list[ind]['mix_overlap']\n noncal_eval = data_list[ind]['noncal_eval']\n file_path = data_list[ind]['file_path']\n if (ind - start_with) % show_every == 0:\n i = (ind - start_with) // show_every\n if i < rows:\n ax[i, 
0].imshow(input1, cmap='gray')\n ax[i, 0].set_title(\"Slice {} ({}) \\n {}\".format(ind + start_inx, file_path, 'Input with HU(-100~155)'), loc='left')\n ax[i, 0].axis('off')\n\n ax[i, 1].imshow(input2, cmap='gray')\n ax[i, 1].set_title(\"{}\".format('Input with HU(200~1200)'))\n ax[i, 1].axis('off')\n\n ax[i, 2].imshow(gray2rgb(label))\n ax[i, 2].set_title('{}'.format('Label'))\n ax[i, 2].axis('off')\n\n ax[i, 3].imshow(gray2rgb(hu0050))\n ax[i, 3].set_title('{}'.format('Mask HU(0~50)'))\n ax[i, 3].axis('off')\n\n ax[i, 4].imshow(gray2rgb(overlap))\n ax[i, 4].set_title('{} (F1= {:.4f})'.format('Overlap', f_score))\n ax[i, 4].axis('off')\n\n # not all red pixels are within HU range 0~50\n\n if(np.sum(overlap == 76)) != 0:\n n_above50, n_below0, topk, buttomk = noncal_eval[0], noncal_eval[1], noncal_eval[2:7], noncal_eval[7:12]\n ax[i, 4].text(5, 30, \"top5 HU: {}\".format(topk), color='red')\n ax[i, 4].text(5, 60, \"but5 HU: {}\".format(buttomk), color='red')\n ax[i, 4].text(5, 90, \"Num of pixels HU>50: {}\".format(n_above50), color='red')\n ax[i, 4].text(5, 120, \"Num of pixels HU<0: {}\".format(n_below0), color='red')\n\n ax[i, 5].imshow(gray2rgb(mix_overlap))\n ax[i, 5].set_title('{} (F1= {:.4f})'.format('Label+Overlap', f_score))\n ax[i, 5].axis('off')\n\n # ax[i, 3].scatter(range(0, n_class), f_score)\n # ax[i, 3].set_title('Slice %d : Ave F-score = %0.2f' % (ind + start_inx, ave_f_score))\n # ax[i, 3].set_ylabel('F score')\n # ax[i, 3].set_ylim([-0.1, 1.1])\n\n # plt.show()\n if fig_name:\n plt.savefig(fig_name + '.pdf')\n plt.close()", "def plotBlockFlux(core, fName=None, bList=None, peak=False, adjoint=False, bList2=[]):\n\n class BlockListFlux:\n def __init__(\n self, nGroup, blockList=[], adjoint=False, peak=False, primary=False\n ):\n self.nGroup = nGroup\n self.blockList = blockList\n self.adjoint = adjoint\n self.peak = peak\n self.avgHistogram = None\n self.eHistogram = None\n self.peakHistogram = None\n self.E = None\n\n if not blockList:\n self.avgFlux = numpy.zeros(self.nGroup)\n self.peakFlux = numpy.zeros(self.nGroup)\n self.lineAvg = \"-\"\n self.linePeak = \"-\"\n else:\n self.avgFlux = numpy.zeros(self.nGroup)\n self.peakFlux = numpy.zeros(self.nGroup)\n\n if self.adjoint:\n self.labelAvg = \"Average Adjoint Flux\"\n self.labelPeak = \"Peak Adjoint Flux\"\n else:\n self.labelAvg = \"Average Flux\"\n self.labelPeak = \"Peak Flux\"\n\n if primary:\n self.lineAvg = \"-\"\n self.linePeak = \"-\"\n else:\n self.lineAvg = \"r--\"\n self.linePeak = \"k--\"\n\n def calcAverage(self):\n for b in self.blockList:\n thisFlux = numpy.array(b.getMgFlux(adjoint=self.adjoint))\n self.avgFlux += numpy.array(thisFlux)\n if sum(thisFlux) > sum(self.peakFlux):\n self.peakFlux = thisFlux\n\n self.avgFlux = self.avgFlux / len(bList)\n\n def setEnergyStructure(self, upperEnergyBounds):\n self.E = [eMax / 1e6 for eMax in upperEnergyBounds]\n\n def makePlotHistograms(self):\n self.eHistogram, self.avgHistogram = makeHistogram(self.E, self.avgFlux)\n if self.peak:\n _, self.peakHistogram = makeHistogram(self.E, self.peakFlux)\n\n def checkSize(self):\n if len(self.E) != len(self.avgFlux):\n runLog.error(self.avgFlux)\n raise\n\n def getTable(self):\n return enumerate(zip(self.E, self.avgFlux, self.peakFlux))\n\n if bList is None:\n bList = core.getBlocks()\n bList = list(bList)\n if adjoint and bList2:\n runLog.warning(\"Cannot plot adjoint flux with bList2 argument\")\n return\n elif adjoint:\n bList2 = bList\n\n try:\n G = len(core.lib.neutronEnergyUpperBounds)\n except: # noqa: 
bare-except\n runLog.warning(\"No ISOTXS library attached so no flux plots.\")\n return\n\n BlockListFluxes = set()\n bf1 = BlockListFlux(G, blockList=bList, peak=peak, primary=True)\n BlockListFluxes.add(bf1)\n if bList2:\n bf2 = BlockListFlux(G, blockList=bList2, adjoint=adjoint, peak=peak)\n BlockListFluxes.add(bf2)\n\n for bf in BlockListFluxes:\n bf.calcAverage()\n bf.setEnergyStructure(core.lib.neutronEnergyUpperBounds)\n bf.checkSize()\n bf.makePlotHistograms()\n\n if fName:\n # write a little flux text file.\n txtFileName = os.path.splitext(fName)[0] + \".txt\"\n with open(txtFileName, \"w\") as f:\n f.write(\n \"{0:16s} {1:16s} {2:16s}\\n\".format(\n \"Energy_Group\", \"Average_Flux\", \"Peak_Flux\"\n )\n )\n for _, (eMax, avgFlux, peakFlux) in bf1.getTable():\n f.write(\"{0:12E} {1:12E} {2:12E}\\n\".format(eMax, avgFlux, peakFlux))\n\n if max(bf1.avgFlux) <= 0.0:\n runLog.warning(\n \"Cannot plot flux with maxval=={0} in {1}\".format(bf1.avgFlux, bList[0])\n )\n return\n\n plt.figure()\n plt.plot(bf1.eHistogram, bf1.avgHistogram, bf1.lineAvg, label=bf1.labelAvg)\n\n if peak:\n plt.plot(bf1.eHistogram, bf1.peakHistogram, bf1.linePeak, label=bf1.labelPeak)\n\n ax = plt.gca()\n ax.set_xscale(\"log\")\n ax.set_yscale(\"log\")\n plt.xlabel(\"Energy (MeV)\")\n plt.ylabel(\"Flux (n/cm$^2$/s)\")\n\n if peak or bList2:\n plt.legend(loc=\"lower right\")\n\n plt.grid(color=\"0.70\")\n if bList2:\n if adjoint:\n plt.twinx()\n plt.ylabel(\"Adjoint Flux (n/cm$^2$/s)\", rotation=270)\n ax2 = plt.gca()\n ax2.set_yscale(\"log\")\n plt.plot(bf2.eHistogram, bf2.avgHistogram, bf2.lineAvg, label=bf2.labelAvg)\n if peak and not adjoint:\n plt.plot(\n bf2.eHistogram, bf2.peakHistogram, bf2.linePeak, label=bf2.labelPeak\n )\n plt.legend(loc=\"lower left\")\n plt.title(\"Group flux\")\n\n if fName:\n plt.savefig(fName)\n plt.close()\n report.setData(\n \"Flux Plot {}\".format(os.path.split(fName)[1]),\n os.path.abspath(fName),\n report.FLUX_PLOT,\n )\n else:\n plt.show()", "def plot_source_residuals_comparison(source_truth, source_model_list, name_list, \n vmin_res=-0.5, vmax_res=-0.5, cmap='cubehelix',\n fontsize=12):\n n_model = len(source_model_list)\n fig, axes = plt.subplots(1+n_model, 2, figsize=(9, (1+n_model)*3.5))\n axes[0, 1].axis('off')\n ax = axes[0, 0]\n #ax.get_xaxis().set_visible(False)\n #ax.get_yaxis().set_visible(False)\n ax.set_title(\"true source\", fontsize=fontsize)\n im = ax.imshow(source_truth, cmap=cmap, vmin=0)\n lims = (len(source_truth)/4, 3*len(source_truth)/4) # zoom a bit on the image\n #ax.set_xlim(*lims)\n #ax.set_ylim(*lims)\n plot_util.nice_colorbar(im, label=\"flux\", fontsize=fontsize)\n \n i = 1\n for source_model, name in zip(source_model_list, name_list):\n print(\"min/max for source model '{}': {}/{}\".format(name, source_model.min(), source_model.max()))\n\n residuals_source = source_model - source_truth\n\n ax = axes[i, 0]\n ax.set_title(\"model '{}'\".format(name), fontsize=fontsize)\n #ax.get_xaxis().set_visible(False)\n #ax.get_yaxis().set_visible(False)\n im = ax.imshow(source_model, cmap=cmap)\n #ax.set_xlim(*lims)\n #ax.set_ylim(*lims)\n plot_util.nice_colorbar(im, label=\"flux\", fontsize=fontsize)\n \n ax = axes[i, 1]\n #ax.get_xaxis().set_visible(False)\n #ax.get_yaxis().set_visible(False)\n ax.set_title(\"difference\", fontsize=fontsize)\n #ax.set_xlim(*lims)\n #ax.set_ylim(*lims)\n im = ax.imshow(residuals_source, cmap='RdBu_r', vmin=vmin_res, vmax=vmax_res)\n plot_util.nice_colorbar_residuals(im, residuals_source, vmin_res, vmax_res,\n 
label=r\"f${}_{\\rm model}$ - f${}_{\\rm truth}$\", fontsize=fontsize)\n \n print(\"SDR for model '{}' = {:.3f}\".format(name, metrics_util.SDR(source_truth, source_model)))\n i += 1\n\n return fig", "def plot_ewald_peak_distances(self, ewaldsphere, filename='output.png', plot_region=[None, None, None, None], plot_buffers=[0.16, 0.035, 0.16, 0.03], label_peaks=False, blanked_figure=False, peaks_present=None, max_hkl=10, thresh=0.01):\n \n\n # Plot styling\n plt.rcParams['font.family'] = 'sans-serif'\n plt.rcParams['axes.labelsize'] = 30\n plt.rcParams['xtick.labelsize'] = 'xx-large'\n plt.rcParams['ytick.labelsize'] = 'xx-large'\n\n #plt.rcParams['axes.labelsize'] = 35\n #plt.rcParams['xtick.labelsize'] = 28\n #plt.rcParams['ytick.labelsize'] = 28\n\n\n fig = plt.figure(figsize=(7,7))\n #fig.subplots_adjust(left=0.17, bottom=0.15, right=0.97, top=0.94, wspace=0.2, hspace=0.2)\n #ax = plt.subplot(111)\n left_buf, right_buf, bottom_buf, top_buf = plot_buffers\n fig_width = 1.0-right_buf-left_buf\n fig_height = 1.0-top_buf-bottom_buf\n ax = fig.add_axes( [left_buf, bottom_buf, fig_width, fig_height], aspect='equal' )\n\n \n\n if True:\n # Symmetry-peaks\n\n self.apply_rotation_z( -120.0 )\n peaks_x, peaks_y, names, distances = self.peak_list(ewaldsphere, peaks_present=peaks_present, plot_region=plot_region, max_hkl=max_hkl, thresh=thresh)\n \n #plt.scatter( peaks_x, peaks_y, s=80, facecolor='none', edgecolor=(0.7,0.7,0), linewidth=1.5 ) # Yellow peaks\n for x, y, d in zip(peaks_x, peaks_y, distances):\n size, linewidth, alpha = self.d_to_marker(d, thresh=thresh)\n plt.scatter( x, y, s=size, facecolor='none', edgecolor=(0.7,0.7,0), linewidth=linewidth, alpha=alpha ) # Yellow peaks\n if label_peaks:\n for x, y, s in zip( peaks_x, peaks_y, names ):\n plt.text( x, y, s, size=12, color='0.6', horizontalalignment='left', verticalalignment='bottom' )\n\n\n self.apply_rotation_z( +240.0 )\n peaks_x, peaks_y, names, distances = self.peak_list(ewaldsphere, peaks_present=peaks_present, plot_region=plot_region, max_hkl=max_hkl, thresh=thresh)\n \n #plt.scatter( peaks_x, peaks_y, s=80, facecolor='none', edgecolor=(0,0.7,0.7), linewidth=1.5 ) # Blue-green peaks\n for x, y, d in zip(peaks_x, peaks_y, distances):\n size, linewidth, alpha = self.d_to_marker(d, thresh=thresh)\n plt.scatter( x, y, s=size, facecolor='none', edgecolor=(0,0.7,0.7), linewidth=linewidth, alpha=alpha ) # Blue-green peaks\n \n if label_peaks:\n for x, y, s in zip( peaks_x, peaks_y, names ):\n plt.text( x, y, s, size=12, color='0.6', horizontalalignment='left', verticalalignment='bottom' )\n\n self.apply_rotation_z( -120.0 )\n\n \n \n # Regular peaks\n peaks_x, peaks_y, names, distances = self.peak_list(ewaldsphere, peaks_present=peaks_present, plot_region=plot_region, max_hkl=max_hkl, thresh=thresh)\n \n #plt.scatter( peaks_x, peaks_y, s=80, facecolor='none', edgecolor=(0,1,0), linewidth=1.5 ) # Green peaks\n for x, y, d in zip(peaks_x, peaks_y, distances):\n size, linewidth, alpha = self.d_to_marker(d, thresh=thresh)\n plt.scatter( x, y, s=size, facecolor='none', edgecolor=(0,1,0), linewidth=linewidth, alpha=alpha ) # Green peaks\n \n if label_peaks:\n for x, y, s in zip( peaks_x, peaks_y, names ):\n if blanked_figure:\n plt.text( x, y, s, size=12, color='1.0', horizontalalignment='left', verticalalignment='bottom' )\n else:\n plt.text( x, y, s, size=12, color='0.0', horizontalalignment='left', verticalalignment='bottom' )\n \n \n \n # Axis scaling\n xi, xf, yi, yf = ax.axis()\n if plot_region[0] != None: xi = plot_region[0]\n if 
plot_region[1] != None: xf = plot_region[1]\n if plot_region[2] != None: yi = plot_region[2]\n if plot_region[3] != None: yf = plot_region[3]\n if plot_region[0]==None and plot_region[1]==None and plot_region[2]==None and plot_region[3]==None:\n xf = max( xi, xf, yi, yf )\n yf = xf\n xi = -xf\n yi = -yf\n \n \n # Show central meridian of Ewald sphere\n qxys, qzs = ewaldsphere.central_meridian_arc()\n plt.plot( qxys, qzs, '-', color='0.5', linewidth=0.5 )\n plt.plot( -1*qxys, qzs, '-', color='0.5', linewidth=0.5 )\n \n \n \n ax.axis( [xi, xf, yi, yf] )\n \n if blanked_figure:\n plt.xticks( [] )\n plt.yticks( [] )\n else:\n plt.xlabel( r'$q_{xy} \\, (\\mathrm{\\AA^{-1}})$', size=30 )\n plt.ylabel( r'$q_{z} \\, (\\mathrm{\\AA^{-1}})$', size=30 )\n \n \n plt.savefig( filename, transparent=blanked_figure ) \n plt.close()", "def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n 
axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()", "def relative_src_bg(self):\n fig, ax = plt.subplots()\n \n for oneF in ['extracted_flux','extracted_bg_only']:\n wave, f = self.result['1d'][oneF]\n ax.plot(wave,f,label=oneF)\n ax.set_xlabel('Wavelength ($\\mu$m)')\n ax.set_ylabel('Extracted Flux')\n ax.legend()\n \n fig.show()", "def validate(self):\n if not self.checkUserInput():\n return\n # detect_peaks function requires mpd (= minimum peak distance)\n # mpw should be converted in mpd\n mpd = self.mpw * self.npt / self.xspan\n npk = ntry = 0\n while npk == 0 and ntry < 5:\n peakindx, _ = signal.find_peaks(self.Y, height=self.mph,\n distance=mpd, threshold=self.thres)\n npk = len(peakindx)\n if npk == 0:\n if self.thres == 0:\n break\n if ntry == 3:\n self.thres = 0.0\n else:\n self.thres /= 2\n ntry += 1\n if not npk:\n QtWidgets.QMessageBox.information(self, self.title, \"No peak found\")\n return\n msg = \"{0:d} peaks have been detected, \" \\\n \"do you want to continue ?\".format(npk)\n ans = QtWidgets.QMessageBox.information(self, self.title, msg,\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n if ans == QtWidgets.QMessageBox.No:\n self.reject()\n else:\n self.peakindx = peakindx\n self.accept()", "def plot_cell_peak_detection(cell_df, peak_indices, **kwargs): \n cell_index = cell_df.cell_index.unique()[0]\n expt_name = kwargs.get('expt_name', cell_df.Label.unique()[0][0:12])\n filetype = kwargs.get('filetype', 'png')\n filename = f'{expt_name}_cell{cell_index}_manual_vs_auto_bud'\n try:\n manual_bud_indices = kwargs.get('manual_bud_indices')\n except:\n manual_bud_indices = files.read_roi_position_indices(files.select_file(\"Choose manual bud rois .zip\"))\n collection_interval = kwargs.get('collection_interval', 10)\n death_cutoff_hr = (np.max(manual_bud_indices)*collection_interval)/60\n linewidth = kwargs.get('linewidth', 0.8)\n hidden_spines = kwargs.get('hidden_spines', ['top', 'right'])\n xlim = kwargs.get('xlim', (0, 70))\n ylim = kwargs.get('ylim', (0.9, 1.1))\n ylim_raw = kwargs.get('ylim_raw', (1000, 5000))\n \n # Create figure on which to plot traces\n fig = plt.figure(figsize=(4, 3), tight_layout=True)\n fig.set_dpi(300)\n\n # Plot the automaticall discovered bud frames\n ax = fig.add_subplot(212)\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n ax.set_ylabel('Filtered')\n ax.set_xlabel('Time (hrs.)')\n\n ax.plot(cell_df.hours, cell_df.dsred_mean_local_mean_norm_medfilt,\n linewidth=linewidth, color='black')\n\n for peak_index in peak_indices:\n peak_hr = 
(peak_index*collection_interval)/60\n if peak_hr <= death_cutoff_hr:\n ax.axvline(peak_hr,\n linewidth=linewidth, color='black',\n linestyle='--')\n else:\n pass\n for spine in [ax.spines[name] for name in hidden_spines]:\n spine.set_visible(False)\n\n # Plot the manually discovered bud frames\n ax2 = fig.add_subplot(211)\n ax2.set_xlim(xlim)\n ax2.set_ylim(ylim_raw)\n ax2.set_yticks(np.linspace(ylim_raw[0], ylim_raw[1], 3))\n ax2.set_ylabel('Raw DsRed')\n\n ax2.plot(cell_df.hours, cell_df.dsred_mean,\n linewidth=linewidth, color='red', alpha=0.7)\n for frame in manual_bud_indices[:]:\n frame_hr = (frame*collection_interval)/60\n ax2.axvline(frame_hr,\n linewidth=linewidth, color='black',\n linestyle='--')\n for spine in [ax2.spines[name] for name in hidden_spines]:\n spine.set_visible(False)\n\n fig.savefig(f'{filename}.{filetype}')", "def plot_field_uncertainties():\n\n resize_size = (1000, 1000)\n\n\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n if drug_name == 'DMSO':\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/30_hours/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n\n def transform(x):\n if type(x) is np.ndarray:\n x = change_array_lims(x)\n x = np.log(x)\n return x\n\n F_unc_vmin = -7\n F_unc_vmax = -4\n sigma_vmin = -5\n sigma_vmax = 0 #0.4\n sigma_unc_vmin = -6\n sigma_unc_vmax = -2\n\n fig_Fs = [plt.figure() for _ in range(3)]\n fig_uncertainty = plt.figure()\n sigma_lists, F_arrays = [], []\n for idx_fig, dir in enumerate(dirs):\n\n p_list = _load_and_resize_list(dir+'p_list_0.pickle')\n D_list = _load_and_resize_list(dir+'D_list_0.pickle')\n U_array = pickle.load(open(dir+'U.pickle', 'rb'))\n U_array = cv2.resize(U_array, resize_size, interpolation = cv2.INTER_LINEAR)\n Gx, Gy = np.gradient(U_array, 26./resize_size[0], 26./resize_size[0]) # gradients with respect to x and y\n F_array = (Gx**2+Gy**2)**.5 # gradient magnitude\n F_array[np.isinf(F_array)] = np.nan\n F_array[p_list[-1]<1e-3]=np.nan # final PDF\n sigma_list = []\n for j in range(9):\n arr = D_list[2*j] # current PDF\n arr[p_list[j]<1e-3]=np.nan\n sigma_list.append(np.sqrt(2*arr))\n\n\n sigma_lists.append(sigma_list)\n F_arrays.append(F_array)\n\n ax = fig_Fs[idx_fig].add_subplot(111)\n ax.imshow(transform(F_array)[::-1, :], cmap = cmap, vmin = -4.6, vmax = -2)\n ax.set_title(dir)\n\n all_axes = [i for j in fig_Fs for i in j.axes]\n for ax in all_axes:\n ax.axis('off')\n\n # uncertainties\n\n std = np.std(F_arrays, axis = 0)\n ax = fig_uncertainty.add_subplot(121)\n ax.imshow(transform(std)[::-1, :], cmap = cmap, vmin = F_unc_vmin, vmax = F_unc_vmax)\n ax.set_title('F_uncertainty')\n\n fig_sigma = plt.figure()\n ax = fig_sigma.add_subplot(111)\n ax.imshow(transform(np.nanmean(sigma_lists[0], axis = 0))[::-1, :], cmap = cmap, vmin = sigma_vmin, vmax = sigma_vmax) # index 0 (i.3 'original' is corresponds to the landscapes in other figures)\n ax.set_title('sigma_mean')\n\n sigma_means = [np.nanmean(sigma_list, axis = 0) for sigma_list in sigma_lists]\n std_array = np.nanstd(sigma_means, axis = 0)\n ax = fig_uncertainty.add_subplot(122)\n ax.imshow(transform(std_array)[::-1, :], cmap = cmap, vmin = sigma_unc_vmin, vmax = sigma_unc_vmax)\n ax.set_title('sigma_uncertainty')\n\n fig_sigma.savefig(path_to_here+'/../outputs/{}_mean_sigma.png'.format(drug_name), dpi = 1200)\n fig_uncertainty.savefig(path_to_here+'/../outputs/{}_uncertainties.png'.format(drug_name), dpi 
= 1200)", "def analyze_wfs(self, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001, compact=True):\n\n print(\"---------------------------------\")\n print(\"Analyzing waveforms to get maxima\")\n print(\"---------------------------------\")\n\n # Creo una progress bar per rendere piu' fruibile visivamente il programma\n bar = progressbar.ProgressBar(maxval=self.number_of_events,\n widgets=[progressbar.Bar(\"=\", \"[\", \"]\"), \" \", progressbar.Percentage()])\n bar.start()\n counter = 0\n # Ora faccio un loop sugli eventi..\n if compact:\n for event in range(0, len(self.table_sipm_time['ev']), 9):\n # ..e chiamo la funzione analyze_ev_wf per ogni evento\n peaks_dataframe = self.analyze_ev_wf_compact(\n event, n_bsl, pic_name, peak_height, peak_prominences)\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat(\n [self.wf_peaks, peaks_dataframe], ignore_index=True)\n bar.update(counter+1)\n counter += 9\n else:\n for event in self.table_sipm_time['ev']:\n # ..e chiamo la funzione analyze_ev_wf per ogni evento\n peaks_time, peaks_ampl = self.analyze_ev_wf(\n event, n_bsl, pic_name, peak_height, peak_prominences)\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat([self.wf_peaks, pd.DataFrame(\n {'t': peaks_time, 'A': peaks_ampl})], ignore_index=True)\n bar.update(counter+1)\n counter += 1\n\n bar.finish()\n print(\"Events: \"+str(len(self.table_sipm_time['ev'])))\n print(\"---------------------------------\")\n print(\"Waveform analysis completed!\")\n # Devo ora ricavare di nuovo i Dt dai tempi assoluti, utilizzando la funzione diff()..\n self.wf_peaks['dt'] = self.wf_peaks['t'].diff()\n # ..e scartando il primo valore (che non ha un Dt)\n self.wf_peaks = self.wf_peaks.iloc[1:]\n print('Found {:d} peaks in waveforms\\n'.format(len(self.wf_peaks)))", "def __init__(self, parent=None, pltw=None, cpos=None):\n super (noiseDlg, self).__init__(parent)\n\n self.parent = parent\n self.pltw = pltw\n self.cpos = cpos\n self.blkno = pltw.curvelist[cpos].yvinfo.blkpos\n self.xpos = pltw.curvelist[cpos].xvinfo.vidx\n self.ypos = pltw.curvelist[cpos].yvinfo.vidx\n self.npt = pltw.blklst[self.blkno][self.xpos].size\n self.data = np.vstack( (pltw.blklst[self.blkno][self.xpos],\n pltw.blklst[self.blkno][self.ypos],\n pltw.blklst[self.blkno][self.ypos],\n np.zeros(len(pltw.blklst[self.blkno][self.xpos]))) )\n self.dataSpan = getSpan(self.data[1])\n self.loc = 0.0\n self.scale = self.dataSpan * 0.01\n\n self.fig = Figure()\n self.canvas = FigureCanvas(self.fig)\n self.canvas.setParent(self)\n self.canvas.setFocus()\n self.mpl_toolbar = NavigationToolbar(self.canvas, self)\n\n self.axes = self.fig.add_subplot(111)\n # Location\n loclab = QtWidgets.QLabel(\"Location\")\n self.loctext = QtWidgets.QLineEdit(self)\n regex = QRegExp(\"[0-9]+.?[0-9]{,5}\")\n validator = QtGui.QRegExpValidator(regex, self.loctext)\n self.loctext.setValidator(validator)\n # Scale\n scalelab = QtWidgets.QLabel(\"Scale\")\n self.scaletext = QtWidgets.QLineEdit(self)\n validator = QtGui.QRegExpValidator(regex, self.scaletext)\n self.scaletext.setValidator(validator)\n # Buttons\n exeBtn = QtWidgets.QPushButton(\"Compute\")\n okBtn = QtWidgets.QPushButton(\"OK\")\n cancelBtn = QtWidgets.QPushButton(\"Cancel\")\n\n # set the layout\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.canvas)\n vbox.addWidget(self.mpl_toolbar)\n hbox = QtWidgets.QHBoxLayout()\n hbox.addWidget(loclab)\n 
hbox.addWidget(self.loctext)\n hbox.addWidget(scalelab)\n hbox.addWidget(self.scaletext)\n hbox.addWidget(exeBtn)\n hbox.addWidget(okBtn)\n hbox.addWidget(cancelBtn)\n vbox.addLayout(hbox)\n self.setLayout(vbox)\n\n # Connect buttons to callback functions\n exeBtn.clicked.connect(self.compute)\n okBtn.clicked.connect(self.validate)\n cancelBtn.clicked.connect(self.reject)\n self.setWindowTitle('Add Noise Tool')\n\n self.initWidgets()\n self.compute()\n QTimer.singleShot(5000, self.istest)", "def plot_rsfs_waveforms(peak_waveform, durations, labels):\n if np.mean(durations[np.where(labels==0)[0]]) < np.mean(durations[np.where(labels==1)[0]]):\n fs_k = 0;rs_k = 1\n waveform_class_ids = [1,0]\n else:\n rs_k = 0;fs_k = 1\n waveform_class_ids = [0,1]\n waveform_class = [waveform_class_ids[k] for k in labels]\n waveform_class = np.array(waveform_class)\n\n\n plt.figure(figsize=(6,4))\n for i in range(len(peak_waveform)):\n waveform = peak_waveform[i]\n if waveform_class[i]==np.unique(waveform_class)[0]:\n plt.plot(waveform/np.max(np.abs(waveform)),'#b3b3ff',alpha=0.7)\n if waveform_class[i]==np.unique(waveform_class)[1]:\n plt.plot(waveform/np.max(np.abs(waveform)),'#c6ecc6',alpha=0.7)\n\n\n # plot means, normalized\n for waveform_class_id in np.unique(waveform_class):\n plt.plot(np.mean(peak_waveform[waveform_class==waveform_class_id],axis=0)/\n (np.max(np.abs(np.mean(peak_waveform[waveform_class==waveform_class_id],axis=0)))),lw=3,label=waveform_class_id)\n plt.title('Raw: RS:'+str(len(np.where(waveform_class==0)[0]))+', FS: '+str(len(np.where(waveform_class==1)[0])))\n return waveform_class", "def plot_three(spectrum, thresh=1):\n plt.figure(figsize=(10, 4))\n plt.subplot(1,3,1)\n spectrum.plot()\n plt.subplot(1,3,2)\n plot_angle(spectrum, thresh=thresh)\n plt.subplot(1,3,3)\n wave = spectrum.make_wave()\n wave.unbias()\n wave.normalize()\n wave.segment(duration=0.01).plot()\n display(wave.make_audio())", "def plot_diagnostics(foldspec, flag, mask):\n \n plt.figure(figsize=(15,10))\n \n plt.subplot(231)\n plt.plot(foldspec.mean(0).mean(0), color='k')\n plt.xlabel('phase (bins)')\n plt.ylabel('I (arb.)')\n plt.title('Pulse Profile')\n plt.xlim(0, foldspec.shape[-1])\n \n plt.subplot(232)\n plt.title('RFI flagging parameter (log10)')\n plt.xlabel('time (bins)')\n plt.ylabel('freq (bins)')\n plt.imshow(np.log10(flag).T, aspect='auto')\n\n plt.subplot(233)\n plt.title('Manual off-gate scaling')\n plt.imshow(mask.T, aspect='auto', cmap='Greys')\n plt.xlabel('time (bins)')\n \n plt.subplot(234)\n plt.imshow(foldspec.mean(0), aspect='auto')\n plt.xlabel('phase')\n plt.ylabel('freq')\n\n plt.subplot(235)\n plt.imshow(foldspec.mean(1), aspect='auto')\n plt.xlabel('phase')\n plt.ylabel('time')\n\n plt.subplot(236)\n plt.imshow(foldspec.mean(2).T, aspect='auto')\n plt.xlabel('time')\n plt.ylabel('freq')", "def update_spec_plot(self):\n if self.img is None:\n return\n\n p = self.params\n pos = np.array(self.roi.pos(), dtype=int)\n size = np.array(self.roi.size(), dtype=int)\n imin = np.clip(pos, 0, self.cube.shape[1])\n imax = np.clip(pos + size, 0, self.cube.shape[2])\n print('Extract mean spectrum for {}'.format(list(zip(imin, imax))))\n data = self.cube[:, imin[1]:imax[1], imin[0]:imax[0]]\n self.spec = spec = data.mean(axis=(1, 2))\n self.specplot.clearPlots()\n\n if p['Sky', 'Show'] and self.sky is not None:\n sp = self.sky.data.data * (2 * spec.data.max()) + spec.data.min()\n self.specplot.plot(sp, pen=p['Sky', 'Line Color'])\n\n self.specplot.plot(spec.data.data, pen=p['Spectrum', 'Line 
color'])\n\n if p['Median filter', 'Show']:\n sp = spec.median_filter(p['Median filter', 'Kernel Size'])\n self.specplot.plot(sp.data.data, pen={\n 'color': p['Median filter', 'Line Color'],\n 'width': p['Median filter', 'Line Size']\n })\n\n # self.specplot.autoRange()\n if self.zoomplot is not None:\n self.update_zoom_spec_from_region()", "def plotFluorAroundPeaks(fluor_data, time_stamps, peak_inds,\n clip_window, clip_window_origin,\n output_dir, name, movie_start_time):\n before_ind = np.where(time_stamps>clip_window[0])[0][0]\n after_ind = np.where(time_stamps>clip_window[1])[0][0]\n\n dir = get_output_file(name, output_dir)\n dir = dir + '_peakplot/'\n check_dir(dir)\n \n if clip_window_origin == 'peak':\n\n plot_indiv_peaks = True\n iter = 0\n if plot_indiv_peaks:\n for ind in peak_inds:\n fluor = fluor_data[ind-before_ind:ind+after_ind]\n timestamps = time_stamps[ind-before_ind:ind+after_ind]\n\n plt.figure()\n plt.plot(timestamps, fluor)\n plt.axvline(x=time_stamps[ind])\n plt.xlim(time_stamps[ind] - clip_window[0], time_stamps[ind] + clip_window[1])\n\n mins, seconds, frames, peak_time_string = timeToMSF(movie_start_time + time_stamps[ind])\n mins, seconds, frames, start_time_string = timeToMSF(movie_start_time + time_stamps[ind] - clip_window[0])\n mins, seconds, frames, end_time_string = timeToMSF(movie_start_time + time_stamps[ind] + clip_window[1])\n\n plt.title('(With start time) Peak- ['+peak_time_string+\n '], Window- ['+start_time_string+', '+end_time_string+']')\n\n plt.savefig(dir+str(iter))\n print \"plotFluorAroundPeaks: \", dir+str(iter)\n iter += 1\n \n end_time = time_stamps[-1]\n # mins = int(np.floor(time_stamps[-1]/60))\n # seconds = int(np.floor(time_stamps[-1] - mins*60))\n # ms = time_stamps[-1] - seconds - mins*60\n # frames = int(np.floor(ms*30))\n mins, seconds, frames, time_string = timeToMSF(end_time)\n\n plt.figure()\n plt.plot(time_stamps, fluor_data)\n plt.title('Full time series. 
Time: '+str(end_time)+', '+str(mins)+':'+str(seconds)+':'+str(frames))\n plt.savefig(dir+'full')", "def plot_redshift_peaks(fig_size,\n funcion,\n wavelength,\n lmin,\n lmax,\n fmin,\n fmax,\n cut,\n peaks,\n peaks_name,\n label, \n show_plot=False):\n fig, ax = plt.subplots(figsize=(fig_size, fig_size / 2.5))\n ax.plot(wavelength, funcion, \"r\", lw=1, alpha=0.5)\n \n ax.set_xlabel(r\"Wavelength [$\\AA$]\")\n ax.set_ylabel(\"Flux / continuum\")\n\n ax.set_xlim(lmin, lmax)\n ax.set_ylim(fmin, fmax)\n ax.axhline(y=cut, color=\"k\", linestyle=\":\", alpha=0.5)\n for i in range(len(peaks)):\n ax.axvline(x=peaks[i], color=\"k\", linestyle=\":\", alpha=0.5)\n label = peaks_name[i]\n ax.text(peaks[i], 1.8, label)\n\n if show_plot:\n plt.show()\n return fig", "def testPlots(self):\n\t\tself.watcher.analyze(layers=[67], plot=True, randomize=True)", "def test_9(self):\n\n sq_qe = gen_step_qe(1.42, 0.9)\n test_ill = Illumination()\n # test_qef = qe_filter(sq_qe)\n\n filtered_ill = test_ill * sq_qe\n\n assert isinstance(filtered_ill, Illumination)\n\n #plt.plot(filtered_ill.get_spectrum('eV')[0, :], filtered_ill.get_spectrum('eV')[1, :], label=\"filtered\")\n #plt.plot(test_ill.get_spectrum('eV')[0, :], test_ill.get_spectrum('eV')[1, :], label=\"original\")\n\n #plt.xlabel('wavelength (eV)')\n #plt.ylabel('spectrum (W/eV/m^2)')\n\n #plt.legend()\n\n #plt.show()", "def plot_examples(cms):\r\n data = amp_axis\r\n\r\n fig, axs = plt.subplots(1, 2, figsize=(30, 8)) #create two plots\r\n for [ax, cmap] in zip(axs, cms):\r\n psm = ax.pcolormesh(time_axis, tof_axis, data, cmap=cmap, rasterized=True, vmin = 250) #specify axis and minimum amplitude value to show on the graph\r\n fig.colorbar(psm, ax=ax, label = 'Amplitude') #define the legend of the amplitude data\r\n \r\n ax.set_ylabel('Time of Flight [\\u03bcs]') #set label for y axis\r\n ax.set_xlabel('Time [min]') #set label for x axis\r\n \r\n ax.hlines(8.744, 0, stop_time, colors = 'white') #create two white lines for the safe operating range for ToF\r\n ax.hlines(9.555, 0, stop_time, colors = 'white') \r\n \r\n plt.show()" ]
[ "0.6153209", "0.58707404", "0.57681173", "0.5617294", "0.55997247", "0.5560818", "0.55468017", "0.54887295", "0.5438249", "0.5410866", "0.5409235", "0.53832775", "0.5370036", "0.5339262", "0.5334498", "0.5330733", "0.5290526", "0.52650946", "0.52564365", "0.5254666", "0.52228355", "0.5186187", "0.5182282", "0.5171851", "0.51702785", "0.5160341", "0.51561224", "0.51474553", "0.51304615", "0.51275265" ]
0.7009614
0
Receive data until the specified string of bytes is found. The needle is not stripped from the data.
def recvuntil(self, needle: bytes) -> bytes: data = b"" # We read one byte at a time so we don't overshoot the goal while not data.endswith(needle): next_byte = self.recv(1) if next_byte is not None: data += next_byte return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_until(self, data):\n\n while not data in self.buff:\n self.buff += self.socket.recv(1024)\n \n pos = self.buff.find(data)\n rval = self.buff[:pos + len(data)]\n self.buff = self.buff[pos + len(data):]\n \n return rval", "def read_until(self, data):\n\n while not data in self.buff:\n self.buff += self.socket.recv(1024)\n \n pos = self.buff.find(data)\n rval = self.buff[:pos + len(data)]\n self.buff = self.buff[pos + len(data):]\n \n return rval", "def read_until(self, data):\n\n while not data in self.buff:\n self.buff += self.socket.recv(1024)\n\n pos = self.buff.find(data)\n rval = self.buff[: pos + len(data)]\n self.buff = self.buff[pos + len(data) :]\n\n return rval", "def read_until(steg_bytes: bytes, offset: int, ending: str):\r\n # Create a variable to hold the bytes read\r\n bytes_read = b\"\"\r\n\r\n # Loop through the steg_bytes\r\n while offset < len(steg_bytes):\r\n # Check if the current byte is the ending byte sequence\r\n if steg_bytes[offset:offset + len(ending)] == ending.encode():\r\n # Return the bytes read and the offset of the ending byte sequence\r\n return bytes_read, offset\r\n # Read the next byte\r\n bytes_read += steg_bytes[offset:offset + 1]\r\n offset += 1", "def read_until(self, s, timeout=None):\n self.read_cond(lambda x: s in x.buf, timeout)\n end = self.buf.find(s) + len(s)\n res = self.buf[:end]\n self.buf = self.buf[end:]\n return res", "def read_until(\n self,\n min_num_bytes: int,\n ending: bytes,\n timeout: float = 10.0,\n data_consumer=None,\n ):\n\n data = b''\n\n # If a miniumum number of bytes is given, wait till at least\n # that number of bytes are received. If the value is 0, then\n # continue, and rely on the terminator and timeout values.\n if min_num_bytes:\n data = self.con.read(min_num_bytes)\n # debug(f'read {data=}')\n if data_consumer:\n data_consumer(data)\n\n timeout_count = 0\n while True:\n if ending and data.endswith(ending):\n break\n else:\n # debug(f\"{ending=} was not found\")\n pass\n\n if self.con.inWaiting() > 0:\n new_data = self.con.read(1)\n # debug(f'read {new_data=}')\n data = data + new_data\n # if len(data) > 80:\n # debug(f'data: len={len(data)} {data[-80:]=}')\n # else:\n # debug(f'data: len={len(data)} {data=}')\n if data_consumer:\n data_consumer(new_data)\n # timeout_count = 0\n else:\n timeout_count += 1\n # debug(f'{timeout_count=}')\n if timeout is not None and timeout_count >= 100 * timeout:\n if not data:\n debug(f\"TIMEOUT - No data received within {timeout} seconds\")\n else:\n debug(f\"TIMEOUT - data {data} did not end with {ending}\")\n break\n time.sleep(0.01)\n debug(f\"read_until returns {data=}\")\n return data", "def read_until_sub(self, sub=None):\n sub = sub or b'\\n'\n with self.reading:\n offset = 0\n while True:\n data = self.read_buffer.slice()\n find_offset = data[offset:].find(sub)\n if find_offset >= 0:\n break\n offset = max(0, len(data) - len(sub))\n self.read_buffer.enqueue((yield self.base.read(self.bufsize)))\n do_return(self.read_buffer.dequeue(offset + find_offset + len(sub)))", "def skip_until(self, s, timeout=None):\n self.read_cond(lambda x: s in x.buf, timeout)\n start = self.buf.find(s)\n self.buf = self.buf[start:]\n return", "def recv_until(sock, suffix):\n message = sock.recv(4)\n if not message:\n raise EOFError('socket closed')\n while not message.endswith(suffix):\n data = sock.recv(4)\n if not data:\n raise IOError('received {!r} then socket closed'.format(message))\n message += data\n return message", "def read_until(self, match, timeout=None):\n n = 
len(match)\n self.process_rawq()\n i = self.cookedq.find(match)\n if i >= 0:\n i = i+n\n buf = self.cookedq[:i]\n self.cookedq = self.cookedq[i:]\n return buf\n if timeout is not None:\n deadline = _time() + timeout\n with _SshSelector() as selector:\n selector.register(self, selectors.EVENT_READ)\n while not self.eof:\n if selector.select(timeout):\n i = max(0, len(self.cookedq)-n)\n self.fill_rawq()\n self.process_rawq()\n i = self.cookedq.find(match, i)\n if i >= 0:\n i = i+n\n buf = self.cookedq[:i]\n self.cookedq = self.cookedq[i:]\n return buf\n if timeout is not None:\n timeout = deadline - _time()\n if timeout < 0:\n break\n return self.read_very_lazy()", "def searchBytes(self, needle, start_address=0, end_address=None):\n if start_address % 8:\n raise InputStreamError(\n \"Unable to search bytes with address with bit granularity\")\n length = len(needle)\n size = max(3 * length, 4096)\n buffer = b''\n\n if self._size and (end_address is None or self._size < end_address):\n end_address = self._size\n\n while True:\n if end_address is not None:\n todo = (end_address - start_address) >> 3\n if todo < size:\n if todo <= 0:\n return None\n size = todo\n data = self.readBytes(start_address, size)\n if end_address is None and self._size:\n end_address = self._size\n size = (end_address - start_address) >> 3\n assert size > 0\n data = data[:size]\n start_address += 8 * size\n buffer = buffer[len(buffer) - length + 1:] + data\n found = buffer.find(needle)\n if found >= 0:\n return start_address + (found - len(buffer)) * 8", "def _readuntil(f, end=_TYPE_END):\n\tbuf = bytearray()\n\twhile True:\n\t\tbyte = f.read(1)\n\t\tif byte != end:\n\t\t\tbuf += byte\n\t\telse:\n\t\t\tbreak\n\treturn buf", "def read_until(string, untilseq=\"\"):\n idx = string.index(untilseq)\n return string[:idx]", "def get_until(self, sub, limit, offset=0):\n search_offset = offset\n\n end = None\n\n while len(self.buff) < limit:\n\n index = self.buff.find(sub, search_offset)\n\n if index >= 0:\n end = index + len(sub)\n break\n\n if len(self.chunkbuffer) <= 0:\n return None\n\n chunk = self.chunkbuffer.pop()\n\n self.buff.extend(chunk)\n\n if not end:\n raise IndexError(\"Sub not found within limits\")\n\n data = self.buff[offset:end]\n\n self.autocommit_amount = end\n\n return data", "def recv_until(self, delimiter, timeout=_UNSET, maxsize=_UNSET,\n with_delimiter=False):\n with self._recv_lock:\n if maxsize is _UNSET:\n maxsize = self.maxsize\n if maxsize is None:\n maxsize = _RECV_LARGE_MAXSIZE\n if timeout is _UNSET:\n timeout = self.timeout\n len_delimiter = len(delimiter)\n\n sock = self.sock\n recvd = bytearray(self.rbuf)\n start = time.time()\n find_offset_start = 0 # becomes a negative index below\n\n if not timeout: # covers None (no timeout) and 0 (nonblocking)\n sock.settimeout(timeout)\n try:\n while 1:\n offset = recvd.find(delimiter, find_offset_start, maxsize)\n if offset != -1: # str.find returns -1 when no match found\n if with_delimiter: # include delimiter in return\n offset += len_delimiter\n rbuf_offset = offset\n else:\n rbuf_offset = offset + len_delimiter\n break\n elif len(recvd) > maxsize:\n raise MessageTooLong(maxsize, delimiter) # see rbuf\n if timeout:\n cur_timeout = timeout - (time.time() - start)\n if cur_timeout <= 0.0:\n raise socket.timeout()\n sock.settimeout(cur_timeout)\n nxt = sock.recv(self._recvsize)\n if not nxt:\n args = (len(recvd), delimiter)\n msg = ('connection closed after reading %s bytes'\n ' without finding symbol: %r' % args)\n raise ConnectionClosed(msg) # check 
the recv buffer\n recvd.extend(nxt)\n find_offset_start = -len(nxt) - len_delimiter + 1\n except socket.timeout:\n self.rbuf = bytes(recvd)\n msg = ('read %s bytes without finding delimiter: %r'\n % (len(recvd), delimiter))\n raise Timeout(timeout, msg) # check the recv buffer\n except Exception:\n self.rbuf = bytes(recvd)\n raise\n val, self.rbuf = bytes(recvd[:offset]), bytes(recvd[rbuf_offset:])\n return val", "def __recvall(self, bytes):\r\n data = \"\"\r\n while len(data) < bytes:\r\n data = data + self.recv(bytes-len(data))\r\n return data", "def read_until(self, delimiter, timeout=None):\n timeout_cnt = 0\n if timeout is not None:\n self._conn.settimeout(timeout)\n while True:\n delimiter_pos = self._buffer.find(delimiter)\n if delimiter_pos == -1:\n try:\n received = self._conn.recv(4096)\n timeout_cnt = 0\n except socket.timeout as exc:\n timeout_cnt += 1\n if timeout_cnt >= self.timeout_limit:\n raise TS3ConnectionClosedException(\n \"Socket connection timeout limit received!\"\n ) from exc\n continue\n if len(received) == 0:\n raise TS3ConnectionClosedException(\"Socket connection was closed!\")\n self._buffer += received\n else:\n break\n data = self._buffer[: delimiter_pos + len(delimiter)]\n self._buffer = self._buffer[delimiter_pos + len(delimiter) :]\n if timeout is not None:\n self._conn.settimeout(self.timeout)\n return data", "def read_next_line(data_socket):\r\n current_byte = next_byte(data_socket)\r\n found_line = b''\r\n while current_byte != b'\\x0a':\r\n found_line += current_byte\r\n current_byte = next_byte(data_socket)\r\n return found_line", "def read_until_regex(self, regex):\n with self.reading:\n while True:\n data = self.read_buffer.slice()\n match = regex.search(data)\n if match:\n break\n self.read_buffer.enqueue((yield self.base.read(self.bufsize)))\n do_return((self.read_buffer.dequeue(match.end()), match))", "def findBytes(self, start: ghidra.program.model.address.Address, byteString: unicode, matchLimit: int) -> List[ghidra.program.model.address.Address]:\n ...", "def __tcp_recv(self):\n total_data = []\n bs = 1024\n try:\n data = self.__sock.recv(bs)\n total_data.append(data)\n while True and data:\n if not re.search(\"L: (\\d+)\",data) and not data[-4:] == '\\r\\n\\r\\n':\n data = self.__sock.recv(bs)\n total_data.append(data)\n elif not re.search(\"L: (\\d+)\",data) and data[-4:] == '\\r\\n\\r\\n':\n return total_data\n else:\n break\n \n\n while re.search(\"L: (\\d+)\",data):\n n = len(data)\n L = int(re.findall(\"L: (\\d+)\",data)[-1])\n p = data.rfind('\\r\\n\\r\\n')\n abc = data\n data = ''\n\n p1 = data.rfind(str(L))\n if p < p1:\n log(\"rn before L\")\n left = L + n - (p1 + len(str(L))) + 4\n\n else:\n left = L - (n - p -4)\n if left == L:\n log(\"It happened!\")\n break\n\n #if more bytes then last L\n #come across another command: BN etc.\n #read until another L come\n if left < 0:\n log('abc')\n d = ''\n left = 0\n while True:\n d = self.__sock.recv(bs)\n data += d\n if re.search(\"L: (\\d+)\",d):\n break\n log(\"read left bytes\")\n log('data:'+data)\n total_data.append(data)\n\n #read left bytes in last L\n while left:\n data = self.__sock.recv(left)\n n = len(data)\n left = left - n\n\n if not data:\n break\n total_data.append(data)\n\n except socket.error,e:\n #self.__sock.close()\n raise PyFetionSocketError(e)\n\n return self.__split(''.join(total_data))\n\n #return ''.join(total_data)", "def recvtil(self, delim):\n buf = b''\n # TODO maybe not make this O(n**2)...\n while not delim in buf:\n buf += self.recv(1)\n return buf", "def 
_slurp_until(self, char='G', timeout=10, sleep=0.1):\n num = 0\n end = time.time() + timeout\n while time.time() < end:\n while self.ser.inWaiting():\n c = self.ser.read()\n num += 1\n if c == char:\n return num\n was_slurp = True\n time.sleep(sleep)\n return 0", "async def readuntil_re(self, regex, start=0):\n self.logger.debug(\"readuntil_re: %s\", regex)\n\n try:\n match = await self.wait_for(lambda data: regex.search(data, start))\n\n m_beg, m_end = match.span()\n # We are matching against the data stored stored in bytebuffer\n # The bytebuffer is manipulated in place. After we read the data\n # the buffer may get overwritten. The match object seems to be\n # directly referring the data in bytebuffer. This causes a problem\n # when we try to find the matched groups in match object.\n #\n # In [38]: data = bytearray(b\"localhost login:\")\n #\n # In [39]: rex = re.compile(b'(?P<login>.*((?<!Last ).ogin|.sername):)|(?P<passwd>\\n.*assword:)|(?P<prompt>\\n.*[%#>])|(?P<ignore>( to cli \\\\])|(who is on this device.\\\\]\\r\\n)|(Press R\n # ...: ETURN to get started\\r\\n))\\\\s*$')\n #\n # In [40]: m = rex.search(data)\n #\n # In [41]: m.groupdict()\n # Out[41]: {'ignore': None, 'login': b'localhost login:', 'passwd': None, 'prompt': None}\n #\n # In [42]: data[:]=b'overwrite'\n #\n # In [43]: m.groupdict()\n # Out[43]: {'ignore': None, 'login': b'overwrite', 'passwd': None, 'prompt': None}\n #\n groupdict = match.groupdict()\n rdata = await self.read(m_end)\n data = rdata[:m_beg] # Data before the regex match\n matched = rdata[m_beg:m_end] # portion that matched regex\n except AssertionError:\n if self._eof:\n # We are at the EOF. Read the whole buffer and send it back\n data = await self.read(len(self._buffer))\n matched = b\"\"\n match = None\n groupdict = None\n else:\n # re-raise the exception\n raise\n\n return ResponseMatch(data, matched, groupdict, match)", "def dataReceived(self, data):\n # Try to minimize string copying (via slices) by keeping one buffer\n # containing all the data we have so far and a separate offset into that\n # buffer.\n alldata = self._unprocessed + data\n currentOffset = 0\n prefixLength = self.prefixLength\n fmt = self.structFormat\n self._unprocessed = alldata\n\n while len(alldata) >= (currentOffset + prefixLength) and not self.paused:\n messageStart = currentOffset + prefixLength\n length, = unpack(fmt, alldata[currentOffset:messageStart])\n if length > self.MAX_LENGTH:\n self._unprocessed = alldata\n self._compatibilityOffset = currentOffset\n self.lengthLimitExceeded(length)\n return\n messageEnd = messageStart + length\n if len(alldata) < messageEnd:\n break\n\n # Here we have to slice the working buffer so we can send just the\n # netstring into the stringReceived callback.\n packet = alldata[messageStart:messageEnd]\n currentOffset = messageEnd\n self._compatibilityOffset = currentOffset\n self.stringReceived(packet)\n\n # Check to see if the backwards compat \"recvd\" attribute got written\n # to by application code. 
If so, drop the current data buffer and\n # switch to the new buffer given by that attribute's value.\n if 'recvd' in self.__dict__:\n alldata = self.__dict__.pop('recvd')\n self._unprocessed = alldata\n self._compatibilityOffset = currentOffset = 0\n if alldata:\n continue\n return\n\n # Slice off all the data that has been processed, avoiding holding onto\n # memory to store it, and update the compatibility attributes to reflect\n # that change.\n self._unprocessed = alldata[currentOffset:]\n self._compatibilityOffset = 0", "def read_until(self, expected=serial.LF, size=None) -> bytes:\n logger.debug(f\"read_until(expected={expected}, size={size})\")\n return self._con.read_until(expected=expected, size=size)", "def recv_chunk(self, data):", "def findBytes(self, start: ghidra.program.model.address.Address, byteString: unicode) -> ghidra.program.model.address.Address:\n ...", "def worker_serial_read(self):\r\n while self.active_flag.is_set():\r\n if not self.data_recieved_flag.is_set() and self.serial_data.in_waiting > 0:\r\n # strtmp=self.serial_data.read_until(b'\\x02\\x01\\x04\\x03\\x06\\x05\\x08\\x07');\r\n strtmp = self.serial_data.read_all()\r\n if (strtmp != b''):\r\n # self.buffer_busy_flag.wait();\r\n self.buffer_busy_flag.clear()\r\n # self.recieved_data=[self.recieved_data,strtmp];\r\n self.recieved_data = strtmp\r\n self.buffer_busy_flag.set()\r\n self.data_recieved_flag.set()\r\n else:\r\n time.sleep(0.001)\r\n\r\n return", "def _read_data(self):\n while True:\n try:\n data = yield from asyncio.wait_for(self._socket.recv(), 1)\n except asyncio.TimeoutError:\n continue\n except asyncio.CancelledError:\n break\n except ConnectionClosed:\n break\n\n self._push_packet(data)\n\n self._loop.call_soon(self.close)" ]
[ "0.7999478", "0.7999478", "0.79382706", "0.7246821", "0.6910376", "0.67496634", "0.6570922", "0.6418232", "0.64071643", "0.6406031", "0.6261758", "0.6165734", "0.614623", "0.607534", "0.60666174", "0.6056381", "0.6013656", "0.5945416", "0.58822405", "0.5880646", "0.5843492", "0.5823041", "0.5819656", "0.5814564", "0.5780729", "0.57750225", "0.576408", "0.57376367", "0.5731877", "0.5730604" ]
0.82938015
0
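A hedged, self-contained sketch of the read-until-sentinel pattern polled by the _slurp_until negative above; the read_byte callable, the sentinel value, and the in-memory byte source are illustrative assumptions rather than part of the record.

import time

# Sketch: poll a read_byte() callable until a sentinel byte arrives or a
# timeout expires; return how many bytes were consumed, or 0 on timeout.
def slurp_until(read_byte, sentinel=b'G', timeout=10.0, sleep=0.1):
    consumed = 0
    deadline = time.time() + timeout
    while time.time() < deadline:
        chunk = read_byte()  # assumed to return b'' when nothing is buffered
        if chunk:
            consumed += 1
            if chunk == sentinel:
                return consumed
        else:
            time.sleep(sleep)  # nothing waiting; back off briefly
    return 0

# Usage with an in-memory byte source standing in for a serial port.
data = iter([b'A', b'B', b'G', b'X'])
print(slurp_until(lambda: next(data, b'')))  # 3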
Define a distance_between function to calculate the distance between two agents
def distance_between(agents_row_a, agents_row_b):\n # Euclidean distance between the two agents' (_x, _y) positions\n return (((agents_row_a._y - agents_row_b._y)**2) + ((agents_row_a._x - agents_row_b._x)**2))**0.5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def agent_distance(self, agent):\n if self.type == 'Line':\n return self._line_intersection(self.params, (agent._position.x, agent._position.y))\n elif self.type == 'Circle':\n return self._circle_intersection(self.params, (agent._position.x, agent._position.y))\n elif self.type == 'Rect':\n x, y, w, h = self.params\n candidates = dict()\n d1, p1 = self._line_intersection((x, y, x+w, y), (agent._position.x, agent._position.y))\n d2, p2 = self._line_intersection((x, y, x, y+h), (agent._position.x, agent._position.y))\n d3, p3 = self._line_intersection((x+w, y, x+w, y+h), (agent._position.x, agent._position.y))\n d4, p4 = self._line_intersection((x, y+h, x+w, y+h), (agent._position.x, agent._position.y))\n candidates[d1] = p1\n candidates[d2] = p2\n candidates[d3] = p3\n candidates[d4] = p4\n\n keylist = candidates.keys()\n keylist.sort()\n\n return keylist[0], candidates[keylist[0]]", "def get_distance_between(\n node1,\n node2,\n distance_between=False,\n bounding_box=False,\n rotate_pivot=False,\n):\n if distance_between:\n dist = cmds.createNode(\"distanceBetween\")\n cmds.connectAttr(node1 + \".worldMatrix[0]\", dist + \".inMatrix1\")\n cmds.connectAttr(node2 + \".worldMatrix[0]\", dist + \".inMatrix2\")\n value = cmds.getAttr(dist + \".distance\")\n cmds.delete(dist)\n return value\n\n if bounding_box:\n node1 = cmds.xform(\n node1, query=True, bounding_box=True, worldSpace=True\n )\n node2 = cmds.xform(\n node2, query=True, bounding_box=True, worldSpace=True\n )\n\n elif rotate_pivot:\n node1 = cmds.xform(\n node1, query=True, worldSpace=True, rotate_pivot=True\n )\n node2 = cmds.xform(\n node2, query=True, worldSpace=True, rotate_pivot=True\n )\n\n else:\n node1 = cmds.xform(\n node1, query=True, translation=True, worldSpace=True\n )\n node2 = cmds.xform(\n node2, query=True, translation=True, worldSpace=True\n )\n\n value = (\n (node1[0] - node2[0]) ** 2\n + (node1[1] - node2[1]) ** 2\n + (node1[2] - node2[2]) ** 2\n ) ** 0.5\n\n return value", "def get_distance_between(self, p1, p2):\n\t\treturn math.sqrt(math.pow((p1.x - p2.x), 2) + math.pow((p1.y - p2.y), 2))", "def _distance_between(self, n1, n2):\n return cartesian_distance(n1[0], n1[1], n2[0], n2[1])", "def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)", "def distance(self, a, b):\n raise NotImplementedError()", "def distance_between_actor(vehicle, ped):\n\n return distance_to_vehicle(vehicle, ped.get_location())", "def distance(self, a, b):\n \n # -----------------------------\n # Your code\n '''R = 3963 # radius of Earth (miles)\n lat1, lon1 = math.radians(a[0]), math.radians(a[1])\n lat2, lon2 = math.radians(b[0]), math.radians(b[1])\n \n return math.acos(math.sin(lat1) * math.sin(lat2) + math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R*0.000621371'''\n return abs(a[0] - b[0]) + abs(a[1] - b[1])\n \n \n # -----------------------------", "def distance_between(pt1: tuple, pt2: tuple) -> float:\r\n\r\n return ((pt2[1] - pt1[1])**2 + (pt2[0] - pt1[0])**2)**0.5", "def distance_between(x1: float, y1: float, x2: float, y2: float) -> float:\n return distance_between_sq(x1, y1, x2, y2)**0.5", "def _calc_distance(r1, r2):\n return np.linalg.norm(r1 - r2)", "def get_distance_metres(aLocation1, aLocation2):\n [dNorth, dEast, dDown] = get_position_error(aLocation1, aLocation2)\n \n return math.sqrt((dNorth*dNorth) + (dEast*dEast))", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y 
- p2.y)**2)**0.5", "def compute_distance(node1, node2):\n return np.linalg.norm(node1 - node2)", "def distance(brd1,brd2):\n\n step=brd1[1,0]-brd1[0,0]\n return np.sum(np.abs(brd1[:,1]-brd2[:,1]))*step", "def distance_between(self, n1, n2):\n if self.distance_method == 'direct':\n n1_relevants = 0\n n2_relevants = 0\n for i in range(len(self.sample)):\n if is_relevant(self.sample.iloc[i], n1.anchor):\n n1_relevants += 1\n if is_relevant(self.sample.iloc[i], n2.anchor):\n n2_relevants += 1\n return (n1_relevants - n2_relevants)/len(self.sample)\n else:\n return 0.5", "def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))", "def distance(a, b):\n ax, ay = a\n bx, by = b\n dx = bx - ax\n dy = by - ay\n return (abs(dx) + abs(dy) + abs(dx - dy)) / 2", "def distance(self, other):\n ...", "def distance(a, b):\n return (np.sum((a - b)**2))**0.5", "def distance_between(self, first_node_object, second_node_object):\n\n (first_column, first_row) = first_node_object\n (second_column, second_row) = second_node_object\n\n return numpy.sqrt((first_row - second_row) ** 2 +\n (first_column - second_column) ** 2)", "def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )", "def distance(self,x,y,**kwargs):\n pass", "def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def dist(a, b):\n base_url=\"https://route.api.here.com/routing/7.2/calculateroute.json?\"\n payload = {'app_id':HERE_ID, \n 'app_code':HERE_CODE,\n 'waypoint0':'geo!'+','.join([str(i) for i in a]),\n 'waypoint1':'geo!'+','.join([str(i) for i in b]),\n 'mode':'fastest;car;traffic:disabled',\n }\n resp = requests.get(base_url, params=payload)\n data = json.loads(resp.content)\n #import ipdb; ipdb.set_trace()\n summary = data['response']['route'][0]['summary']\n return {\"distance\" : summary['distance'], \n \"trafficTime\" : summary[\"trafficTime\"],\n \"baseTime\" : summary[\"baseTime\"]}", "def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5", "def dist(a, b):\n x0, y0 = a # Destructuring assignment\n x1, y1 = b\n\n return math.sqrt((x1 - x0)**2 + (y1 - y0)**2)", "def distance(x1, y1, x2, y2):\n return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5", "def distance(x1, y1, x2, y2):\n dist = ((x1-x2)**2 + (y1-y2)**2)**0.5\n return dist", "def dwithin(a, b, distance, **kwargs):\n return lib.dwithin(a, b, distance, **kwargs)" ]
[ "0.67812973", "0.67544675", "0.6624835", "0.65831345", "0.6558213", "0.6517963", "0.6490686", "0.64873284", "0.64811265", "0.645943", "0.6455973", "0.6448196", "0.6403538", "0.63928527", "0.6379261", "0.6374585", "0.63732916", "0.6372178", "0.63591474", "0.63575906", "0.6353674", "0.6350206", "0.6316433", "0.6313455", "0.63080394", "0.63029945", "0.6292993", "0.62884057", "0.62845796", "0.62748367" ]
0.7354095
0
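A small usage sketch for the distance_between document in the record above; the Agent class with _x/_y attributes is a hypothetical stand-in for whatever agent type the original code expects.

# Sketch: exercise distance_between on a 3-4-5 right triangle.
class Agent:
    def __init__(self, x, y):
        self._x = x
        self._y = y

def distance_between(agents_row_a, agents_row_b):
    # Euclidean distance between the two agents' (_x, _y) positions
    return (((agents_row_a._y - agents_row_b._y)**2) +
            ((agents_row_a._x - agents_row_b._x)**2))**0.5

a, b = Agent(0, 0), Agent(3, 4)
print(distance_between(a, b))  # 5.0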
Prepare the prmtop and inpcrd files of the selected list of spawns.
def prepare_spawns(self, spawns, epoch): sim_count = 1 basedir = os.getcwd() spawn_folder_names = [] for traj_id, frame_id in spawns: logger.info('Building simulation {} of epoch {}'.format(sim_count, epoch)) folder_name = 'e{:02d}s{:02d}_{}f{:04d}'.format(epoch, sim_count, traj_id, frame_id) destination = os.path.join(self.input_folder, folder_name) create_folder(destination) spawn_folder_names.append(destination) if not self.from_solvated: # Add files from build folder to destination folder so tleap # can read them since we're not retrieving frame from an # already solvated trajectory create_symlinks( files=os.path.join(self.build_folder, '*'), dst_folder=os.path.realpath(destination) ) # All files in destination, so now move into it os.chdir(destination) # Structure if self.from_solvated: outfile = 'seed.ncrst' else: outfile = 'seed.pdb' write_cpptraj_script( traj=os.path.relpath( os.path.join( basedir, self.meta.loc[traj_id]['traj_fn'] ) ), top=os.path.relpath( os.path.join( basedir, self.meta.loc[traj_id]['top_fn'] ) ), # Cpptraj uses 1-indexed frame number frame1=frame_id + 1, frame2=frame_id + 1, outfile=outfile, path='script.cpptraj', run=True ) # Topology if not self.from_solvated: write_tleap_script( pdb_file='seed.pdb', run=True, system_name='structure', path='script.tleap' ) # Apply hmr to new topologies hmr_prmtop(top_fn='structure.prmtop') else: os.symlink( os.path.relpath( os.path.join( basedir, self.meta.loc[traj_id]['top_fn'] ) ), 'structure.prmtop' ) # AMBER input files write_production_file() # Write information from provenance to file information = [ 'Parent trajectory:\t{}'.format(self.meta.loc[traj_id]['traj_fn']), 'Frame number:\t{}'.format(frame_id), 'Topology:\t{}'.format(self.meta.loc[traj_id]['top_fn']), '' ] provenance_fn = 'provenance.txt' with open(provenance_fn, 'w+') as f: f.write('\n'.join(information)) # When finished, update sim_count and go back to base dir to repeat sim_count += 1 os.chdir(basedir) return spawn_folder_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
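A minimal sketch of the spawn folder-naming scheme used by the prepare_spawns document above; the helper name and the example (traj_id, frame_id) pairs are hypothetical, only the format string comes from the record.

# Sketch: reproduce the folder names prepare_spawns builds for each spawn,
# e.g. epoch 2 with spawns [(3, 17), (7, 250)] -> e02s01_3f0017, e02s02_7f0250.
def spawn_folder_names(spawns, epoch):
    names = []
    for sim_count, (traj_id, frame_id) in enumerate(spawns, start=1):
        # Same format string as the document: e<epoch>s<sim>_<traj>f<frame>
        names.append('e{:02d}s{:02d}_{}f{:04d}'.format(epoch, sim_count, traj_id, frame_id))
    return names

print(spawn_folder_names([(3, 17), (7, 250)], epoch=2))
# ['e02s01_3f0017', 'e02s02_7f0250']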
[ "def prep_data(self, num_processes):\n filenames = os.listdir(self.beatmaps_root)\n processes = []\n for i in range(num_processes):\n start = i * (len(filenames) // num_processes)\n end = None\n if i != num_processes - 1:\n end = (i + 1) * (len(filenames) // num_processes)\n else:\n end = len(filenames)\n processes.append(Process(target=self._prep_data_worker,\n args=(start, end, filenames)))\n for p in processes:\n p.start()\n for p in processes:\n p.join()", "def _fill_reserved(self) -> None:\n\n mpi_like_settings = [\n MpirunSettings,\n MpiexecSettings,\n OrterunSettings,\n PalsMpiexecSettings,\n ]\n for settings in mpi_like_settings:\n self._reserved_run_args[settings] = [\n \"np\",\n \"N\",\n \"c\",\n \"output-filename\",\n \"n\",\n \"wdir\",\n \"wd\",\n \"host\",\n ]\n self._reserved_run_args[SrunSettings] = [\n \"nodes\",\n \"N\",\n \"ntasks\",\n \"n\",\n \"ntasks-per-node\",\n \"output\",\n \"o\",\n \"error\",\n \"e\",\n \"job-name\",\n \"J\",\n \"jobid\",\n \"multi-prog\",\n \"w\",\n \"chdir\",\n \"D\",\n ]\n self._reserved_run_args[AprunSettings] = [\n \"pes\",\n \"n\",\n \"pes-per-node\",\n \"N\",\n \"l\",\n \"pes-per-numa-node\",\n \"S\",\n \"wdir\",\n ]\n self._reserved_batch_args[SbatchSettings] = [\n \"nodes\",\n \"N\",\n \"ntasks\",\n \"n\",\n \"ntasks-per-node\",\n \"output\",\n \"o\",\n \"error\",\n \"e\",\n \"job-name\",\n \"J\",\n \"jobid\",\n \"multi-prog\",\n \"w\",\n \"chdir\",\n \"D\",\n ]\n self._reserved_batch_args[CobaltBatchSettings] = [\n \"cwd\",\n \"error\",\n \"e\",\n \"output\",\n \"o\",\n \"outputprefix\",\n \"N\",\n \"l\",\n \"jobname\",\n ]\n self._reserved_batch_args[QsubBatchSettings] = [\"e\", \"o\", \"N\", \"l\"]\n self._reserved_run_args[JsrunSettings] = [\n \"chdir\",\n \"h\",\n \"stdio_stdout\",\n \"o\",\n \"stdio_stderr\",\n \"k\",\n \"tasks_per_rs\",\n \"a\",\n \"np\",\n \"p\",\n \"cpu_per_rs\",\n \"c\",\n \"gpu_per_rs\",\n \"g\",\n \"latency_priority\",\n \"l\",\n \"memory_per_rs\",\n \"m\",\n \"nrs\",\n \"n\",\n \"rs_per_host\",\n \"r\",\n \"rs_per_socket\",\n \"K\",\n \"appfile\",\n \"f\",\n \"allocate_only\",\n \"A\",\n \"launch_node_task\",\n \"H\",\n \"use_reservation\",\n \"J\",\n \"use_resources\",\n \"bind\",\n \"b\",\n \"launch_distribution\",\n \"d\",\n ]\n\n self._reserved_batch_args[BsubBatchSettings] = [\n \"J\",\n \"o\",\n \"e\",\n \"m\",\n \"n\",\n \"nnodes\",\n ]", "def _setup_splits(self):\n #ntot = self.reredux_conf['nperfile']\n ntot = self.reredux_conf['Ngals']\n npersplit = self.runconf['nper']\n\n self.beglist, self.endlist = get_splits(ntot, npersplit)", "def prepare(self):\n per_col = 5\n spray_diameter = 10\n jids = []\n for i in range(self.gom_count):\n # Create JIDs\n gom_jid = f\"{settings.AGENT_NAMES['gom_base']}{i + 1}@{settings.HOST}\"\n tr_jid = f\"{settings.AGENT_NAMES['tr_base']}{i + 1}@{settings.HOST}\"\n jids.append((gom_jid, tr_jid))\n\n # Create GoM and TR positions\n y = (i % per_col) * 48 - 96\n x = int(i / per_col) * 64 - 32\n xo = random.gauss(0, spray_diameter)\n yo = random.gauss(0, spray_diameter)\n\n self.factory_map[gom_jid] = Point(x=float(x), y=float(y))\n self.tr_map[tr_jid] = Point(x=float(x + xo), y=float(y + yo))\n\n return jids", "def make_positions(\n night, runs, observatory, instrument, hlog, targets,\n skip_targets, tdata, posdata, load_old, retry,\n full, rname, smessages, fmessages, p2positions, okwrite\n):\n\n pdata = {}\n\n if load_old and os.path.exists(posdata):\n # pre-existing file found\n with open(posdata) as pin:\n for line in pin:\n arr = line.split()\n if len(arr) != 20:\n 
raise ValueError(\n f'Line = \"{line.strip()}\" from {posdata} had {len(arr)}!=20 items'\n )\n arr[3] = arr[3].replace('~',' ')\n pdata[arr[0]] = [\n '' if val == 'UNDEF' else val for val in arr[1:]\n ]\n print('Read position data from',posdata)\n\n if not retry:\n return pdata\n\n with open(posdata if okwrite else os.devnull,'w') as pout:\n for run in runs:\n\n if len(tdata[run]) == 1:\n # means its a power on/off\n continue\n\n if run in pdata and pdata[run][0] != '':\n # Already have positional data which we will\n # not re-do, so just write out to disk\n arr = ['UNDEF' if val == '' else val for val in pdata[run]]\n arr[2] = arr[2].replace(' ','~')\n pout.write(\n f\"{run} {arr[0]} {arr[1]} {arr[2]} {arr[3]} {arr[4]} \" +\n f\"{arr[5]} {arr[6]} {arr[7]} {arr[8]} {arr[9]} {arr[10]} \" +\n f\"{arr[11]} {arr[12]} {arr[13]} {arr[14]} {arr[15]} \" +\n f\"{arr[16]} {arr[17]} {arr[18]}\\n\"\n )\n continue\n\n recomp = True\n\n # Now going to try to work stuff out\n\n if full:\n print(f'Analysing positions for run {run}')\n\n # open the run file as an Rhead\n runname = os.path.join(night, run)\n try:\n if instrument == 'HiPERCAM':\n rhead = hcam.hcam.Rhead(runname)\n else:\n rhead = hcam.ucam.Rhead(runname)\n except:\n if full:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1)\n traceback.print_exc()\n print(f\"Failed to open {runname} as an Rhead\")\n continue\n\n # object name\n if hlog.format == 1:\n target = hlog.target[run]\n elif instrument == 'HiPERCAM':\n target = rhead.header.get(\"OBJECT\",'')\n else:\n target = rhead.header.get(\"TARGET\",'')\n target = target.strip().replace('~',' ')\n\n # RA, Dec lookup\n if target == '' or target in skip_targets:\n # don't even try\n autoid, ra, dec = 'UNDEF', 'UNDEF', 'UNDEF'\n recomp = False\n else:\n try:\n # See if we already have the info stored\n autoid, ra, dec = targets(target)\n except:\n # apparently we don't ...\n try:\n # attempt simbad lookup here\n autoid, ra, dec = target_lookup(target)\n targets.add_target(target, ra, dec, autoid)\n print(f' Added {target} to targets')\n pos = SkyCoord(f'{ra} {dec}',unit=(u.hourangle, u.deg))\n\n # save successful SIMBAD-based lookup\n smessages.append(\n f\"{autoid.replace(' ','~'):32s} \" +\n f\"{pos.to_string('hmsdms',sep=':',precision=2)} \" +\n f\"{target.replace(' ','~')}\"\n )\n\n except:\n if target in p2positions:\n # data loaded at the phase II stage -- last resort\n ra, dec = p2positions[target]\n print(f' Found {target} in phaseII data at RA={ra}, Dec={dec}')\n pos = SkyCoord(f'{ra} {dec}',unit=(u.hourangle, u.deg))\n targets.add_target(target, pos.ra.hour, pos.dec.value, target)\n autoid, ra, dec = targets(target)\n\n # save successful lookups\n smessages.append(\n f\"{target.replace(' ','~'):32s} \" +\n f\"{pos.to_string('hmsdms',sep=':',precision=2)} \" +\n f\"{target.replace(' ','~')}\"\n )\n\n else:\n # nothing worked\n print(\n f' No position found for {runname}, target = \"{target}\"'\n )\n autoid, ra, dec = 'UNDEF', 'UNDEF', 'UNDEF'\n skip_targets.append(target)\n\n # save in suitable format for adding to FAILED_TARGETS if wanted.\n fmessages.append(\n f\"{target.replace(' ','~'):32s} {rname} {night} {run}\"\n )\n recomp = False\n\n if not recomp and run in pdata:\n # can save a stack of time by not recomputing any Sun / Moon stuff\n arr = ['UNDEF' if val == '' else val for val in pdata[run]]\n arr[2] = arr[2].replace(' ','~')\n pout.write(\n f\"{run} {arr[0]} {arr[1]} {arr[2]} {arr[3]} {arr[4]} \" +\n f\"{arr[5]} {arr[6]} 
{arr[7]} {arr[8]} {arr[9]} {arr[10]} \" +\n f\"{arr[11]} {arr[12]} {arr[13]} {arr[14]} {arr[15]} \" +\n f\"{arr[16]} {arr[17]} {arr[18]}\\n\"\n )\n continue\n\n # start accumulating stuff to write out\n arr = [ra, dec, autoid]\n\n if ra == 'UNDEF' and dec == 'UNDEF' and instrument == 'ULTRASPEC':\n # for altitude / Sun / Moon stuff, telescope position\n # is good enough, so this is one final go at getting a\n # usable position.\n hd = rhead.header\n\n ra = hd.get(\"RA\", \"UNDEF\")\n dec = hd.get(\"Dec\", \"UNDEF\")\n if ra != 'UNDEF' and dec != 'UNDEF':\n try:\n ra, dec, syst = str2radec(ra + ' ' + dec)\n except:\n pass\n\n # time-dependent info\n ut_start, mjd_start, ut_end, mjd_end, cadence, \\\n expose, nok, ntotal = tdata[run]\n\n try:\n\n mjd_start = float(mjd_start)\n mjd_end = float(mjd_end)\n tstart = Time(mjd_start, format='mjd')\n tmid = Time((mjd_start+mjd_end)/2, format='mjd')\n tend = Time(mjd_end, format='mjd')\n\n # Scale Sun-Moon angle at mid time (0 = New Moon, 1 =\n # Full)\n sun_mid = get_sun(tmid)\n moon_mid = get_moon(tmid)\n sun_moon = sun_mid.separation(moon_mid).degree / 180\n\n if ra != 'UNDEF' and dec != 'UNDEF':\n\n # Calculate the Alt, Az at start, middle, end\n frames = AltAz(obstime=[tstart,tmid,tend], location=observatory)\n pos = SkyCoord(f'{ra} {dec}',unit=(u.hourangle, u.deg))\n points = pos.transform_to(frames)\n alts = [round(alt,1) for alt in points.alt.degree]\n azs = [round(az,1) for az in points.az.degree]\n arr += alts + azs\n\n # Calculate range of airmasses\n seczs = np.array([float(secz) for secz in points.secz])\n secz_min, secz_max = seczs.min(), seczs.max()\n\n # Need to check for meridian crossing, and if it happens\n # we need to close in on it\n sinas = [np.sin(az) for az in points.az]\n if sinas[0] > 0 and sinas[2] < 0:\n s1, s2 = sinas[0], sinas[2]\n t1, t2 = tstart, tend\n if sinas[1] > 0:\n s1 = sinas[1]\n t1 = tmid\n else:\n s2 = sinas[1]\n t2 = tmid\n while s1 - s2 > 0.0005:\n tguess = t1 + s1/(s1-s2)*(t2-t1)\n frame = AltAz(obstime=tguess, location=observatory)\n point = pos.transform_to(frame)\n sina = np.sin(point.az)\n if sina > 0:\n s1 = sina\n t1 = tguess\n else:\n s2 = sina\n t2 = tguess\n secz_min = float(point.secz)\n\n dsecz = round(secz_max-secz_min,2)\n arr += [round(secz_min,2), round(secz_max,2), dsecz]\n\n # Now calculate the angular distance from the Sun\n # and Moon at the mid-time\n sun_mid_trans = sun_mid.transform_to(frames[1])\n moon_mid_trans = moon_mid.transform_to(frames[1])\n point_mid = points[1]\n sun_dist = point_mid.separation(sun_mid_trans).degree\n moon_dist = point_mid.separation(moon_mid_trans).degree\n arr += [round(sun_dist,1),round(moon_dist,1)]\n\n else:\n arr = arr[:3] + 11*['UNDEF']\n\n # Now some data on the altitude of the Sun & Moon\n frame = AltAz(obstime=tstart, location=observatory)\n sun_start = get_sun(tstart).transform_to(frame)\n moon_start = get_moon(tstart).transform_to(frame)\n\n # end\n frame = AltAz(obstime=tend, location=observatory)\n sun_end = get_sun(tend).transform_to(frame)\n moon_end = get_moon(tend).transform_to(frame)\n\n arr += [\n round(sun_start.alt.degree,1), round(sun_end.alt.degree,1),\n round(moon_start.alt.degree,1), round(moon_end.alt.degree,1),\n round(sun_moon,3),\n ]\n\n except:\n if full:\n print(f\"Problem on run = {run}\")\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(\n exc_traceback, limit=1, file=sys.stdout\n )\n traceback.print_exc(file=sys.stdout)\n\n # write out info\n arr = arr[:3] + 16*['UNDEF']\n\n arr[2] = 
arr[2].replace(' ','~')\n pout.write(\n f\"{run} {arr[0]} {arr[1]} {arr[2]} {arr[3]} {arr[4]} \" +\n f\"{arr[5]} {arr[6]} {arr[7]} {arr[8]} {arr[9]} {arr[10]} \" +\n f\"{arr[11]} {arr[12]} {arr[13]} {arr[14]} {arr[15]} \" +\n f\"{arr[16]} {arr[17]} {arr[18]}\\n\"\n )\n\n arr[2] = arr[2].replace('~',' ')\n pdata[run] = [\n '' if val == 'UNDEF' else val for val in arr\n ]\n\n if okwrite:\n print('Written positional data to',posdata)\n\n return pdata", "def write_pbs(self):\n fout = open(\"runStarCCM.pbs\", \"w\")\n fout.write(\"#PBS -S /bin/csh\\n\")\n fout.write(\"#PBS -l select=\" + str(self.numNodes) + \":ncpus=\" + str(self.numCPUs) + \":mpiprocs=\" + str(self.mpiProcs) + \":model=has,walltime=\" + self.WallTime + \"\\n\\n\")\n fout.write(\"#PBS -W group_list=\" + self.GroupID + \"\\n\")\n fout.write(\"#PBS -j oe\\n\")\n fout.write(\"#PBS -q \" + self.queue + \"\\n\")\n fout.write(\"#PBS -N \" + self.jobName + \"\\n\")\n fout.write(\"#PBS -m e\\n\")\n fout.write(\"#PBS -W block=true\\n\\n\")\n fout.write(\"cd $PBS_O_WORKDIR\\n\")\n\n if self.runVolGrid == 1:\n #fout.write(\"/bin/rm -f \" + self.simMeshFile + \".sim\\n\")\n fout.write(\"/bin/rm -f starccmMeshRun.out\\n\")\n fout.write(\"chmod u+x \" + self.cshBatch1File + \".csh\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch1File + \".csh -powerOnDemand \" + self.javaBatch1File + \".java >& starccmMeshRun.out\\n\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a mesh run.'\\n\")\n\n if self.runCFD == 1:\n fout.write(\"chmod u+x \" + self.cshBatch2File + \".csh\\n\")\n fout.write(\"/bin/rm -f *.csv *.png starccmFlowRun.out\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch2File + \".csh -powerOnDemand \" + self.javaBatch2File + \".java \" + self.simMeshFile + \" >& starccmFlowRun.out\\n\\n\")\n fout.write(\"# rename the strange file names\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.csv ForceX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.csv ForceY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.csv ForceZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.csv MomentX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.csv MomentY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.csv MomentZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.csv Residuals.csv\\n\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.png ForceX.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.png ForceY.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.png ForceZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.png MomentX.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.png MomentY.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.png MomentZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.png Residuals.png\\n\")\n fout.write(\"/bin/mv \\$PWDUpperCp.png UpperCp.png\\n\")\n fout.write(\"/bin/mv \\$PWDLowerCp.png LowerCp.png\\n\")\n fout.write(\"/bin/rm -rf null\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a CFD run.'\\n\")\n\n fout.close()", "def __add_players_spawns(self):\n # Werewolves\n self.__grid[self.__werewolves_start[0]][self.__werewolves_start[1]][\"werewolves\"] \\\n = self.__number_of_beasts\n # Vampires\n self.__grid[self.__vampires_start[0]][self.__vampires_start[1]][\"vampires\"] \\\n = self.__number_of_beasts", "def prepare_runs(args):\n output_directory = _prepare_output_dir(args.output_directory)\n z_score_dir = args.z_score_dir\n region_list = args.region_list \n if args.region_list is None:\n try:\n flanking_region = int(args.flanking_region)\n except 
ValueError:\n logging.error('Flanking region argument needs to be an integer')\n sys.exit(COMMAND_LINE_ERROR)\n build = args.build\n bed_directory = args.bed_directory\n # Create the SNPList\n try:\n min_maf = float(args.maf)\n except:\n logging.error(\"Min Maf -m or --min-maf needs to be an floating point number\")\n sys.exit(COMMAND_LINE_ERROR)\n if args.region_list is not None:\n region_list = {}\n snp_list = []\n with open(args.region_list) as input_file:\n # When using no flaking region SNP must be valid, but it doesn't actually matter what it is, need to ensure that is actually the case.\n for i, line in enumerate(input_file):\n rsid = str(i)+ \"_\" + ''.join(line.strip().split(\"\\t\"))\n chromosome = line.strip().split(\":\")[0] \n snp = Snp(chromosome,\"1\",rsid)\n snp_list.append(snp)\n region_list[snp.rsid] = line.strip()\n else:\n snp_list = SnpList(args.snp_list, build)\n logging.info(snp_list)\n # Locus to process\n # population_to_extract_vcf\n if not args.annotation_only:\n no_flanking = args.flanking_units\n if no_flanking:\n raise NotImplementedError(\"Using a number of flanking SNPs instead of a region is not supported\")\n populations= args.populations.split(',')\n logging.info(\"Populations to process: {0}\".format(populations))\n loci = []\n gemini_databases = []\n output_vcfs = []\n for snp in snp_list:\n logging.info('Preparing output files for SNP {0}'.format(snp.rsid))\n locus = snp.rsid\n loci.append(locus)\n logging.info(\"Obtaining VCF file from the 1000 genomes project\")\n if region_list is not None:\n vcf = get_vcf_file(snp, string_region=region_list[locus])\n else: \n vcf = get_vcf_file(snp, flanking_region=flanking_region)\n for population in populations:\n tmp_vcf = extract_population_from_1000_genomes(vcf=vcf, super_population=population)\n z_score_file = get_relevant_zscore(snp.chrom, population, z_score_dir)\n pos_list_zscore = create_pos_hash_table(z_score_file)\n output_vcf = generate_zscore_and_vcf_output(output_directory=output_directory, zscore_hash=pos_list_zscore, vcf=tmp_vcf, locus=locus,population=population, multiply_rsquare=args.multiply_rsquare)\n if bed_directory is None:\n logging.info(\"Creating gemini database\")\n # TODO: Fix broxen gemini referenec\n gemini_databases.append(create_gemini_database(vcf=output_vcf))\n vcf_to_plink(locus, output_directory=output_directory, vcf=output_vcf, population=population)\n plink_to_ld_matrix(locus, output_directory=output_directory, population=population)\n logging.info(\"Generate transancestrals matrices\")\n generate_transancestral_output(loci, populations, output_directory)\n if bed_directory is None:\n logging.info(\"Generating annotation matrices to be used with Paintor\")\n logging.info(gemini_databases)\n generate_and_write_encode_annotations(databases=gemini_databases, output_directory=output_directory, loci=snp_list)\n else:\n logging.info(\"Annotation using bed files\")\n generate_bed_file_annotations(loci=loci, bed_directory=bed_directory, output_directory=output_directory) \n # So finally we need to fix the LD matrices for inputting into PAINTOR. 
\n\n with open(os.path.join(output_directory, 'input.files'), 'w') as out_f:\n for snp in snp_list:\n out_f.write(snp.rsid +'\\n')\n # Remove .tbi files\n for file in os.listdir('.'):\n if fnmatch.fnmatch(file, '*.tbi'):\n try:\n os.remove(file)\n except OSError:\n logging.warning(\"Could not remove a .tbi file from the 1000 genomes tabix run\")\n else: \n loci = []\n for snp in snp_list:\n loci.append(snp.rsid)\n if bed_directory is not None:\n logging.info(\"Annotation using bed files\")\n generate_bed_file_annotations(loci=loci, bed_directory=bed_directory, output_directory=output_directory) \n logging.info(\"Finemapping file preparation complete\")", "def prepare():\n # getRelativeArt checks if scene is saved\n skeleton_path = paths.getRelativeArt()\n\n # if scene is modified, ask user if they would like to save, not save, or cancel operation\n if not uiwindow.save():\n pm.error('Scene not saved.')\n\n # perform a bone health check before referencing to emphasize any possible errors\n bone.health()\n\n # create new file, reference the skeleton into the new file, create rig group\n pm.newFile(force=True)\n rig_grp = pipernode.createRig()\n pm.createReference(skeleton_path, namespace=pcfg.skeleton_namespace)\n pm.createReference(skeleton_path, namespace=pcfg.bind_namespace)\n skinned_meshes = pipernode.get('piperSkinnedMesh')\n [node.visibility.set(False) for node in skinned_meshes if node.name().startswith(pcfg.bind_namespace)]\n pm.parent(skinned_meshes, rig_grp)\n lockMeshes()\n\n return rig_grp", "def prepare_rw_jobs(self, repeats):\n \n \n #The tasks we need to go through to append the report output\n taskList = [\n 'steadyState',\n 'timeCourse',\n 'scan',\n 'metabolicControlAnalysis',\n 'optimization',\n 'parameterFitting',\n 'fluxMode',\n 'lyapunovExponents',\n 'timeScaleSeparationAnalysis',\n 'sensitivities',\n 'moieties'\n ]\n \n \n task_report_targets = {} #Store the report output targets \n #Create a new COPASI file for each repeat\n #Keep a note of the output files we're creating\n model_files = []\n output_files = []\n for i in range(repeats):\n #For each task, if the report output is set, append it with '_i'\n for taskName in taskList:\n try:\n task = self._getTask(taskName)\n report = task.find(xmlns + 'Report')\n if i==0:\n task_report_targets[taskName] = report.attrib['target']\n report.attrib['target'] = str(i) + '_' + task_report_targets[taskName]\n if i==0:\n if task.attrib['scheduled'] == 'true':\n output_files.append(task_report_targets[taskName])\n \n except:\n pass #It's possible not every task has a report set. If this is the case, ignore it!\n \n filename = 'auto_copasi_1.%d.cps'%i\n target = os.path.join(self.path, filename)\n model_files.append(filename)\n \n self.write(target)\n \n return model_files, output_files", "def init_processes(rank, size, backend='gloo'):\n os.environ['MASTER_ADDR'] = '12.12.10.13'\n os.environ['MASTER_PORT'] = '29500'\n dist.init_process_group(backend, rank=rank, world_size=size)", "def prepare_sp_jobs(self, no_of_jobs, skip_load_balancing=False, custom_report=False):\n import shutil\n i=0\n \n #ALTER -- Bring back skip_load_balancing? 
\n \n \n #Benchmarking.\n #As per usual, first calculate how long a single parameter fit will take\n \n self._clear_tasks() #Program stops here.\n \n \n fitTask = self._getTask('parameterFitting')\n fitTask.attrib['scheduled'] = 'true'\n fitTask.attrib['updateModel'] = 'false'\n \n #Even though we're not interested in the output at the moment, we have to set a report for the parameter fitting task, or Copasi will complain!\n #Only do this if custom_report is false\n #if not custom_report:\n #Create a new report for the or task\n report_key = 'condor_copasi_parameter_fitting_repeat_report'\n self._create_report('SP', report_key, 'auto_pr_report')\n \n #And set the new report for the or task\n fitReport = fitTask.find(xmlns + 'Report')\n if custom_report:\n custom_report_key = fitReport.attrib['reference']\n \n \n #If no report has yet been set, report == None. Therefore, create new report\n if fitReport == None:\n fitReport = etree.Element(xmlns + 'Report')\n fitTask.insert(0,fitReport)\n \n \n #if not custom_report:\n fitReport.set('reference', report_key)\n \n fitReport.set('append', '1')\n fitReport.set('target', 'copasi_temp_output.txt') \n \n '''\n if not skip_load_balancing:\n import tempfile\n tempdir = tempfile.mkdtemp()\n \n temp_filename = os.path.join(tempdir, 'auto_copasi_temp.cps')\n \n #Copy the data file(s) over to the temp dir\n import shutil\n for data_file_line in open(os.path.join(self.path, 'data_files_list.txt'),'r'):\n data_file = data_file_line.rstrip('\\n')\n shutil.copy(os.path.join(self.path, data_file), os.path.join(tempdir, data_file))\n \n #Write a temp XML file\n self.write(temp_filename)\n \n #Note the start time\n start_time = time.time()\n self._copasiExecute(temp_filename, tempdir, timeout=int(settings.IDEAL_JOB_TIME*60))\n finish_time = time.time()\n time_per_step = finish_time - start_time\n \n #Remove the temp directory tree\n shutil.rmtree(tempdir)\n\n \n #We want to split the scan task up into subtasks of time ~= 10 mins (600 seconds)\n #time_per_job = repeats_per_job * time_per_step => repeats_per_job = time_per_job/time_per_step\n \n time_per_job = settings.IDEAL_JOB_TIME * 60\n \n #Calculate the number of repeats for each job. If this has been calculated as more than the total number of steps originally specified, use this value instead\n repeats_per_job = min(int(round(float(time_per_job) / time_per_step)), repeats)\n\n else:\n repeats_per_job = 1\n #no_of_jobs = int(math.ceil(float(repeats) / repeats_per_job))\n '''\n ############\n #Job preparation\n ############\n \n self._clear_tasks() #This also stops the program.\n \n \n fitReport.attrib['target'] = ''\n # Hack - Copasi does not update parameters if only update model set in scan, so we have to set it also in parameterFitting task\n #Get the parameter estimation task\n fitTask = self._getTask('parameterFitting')\n\n\n fitTask.attrib['updateModel'] = 'false'\n #Get the scan task\n scanTask = self._getTask('scan')\n \n \n scanTask.attrib['scheduled'] = 'true'\n scanTask.attrib['updateModel'] = 'false'\n\n #Set the new report for the scan task\n report = scanTask.find(xmlns + 'Report')\n \n #If no report has yet been set, report == None. 
Therefore, create new report\n if report == None:\n report = etree.Element(xmlns + 'Report')\n scanTask.insert(0,report)\n \n \n if custom_report:\n report.set('reference', custom_report_key)\n \n else:\n report.set('reference', report_key)\n report.set('append', '1')\n \n #Prepare the scan task\n #Open the scan problem, and clear any subelements\n scan_problem = scanTask.find(xmlns + 'Problem')\n scan_problem.clear()\n \n \n #Add a subtask parameter (value 5 for parameter estimation)\n subtask_parameter = etree.SubElement(scan_problem, xmlns + 'Parameter')\n subtask_parameter.attrib['name'] = 'Subtask'\n subtask_parameter.attrib['type'] = 'unsignedInteger'\n subtask_parameter.attrib['value'] = '5'\n \n \n #Add a single ScanItem for the repeats\n subtask_pg = etree.SubElement(scan_problem, xmlns + 'ParameterGroup')\n subtask_pg.attrib['name'] = 'ScanItems'\n subtask_pg_pg = etree.SubElement(subtask_pg, xmlns + 'ParameterGroup')\n subtask_pg_pg.attrib['name'] = 'ScanItem'\n p1 = etree.SubElement(subtask_pg_pg, xmlns+'Parameter')\n p1.attrib['name'] = 'Number of steps'\n p1.attrib['type'] = 'unsignedInteger'\n p1.attrib['value'] = '0'# Assign this later\n\n \n p2 = etree.SubElement(subtask_pg_pg, xmlns+'Parameter')\n p2.attrib['name'] = 'Type'\n p2.attrib['type'] = 'unsignedInteger'\n p2.attrib['value'] = '0'\n \n p3 = etree.SubElement(subtask_pg_pg, xmlns+'Parameter')\n p3.attrib['name'] = 'Object'\n p3.attrib['type'] = 'cn'\n p3.attrib['value'] = ''\n \n p4 = etree.SubElement(scan_problem, xmlns+'Parameter')\n p4.attrib['name'] = 'Output in subtask'\n p4.attrib['type'] = 'bool'\n p4.attrib['value'] = '1'\n \n p5 = etree.SubElement(scan_problem, xmlns+'Parameter')\n p5.attrib['name'] = 'Adjust initial conditions'\n p5.attrib['type'] = 'bool'\n p5.attrib['value'] = '0'\n \n \n ############\n #Prepare the Copasi files\n ############\n \n repeat_count = 0\n \n for j in range(no_of_jobs):\n '''if repeats_per_job + repeat_count > repeats:\n no_of_repeats = repeats - repeat_count\n else:\n no_of_repeats = repeats_per_job\n repeat_count += no_of_repeats\n '''\n #Set the number of repeats for the scan task\n #ALTER think this should be 1.\n p1.attrib['value'] = str(1)\n #And the report target output\n report.attrib['target'] = str(j) + '_out.txt'\n \n filename = os.path.join(self.path, str(j), 'auto_copasi_' + str(j) +'.cps')\n self.write(filename)\n \n return no_of_jobs", "def call_pssm(self):\n pdb_to_fasta(self.simple_name_A)\n psiblast_string_A = self.psiblast_path + \" -query \" + self.pdb_name + \"_\" + self.chains[0] + \".fasta\" \\\n + \" -evalue 0.001 -num_iterations 3 -db \" + self.nr_path + \" -outfmt 5 -out \" \\\n + \"output/pssm_\" + self.pdb_name + \"_\" + self.chains[0] + \".txt -out_ascii_pssm output/pssm_\" \\\n + self.pdb_name + \"_\" + self.chains[0] + \".pssm -num_threads 40\"\n os.system(psiblast_string_A)\n\n pdb_to_fasta(self.simple_name_B)\n psiblast_string_B = self.psiblast_path + \" -query \" + self.pdb_name + \"_\" + self.chains[1] + \".fasta\" \\\n + \" -evalue 0.001 -num_iterations 3 -db \" + self.nr_path + \" -outfmt 5 -out \" \\\n + \"output/pssm_\" + self.pdb_name + \"_\" + self.chains[1] + \".txt -out_ascii_pssm output/pssm_\" \\\n + self.pdb_name + \"_\" + self.chains[1] + \".pssm -num_threads 40\"\n os.system(psiblast_string_B)", "def main():\n\n # Run each creep\n for name in Object.keys(Game.creeps):\n creep = Game.creeps[name]\n harvester.run_harvester(creep)\n\n # Run each spawn\n for name in Object.keys(Game.spawns):\n spawn = Game.spawns[name]\n if not 
spawn.spawning:\n # Get the number of our creeps in the room.\n num_creeps = _.sum(Game.creeps, lambda c: c.pos.roomName == spawn.pos.roomName)\n # If there are no creeps, spawn a creep once energy is at 250 or more\n if num_creeps < 0 and spawn.room.energyAvailable >= 250:\n spawn.createCreep([WORK, CARRY, MOVE, MOVE])\n # If there are less than 15 creeps but at least one, wait until all spawns and extensions are full before\n # spawning.\n elif num_creeps < 15 and spawn.room.energyAvailable >= spawn.room.energyCapacityAvailable:\n # If we have more energy, spawn a bigger creep.\n if spawn.room.energyCapacityAvailable >= 350:\n spawn.createCreep([WORK, CARRY, CARRY, MOVE, MOVE, MOVE])\n else:\n spawn.createCreep([WORK, CARRY, MOVE, MOVE])", "def makeSetup(self):\n startingRanks = [FLAG, SPY, SCOUT, SCOUT, MINER, MINER, GENERAL, MARSHALL, BOMB, BOMB]\n startingSpots = random.sample(self.getStartSpots(), len(startingRanks))\n pieces = []\n for i in range(len(startingRanks)):\n pieces += [Piece(startingRanks[i], startingSpots[i], self.index)]\n # print [(str(p), p.position) for p in pieces]\n return pieces", "def prepare_process(self):\n max_wallclock_seconds = self.ctx.inputs.metadata.options.get('max_wallclock_seconds', None)\n\n if max_wallclock_seconds is not None and 'time_limit' not in self.ctx.inputs.parameters['INPUT_XSPECTRA']:\n self.set_max_seconds(max_wallclock_seconds)\n\n if self.ctx.restart_calc:\n self.ctx.inputs.parameters['INPUT_XSPECTRA']['restart_mode'] = 'restart'\n self.ctx.inputs.parent_folder = self.ctx.restart_calc.outputs.remote_folder", "def init_processes(rank, run_id, hosts, backend='gloo'):\n hosts = hosts.split(',')\n os.environ['MASTER_ADDR'] = hosts[0] # first worker is the master worker\n os.environ['MASTER_PORT'] = '29500'\n world_size = len(hosts)\n os.environ['WORLD_SIZE'] = str(world_size)\n os.environ['RANK'] = str(rank)\n dist.init_process_group(backend, rank=rank, world_size=world_size)\n run(rank, world_size, run_id)", "def __createSPWSeparationCommands(self):\n\n # Get a unique list of selected spws \n self.__selectMS()\n spwList = self.__getSPWUniqueList()\n numSubMS = self._arg['numsubms']\n if isinstance(numSubMS,str) and numSubMS == 'auto':\n # Create the best load balance based on the number of nodes\n numSubMS = self.getNumberOfServers()\n if numSubMS == None:\n numSubMS = 8\n numSubMS = min(len(spwList),numSubMS)\n\n # Get a dictionary of the spws parted for each subMS\n spwList = map(str,spwList)\n partitionedSPWs1 = self.__partition1(spwList,numSubMS)\n\n # Add the channel selections back to the spw expressions\n newspwsel = self.__createSPWExpression(partitionedSPWs1)\n \n # Validate the chanbin parameter\n validbin = False\n parname = self.getChanAvgParamName()\n if self.validateChanBin():\n if isinstance(self._arg[parname],list):\n freqbinlist = self.__partition1(self._arg[parname],numSubMS)\n validbin = True\n \n # Calculate the ddistart for each engine. 
This will be used\n # to calculate the DD IDs of the output main table of the subMSs\n ddistartlist = self.__calculateDDIstart({}, partitionedSPWs1)\n if (len(ddistartlist) != len(partitionedSPWs1)):\n casalog.post('Error calculating the ddistart indices','SEVERE')\n raise\n \n for output in xrange(numSubMS):\n mmsCmd = copy.copy(self._arg)\n mmsCmd['createmms'] = False\n if self.__selectionScanList is not None:\n mmsCmd['scan'] = ParallelTaskHelper.\\\n listToCasaString(self.__selectionScanList) \n mmsCmd['spw'] = newspwsel[output]\n if validbin:\n mmsCmd[parname] = freqbinlist[output]\n \n self.__ddistart = ddistartlist[output]\n mmsCmd['ddistart'] = self.__ddistart\n mmsCmd['outputvis'] = self.dataDir+'/%s.%04d.ms' \\\n % (self.outputBase, output)\n\n # Dictionary for the spw/ddi consolidation later\n self.__ddidict[self.__ddistart] = self.dataDir+'/%s.%04d.ms' \\\n % (self.outputBase, output)\n\n if not self._mpi_cluster:\n self._executionList.append(JobData(self._taskName, mmsCmd))\n else:\n self._executionList.append([self._taskName + '()',mmsCmd])", "def create_ptx(self):\n\n self.lower_pmos_inst=self.add_inst(name=\"lower_pmos\",\n mod=self.pmos)\n self.connect_inst([\"bl\", \"en\", \"br\", \"vdd\"])\n\n self.upper_pmos1_inst=self.add_inst(name=\"upper_pmos1\",\n mod=self.pmos)\n self.connect_inst([\"bl\", \"en\", \"vdd\", \"vdd\"])\n\n self.upper_pmos2_inst=self.add_inst(name=\"upper_pmos2\",\n mod=self.pmos)\n self.connect_inst([\"br\", \"en\", \"vdd\", \"vdd\"])", "def prep_matlab(self):\n #allparams = self.__dict__ #NOTE: change to include just needed parameters\n #allparams.update(self.Set.__dict__)\n #print allparams\n # Quick Fix\n if not os.path.isdir(self.ProcDir): os.mkdir(self.ProcDir)\n if not os.path.isdir(self.OutDir): os.mkdir(self.OutDir)\n settings = {'DataDir':self.DataDir,\n 'ProcDir':self.ProcDir,\n 'ScriptDir':self.ScriptDir,\n 'OutDir':self.OutDir,\n 'AuxDir':self.AuxDir,\n 'Cothresh':self.Cothresh,\n 'Igthresh':self.Igthresh,\n 'Damping':self.Damping,\n 'Width':self.Set.Width,\n 'Length':self.Set.Length,\n 'Dates':'\\n'.join(self.Set.Dates.astype('S8')),\n 'DatesSerial':'\\n'.join(self.Set.DatesSerial.astype('S8')),\n 'TimeIntervals':'\\n'.join(self.Set.TimeIntervals.astype('S4')),\n 'TimeIndex':self.Set.TimeIndexString,\n 'Pairs':'\\n'.join(self.Set.PairsString),\n 'PairsSerial':'\\n'.join(self.Set.PairsSerialString),\n #'Names':'\\n'.join(self.Set.Names),\n #'Paths':'\\n'.join(self.Set.Names),\n 'ChronList':'\\n'.join(self.Set.ChronList),\n 'Omissions':'\\n'.join(self.Set.Omissions),\n 'Tandems':'\\n'.join(self.Set.Tandems)}\n\n fullpath = os.path.join(self.RunDir,'defaults.m')\n prerun = open(fullpath, 'w')\n prerun.write(\n\"\"\"\n%% Automatically created parameters file for RunTS.m\n%% created with roi_py.py\n%% =============================================================================\n%% Raw Data Directory\ndataDir = '{DataDir}';\n%% Masked/Tweaked Data Directory\nprocDir = '{ProcDir}';\n%% Output directory\noutDir = '{OutDir}';\n%% Scripts directory\nscriptDir = '{ScriptDir}';\n%% Auxilary files directory\nauxDir = '{AuxDir}';\n\n%% Coherence threshold (pixels with coherence less than 'maskthresh' will be\n%% marked as NaNs for scrapping or interpolation if desired.\nmaskThresh = {Cothresh};\n\n%% IGdensity threshold (pixels with # of non-interpolated data points less\n%% than IGthresh will be set to NaN in deformation_mod.m\nigThresh = {Igthresh};\n\n%% WDLS damping term in inversion_mod.m\ndamping = {Damping};\n\n%% Master scene dimensions\nwidth 
= {Width};\nleng = {Length};\n\n%% List of SAR acquisition dates for interferogram set\ndates = [{Dates}];\n\n%% SAR acquisition dates in python 'datetime' serial format\ndatesSerial = [{DatesSerial}];\n\n%% Number of days between consecutive SAR acquisitions\ndt = [{TimeIntervals}];\n\n%% Time Index\ntimeIndex = [{TimeIndex}];\n\n%% Interferogram master & slave dates\nigrams = [{Pairs}];\n\n%% Interferogram master & slave dates in serial format\nigramsSerial = [{PairsSerial}];\n\n%% Chronological list of interferogram file names used in matlab routines\nigramsList = [{ChronList}];\n\n%% User-specified ommissions\nomitList = [{Omissions}];\n\n%% Tandem pairs = [{Tandems}];\n\"\"\".format(**settings))\n prerun.close()\n print('Wrote %s, ready for RunTS.m' % fullpath)\n\n #pickle the omissions list for easy re-use later\n #NOTE: ultimately write this all in python and use input/output ascii files\n if hasattr(self,'Omissions'):\n pickle.dump(list(self.Omissions.keys()), os.path.join(self.RunDir,'omissions.p'))\n #to reload set10.omit(IG=pickle.load('omissions.p'))", "def __createScanSeparationCommands(self):\n \n scanList = self.__selectionScanList\n if scanList is None:\n self.__selectMS()\n scanList = self.__getScanList()\n\n # Make sure we have enough scans to create the needed number of\n # subMSs. If not change the total expected.\n numSubMS = self._arg['numsubms']\n if isinstance(numSubMS,str) and numSubMS == 'auto':\n # Create the best load balance based on the number of nodes\n numSubMS = self.getNumberOfServers()\n if numSubMS == None:\n numSubMS = 8\n numSubMS = min(len(scanList),numSubMS)\n \n partitionedScans = self.__partition(scanList, numSubMS) \n for output in xrange(numSubMS):\n mmsCmd = copy.copy(self._arg)\n mmsCmd['createmms'] = False\n mmsCmd['scan']= ParallelTaskHelper.\\\n listToCasaString(partitionedScans[output]) \n mmsCmd['outputvis'] = self.dataDir+'/%s.%04d.ms' \\\n % (self.outputBase, output)\n if not self._mpi_cluster:\n self._executionList.append(JobData(self._taskName, mmsCmd))\n else:\n self._executionList.append([self._taskName + '()',mmsCmd])", "def __createNoSeparationCommand(self):\n\n submslist = ParallelTaskHelper.getReferencedMSs(self._arg['vis'])\n if len(submslist) == 0:\n raise ValueError, 'There are no subMSs in input vis'\n \n tbTool = tbtool()\n\n listOutputMS = []\n\n subMs_idx = 0\n for subMS in submslist:\n\n # make sure the SORTED_TABLE keywords are disabled\n tbTool.open(subMS, nomodify=False)\n if 'SORTED_TABLE' in tbTool.keywordnames():\n tobeDeleted = tbTool.getkeyword('SORTED_TABLE').split(' ')[1]\n tbTool.removekeyword('SORTED_TABLE')\n os.system('rm -rf '+tobeDeleted)\n \n tbTool.close() \n\n listOutputMS.append(self.dataDir+'/%s.%04d.ms' \\\n % (self.outputBase, subMs_idx))\n subMs_idx += 1\n\n # Override the original parameters\n self.override_arg('outputvis',listOutputMS)\n \n self._consolidateOutput = False\n \n # Add to the list of jobs to execute\n subMs_idx = 0\n for subMS in submslist:\n localArgs = copy.copy(self._arg)\n localArgs['vis'] = subMS\n for key in self._arguser:\n localArgs[key] = self._arguser[key][subMs_idx]\n \n if self._arg.has_key('createmms'):\n self._arg['createmms'] = False\n localArgs['createmms'] = False\n \n subMs_idx += 1\n if not self._mpi_cluster:\n self._executionList.append(JobData(self._taskName, localArgs))\n else:\n self._executionList.append([self._taskName + '()',localArgs])", "def new_tasks(self, extra):\n\n tasks = []\n\n try:\n fd = open(self.params.command_file)\n \n self.result_dir = 
os.path.dirname(self.params.output)\n \n for line in fd:\n command = line.strip()\n\n if not command:\n # ignore black lines\n continue\n\n cmd_args = _parse_command(command)\n \n # setting jobname\n jobname = \"gc_gps-%s%s%s%s%s\" % (cmd_args['pos'],\n cmd_args['realizations'],\n cmd_args['snr'],\n cmd_args['mast.h'],\n cmd_args['sd.mast.o'])\n\n extra_args = extra.copy()\n extra_args['jobname'] = jobname\n # FIXME: ignore SessionBasedScript feature of customizing \n # output folder\n extra_args['output_dir'] = self.params.output\n\n extra_args['output_dir'] = extra_args['output_dir'].replace('NAME', os.path.join('.computation',jobname))\n extra_args['output_dir'] = extra_args['output_dir'].replace('SESSION', os.path.join('.computation',jobname))\n extra_args['output_dir'] = extra_args['output_dir'].replace('DATE', os.path.join('.computation',jobname))\n extra_args['output_dir'] = extra_args['output_dir'].replace('TIME', os.path.join('.computation',jobname))\n\n self.log.debug(\"Creating Task for command: %s\" % command)\n\n tasks.append(GcgpsTask(\n command,\n self.params.R_source_folder,\n self.result_dir,\n self.params.input_dir,\n **extra_args))\n\n except IOError, ioe:\n self.log.error(\"Error while reading command file \" +\n \"%s.\" % self.params.command_file +\n \"Message: %s\" % ioe.message)\n except Exception, ex:\n self.log.error(\"Unexpected error. Error type: %s, Message: %s\" % (type(ex),str(ex)))\n\n finally:\n fd.close()\n\n return tasks", "def populateMasteredAssets(*args):\n #clear the lists first\n cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], e=True, ra=True)\n\n chars, props, sets = cFuncs.getProjectAssetList(pi.assetFolder)\n\n #check for rig masters\n for char in chars:\n cMstr = cFuncs.getAssetMaster(char, cFuncs.fixPath(os.path.join(pi.assetFolder, \"characters\", char)), \"rig\")\n if cMstr:\n cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], e=True, a=char, dcc=showAssetImage)\n for prop in props:\n pMstr = cFuncs.getAssetMaster(prop, cFuncs.fixPath(os.path.join(pi.assetFolder, \"props\", prop)), \"rig\") \n if pMstr:\n cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], e=True, a=prop, dcc=showAssetImage)\n for sett in sets:\n sMstr = cFuncs.getAssetMaster(sett, cFuncs.fixPath(os.path.join(pi.assetFolder, \"sets\", sett)), \"rig\") \n if sMstr:\n cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], e=True, a=sett, dcc=showAssetImage)\n\n #check for anim variants and masters\n varAnm = []\n shots = cFuncs.getProjectShotList(pi.currentProject)\n # print \"shotWin.populateMasteredAssets (line 937): shots =\", shots\n if shots:\n for shot in shots:\n shotVars = cFuncs.getShotVariantDict(os.path.join(pi.currentProject, \"shots\", shot))\n if shotVars[\"anm\"]:\n for anm in shotVars[\"anm\"]:\n aMstr = cFuncs.getVarMaster(cFuncs.fixPath(os.path.join(pi.currentProject, \"shots\", shot, \"anm\", anm)))\n #print cFuncs.fixPath(os.path.join(pi.currentProject, \"shots\", shot, \"anm\", anm))\n if aMstr: \n varAnm.append(\"{0}.{1}\".format(anm, shot))\n\n for av in varAnm:\n cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], e=True, a=av)\n\n populateSceneRefs()", "def bootstrap(self):\n\n\t\t#---paths.yaml specifies directories which might be absent so make them\n\t\tif not os.path.isdir(self.postdir): os.mkdir(self.postdir)\n\t\tif 
not os.path.isdir(self.plotdir): os.mkdir(self.plotdir)\n\t\t#---parse the simulations found in each \"spot\"\n\t\tfor spot in self.spots: self.treeparser(spot)\n\t\t#---if there is a part named edr then we use it to get simulation times\n\t\t#---! edr files are required to infer times for slicing however we might also use xtc or trr later\n\t\tassert 'edr' in zip(*self.spots.keys())[1]\n\t\tself.treeparser_edr()\n\t\t#---data are stored in dictionaries by spot name\n\t\tall_top_keys = [i for j in [k.keys() for k in self.toc.values()] for i in j]\n\n\t\t#---! under development\n\t\tfor key in ['post','groups','slices']:\n\t\t\tif key not in self.members_with_specific_parts:\n\t\t\t\tself.__dict__[key] = {i:{} for i in all_top_keys}\n\t\t\telse: self.__dict__[key] = {(spot,i):{} \n\t\t\t\tfor spot in self.toc for i in self.toc[spot]}\n\t\tself.save()", "def startService(self):\n super(SpawnerService, self).startService()\n for spawn in self.pendingSpawns:\n self.spawn(*spawn)\n self.pendingSpawns = []", "def populatereadylist():\n readyList.append(Process(\"P1\", time(0, 0, 1), time(0, 0, 4)))\n readyList.append(Process(\"P2\", time(0, 0, 2), time(0, 0, 6)))\n readyList.append(Process(\"P3\", time(0, 0, 3), time(0, 0, 2)))", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. 
ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)", "def createjob(args):\n ncell = args.ncell\n nmg = args.nmg\n nsi = args.nsi\n nvac = args.nvac\n a0 = args.a0\n temp = args.temp\n nseeds = args.nseeds\n seeds = args.seeds\n nsteps = args.nsteps\n foldername_append = args.foldername_append\n pot = args.pot\n submit = args.submit\n submitdebug = args.submitdebug\n submittime_hours = args.submittime_hours\n test = args.test\n testfiles = args.testfiles\n nodes = args.nodes\n verbose = args.verbose\n\n\n ### check if ase runner/quippy/lammpps-data formats are known\n ase_formats = mu.ase_get_known_formats_class(verbose=True)\n ase_formats.check_if_default_formats_known(copy_and_adapt_formatspy_anyhow=False)\n\n # definex ffsocket inet/unix\n if nodes == 1:\n ffsocket = \"unix\"\n elif nodes > 1:\n ffsocket = \"inet\"\n else:\n sys.exit(\"Number of nodes has to be positive!\")\n\n\n # define ntasks, neval\n lmp_par = 2 # = OMP_NUM_THREADS\n ntasks = cores = nodes * 28\n ipi_inst = 4 # for sure best on fidis\n neval = ipi_inst*2 # was alwasy better, for ompi and impi\n\n ##### get the seed(s).\n if type(seeds) == bool:\n seeds = random.sample(range(1, 999999), nseeds)\n print('seeds',seeds)\n if test == True:\n nseeds = 1\n seeds = [1]\n print('seeds',seeds)\n nseeds = len(seeds)\n\n ##### a few checks\n scripts = mu.scripts()\n mypot = mu.mypot(pot)\n if submit is True or submitdebug is True:\n hostcheck = os.environ[\"myhost\"]\n if hostcheck == \"\":\n sys.exit('host unknown 87')\n\n\n ##### here only chck if the potential can be set up. 
(in.lmp)\n ##### the same command is then executed for every kmc folder\n ace = mu.ase_calculate_ene(pot=pot,\n potpath=False,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket)\n\n ##### if test\n if test == True:\n nsteps = 50\n\n file_ipi_input_runner = scripts + \"/i-pi-mc_scripts/input-runner.xml\"\n\n\n ####################################\n # get directory\n ####################################\n if verbose:\n print(\"get directory\")\n pcsi = nsi/ncell**3.*100\n pcmg = nmg/ncell**3.*100\n pcvac = nvac/ncell**3.*100\n if args.cubic == True:\n pc = \"cubic\"\n else:\n pc = \"primitive\"\n directory = str(ncell)+\"x\"+str(ncell)+\"x\"+str(ncell)+\"_\"+pc+\"_\"+pot+\"_\"+\\\n str(temp)+\"K_\"+\\\n str(nvac)+\"Vac_\"+str(nmg)+\"Mg_\"+str(nsi)+\"Si__\"+\\\n str(round(pcvac,3))+\"pctVac_\"+str(round(pcmg,3))+\"pctMg_\"+str(round(pcsi,3))+\"pctSi\"\n if foldername_append != \"\":\n directory = directory+\"_\"+foldername_append\n\n ###############################################\n # make the structure\n ###############################################\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,create_fake_vacancy = True,cubic=args.cubic)\n atomsc = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,cubic=args.cubic)\n\n # make the atomic structure\n # this was to play ... not necessary now?\n if False:\n nndist = a0/np.sqrt(2.)\n\n from ase.io import read as ase_read\n from ase.io import write as ase_write\n\n ###############################################\n # get the amount of 1NN in a relly large cell\n ###############################################\n atomsc_fakevac_i = ase_read('dataxx.extxyz3',index=\":\",format='extxyz') # works, cell ist not changed\n #atomsc_fakevac_i = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=10,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=3.,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=8.5,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #sys.exit()\n\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('ipi')\n atomsc_fakevac_i = ase_read('dataxx.ipi2',index=\":\",format='ipi') # works, cell ist not changed\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('quippy')\n atomsc_fakevac_i = ase_read('dataxx.quippy.xyz2',index=\":\",format='quippy') # works, cell ist not changed\n\n\n\n filename = '../sim.xyz'\n filename = '../simulation.pos_0.xyz'\n mu.count_amount_1NN_around_vacancies(filename,cutoffa=nndist,cutoffb=a0,skin=0.1,format='ipi')\n sys.exit()\n\n def mysave_quippy_xyz(atomsc_fakevac,text=False):\n if type(text) == bool:\n sys.exit('define text')\n atomsc_fakevac.write('data.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data.xyz',format=\"extxyz\",append=True)\n atomsc_fakevac.write('data'+text+'.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data'+text+'.xyz',format=\"extxyz\",append=True)\n return\n\n # create Al with single vacancy\n atomsc_fakevac = 
mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n #print('from ....',(atomsc_fakevac.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac.positions)[i])\n print('NN_1_indices (orig ):',NN_1_indices)\n print('NN_2_indices (orig ):',NN_2_indices)\n #sys.exit()\n atomsc_fakevac.write('dataxx.quippy.xyz',format='quippy',append=True)\n atomsc_fakevac.write('dataxx.poscar',format='vasp',append=True)\n atomsc_fakevac.write('dataxx.ipi',format='ipi',append=True) # works, currently so implemented that it canges cell\n atomsc_fakevac.write('dataxx.xyz',format='xyz',append=True)\n atomsc_fakevac.write('dataxx.extxyz',format='extxyz',append=True)\n atomsc_fakevac.write('dataxx.lammps-data',format='lammps-data',append=True)\n atomsc_fakevac.write('dataxx.lammps-runner',format='lammps-runner',append=True)\n\n atomsc_fakevac_a = ase_read('dataxx.extxyz',format='extxyz') # works, cell ist not changed\n atomsc_fakevac_a.write('dataxx.extxyz2',format='extxyz',append=True) # works, cell is not changed\n\n atomsc_fakevac_b = ase_read('dataxx.xyz',format='xyz') # not working # but this should work\n atomsc_fakevac_b.write('dataxx.xyz2',format='xyz',append=True) # this is working\n\n atomsc_fakevac_c = ase_read('dataxx.ipi',format='ipi') # works, currently so implemented that it canges cell\n #print('ipi cell',atomsc_fakevac_c.get_cell())\n\n atomsc_fakevac_c.write('dataxx.ipi2',format='ipi',append=True) # works, just writes the cell it gests.\n atomsc_fakevac_c.write('dataxx.ipi2_poscar',format='vasp',append=True) # works, just writes the cell it gests.\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_c,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (ipi ):',NN_1_indices)\n print('NN_2_indices (ipi ):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_c.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_c.positions)[i])\n\n atomsc_fakevac_cc = ase_read('dataxx.ipi2_poscar',format='vasp') # works, currently so implemented that it canges cell\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2',format='vasp',append=True)\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2_ipi',format='ipi',append=True) # works, just writes the cell it gests.\n #print('ipi cell2 (ext):',atomsc_fakevac_cc.get_cell())\n #print()\n #print('now quippy')\n atomsc_fakevac_d = ase_read('dataxx.quippy.xyz',format='quippy')\n #print('quippy cell (ext)',atomsc_fakevac_d.get_cell())\n atomsc_fakevac_d.write('dataxx.quippy.xyz2',format='quippy',append=True)\n atomsc_fakevac_d.write('dataxx.quippy.xyz2_extxyz',format='extxyz',append=True)\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_d,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (quippy):',NN_1_indices)\n print('NN_2_indices (quippy):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_d.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_d.positions)[i])\n path = \"/home/glensk/kmc/run_michele/Si6Mg6V1.1_/simulation.pos_libatom_2struct.xyz\"\n atomsc_fakevac_e = ase_read(path,format='quippy')\n\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_e,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (kmc ):',NN_1_indices)\n print('NN_2_indices (kmc ):',NN_2_indices)\n sys.exit()\n\n NN_1_indices = 
mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=nndist,skin=0.1)\n NN_1_2_indices_tmp = mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=a0,skin=0.1)\n print('NN_1_indices :',NN_1_indices)\n NN_2_indices = np.sort(np.array(mu.diff(NN_1_2_indices_tmp,NN_1_indices)))\n print('NN_2_indices :',NN_2_indices)\n NN_1_2_indices = np.concatenate((NN_1_indices, NN_2_indices ))\n print('NN_1_2_indices:',NN_1_2_indices)\n\n\n # fill only 1NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n for ii in NN_1_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n\n # fill only 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n for ii in NN_2_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n\n # fill 1NN and 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n for ii in NN_1_2_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n\n # dif compositions in 1NN shell\n filling = [ 2,4,6,8,10]\n for fi in filling:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n for idx,ii in enumerate(NN_1_indices):\n if idx < fi: ch = \"Mg\"\n else: ch = \"Si\"\n atomsc_fakevac[ii].symbol = ch\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n\n\n sys.exit()\n\n #mu.ase_get_known_formats(show=True, add_missing_formats=False, copy_formats=False, verbose=False,show_formatspy=True)\n for i in [ 'Mg', 'Si' ]:\n for ii in [ 0,1,2,3,4,5]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=i+'_'+str(ii))\n\n\n sys.exit()\n\n\n # show the input variables\n print('--------------------------- check the input --------------------------------')\n print('JOBS (nseeds) ',nseeds,'(defined by -nseeds / or -seeds)')\n print('seeds ',seeds)\n print('nsteps ',nsteps)\n print()\n print('ncell ',ncell,\"(\",atomsc.get_number_of_atoms(),\"atoms )\")\n print('nsi ',nsi, \"(\",pcsi,\"at%)\")\n print('nmg ',nmg,\"(\",pcmg,\"at%)\")\n print('nvac ',nvac,\"(\",pcvac,\"at%)\")\n print('a0 ',a0,\"angstrom\")\n print('temp ',temp,\"K\")\n print()\n print('mypot.pot ',mypot.pot)\n print('mypot.potpath ',mypot.potpath)\n print()\n print('directory ',directory)\n print('submit ',submit)\n print('submitdebug ',submitdebug)\n print()\n print('nodes ',nodes)\n print('ffsocket ',ffsocket)\n #print('python ver ',sys.version_info[0])\n #print()\n print('--------------------------- check the input --------------------------------')\n if submit == True or submitdebug == True:\n mu.get_from_prompt_Yy_orexit(\"Are the ine input variables ok? 
[y]es: \")\n\n # make the directory\n if os.path.isdir(directory):\n mu.get_from_prompt_Yy_orexit(\"This main directory exists already, shall I add jobs? [y]es: \")\n mu.mkdir(directory)\n\n # create README.md\n IPI_COMMAND = os.environ[\"IPI_COMMAND\"]\n LAMMPS_COMMAND = os.environ[\"LAMMPS_COMMAND\"]\n mu.create_READMEtxt(directory,add=[\"# to start manually (1): python \"+IPI_COMMAND+\" input-runner.xml\",\"# to start manually (2):\"+LAMMPS_COMMAND+\" < in.lmp\"])\n\n for seed in seeds:\n\n # make jobdirectory\n jobdir = directory+'/seed'+str(seed)\n print('jobdir',jobdir)\n if os.path.exists(jobdir):\n sys.exit(\"jobdirectory \"+str(jobdir)+\" already exists!\")\n mu.mkdir(jobdir)\n\n # get data.lmp and data.ipi\n atomsc.write(jobdir+'/data.runnerformat.lmp',format='lammps-runner')\n atomsc_fakevac.write(jobdir+'/data.ipi',format='ipi')\n atomsc_fakevac.write(jobdir+'/data.extxyz',format='extxyz')\n #atomsc_fakevac.write(jobdir+'/data_fakevac.ipi',format='ipi')\n\n if testfiles == True:\n atomsc.write(jobdir+'/data.lmp',format='lammps-data')\n atomsc.write(jobdir+'/data.POSCAR',format='vasp')\n atomsc.write(jobdir+'/data.xyz',format='xyz')\n atomsc.write(jobdir+'/data.extxyz',format='extxyz')\n atomsc.write(jobdir+'/data.espresso-in',format='espresso-in')\n\n # create in.lmp\n ace = mu.ase_calculate_ene(pot=pot,potpath=mypot.potpath,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n address = socket.gethostname()+\"_\"+os.path.basename(jobdir)\n print('address',address)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket,address=address)\n mu.lammps_write_inputfile(folder=jobdir,filename='in.lmp',positions='data.runnerformat.lmp',ace=ace)\n\n # create input-runner.xml (should be made without copying)\n mu.create_ipi_kmc_inputfile(jobdir,filename=\"input-runner.xml\",nsteps=nsteps,stride=100,seed=seed,a0=a0,ncell=ncell,nsi=nsi,nmg=nmg,nvac=nvac,neval=neval,temp=temp,nodes=nodes,address=address,testrun=test,cubic=args.cubic)\n\n # create submit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/submit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True)\n\n # create osubmit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/osubmit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=False)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=jobdir,submitskript=\"submit-ipi-kmc.sh\")\n\n # get submit-ipi-kmc.sh_all3 (should be made without copying)\n if nseeds == 3:\n mu.create_submitskript_ipi_kmc(directory+\"/submit-ipi-kmc.sh_all3\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True,\n LOOPFOLDER=True)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n if submit == True:\n mu.submitjob(submit_to_que=True,submit_to_debug_que=False,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n\n\n print('done')\n return", "def _instantiate_processes(self, input=None, context=None):\n# FIX: ALLOW Projections (??ProjectionTiming TUPLES) TO BE INTERPOSED BETWEEN MECHANISMS IN PATHWAY\n# FIX: AUGMENT LinearMatrix TO USE 
FULL_CONNECTIVITY_MATRIX IF len(sender) != len(receiver)\n\n # # MODIFIED 2/8/17 OLD: [SEE BELOW]\n # self.variable = []\n # MODIFIED 2/8/17 END\n self.mechanismsDict = {}\n self._all_mech_tuples = []\n self._allMechanisms = MechanismList(self, self._all_mech_tuples)\n\n # Get list of processes specified in arg to init, possibly appended by EVCMechanism (with prediction processes)\n processes_spec = self.processes\n\n # Assign default Process if PROCESS is empty, or invalid\n if not processes_spec:\n from PsyNeuLink.Components.Process import Process_Base\n processes_spec.append(ProcessTuple(Process_Base(), None))\n\n # If input to system is specified, number of items must equal number of processes with origin mechanisms\n if input is not None and len(input) != len(self.originMechanisms):\n raise SystemError(\"Number of items in input ({}) must equal number of processes ({}) in {} \".\n format(len(input), len(self.originMechanisms),self.name))\n\n #region VALIDATE EACH ENTRY, STANDARDIZE FORMAT AND INSTANTIATE PROCESS\n\n # Convert all entries to (process, input) tuples, with None as filler for absent input\n input_index = input_index_curr = 0\n for i in range(len(processes_spec)):\n\n # MODIFIED 2/8/17 NEW:\n # Get list of origin mechanisms for processes that have already been converted\n # (for use below in assigning input)\n orig_mechs_already_processed = list(p[0].originMechanisms[0] for\n p in processes_spec if isinstance(p,ProcessTuple))\n # MODIFIED 2/8/17 END\n\n # Entry is not a tuple\n # presumably it is a process spec, so enter it as first item of ProcessTuple\n if not isinstance(processes_spec[i], tuple):\n processes_spec[i] = ProcessTuple(processes_spec[i], None)\n\n # Entry is a tuple but not a ProcessTuple, so convert it\n if isinstance(processes_spec[i], tuple) and not isinstance(processes_spec[i], ProcessTuple):\n processes_spec[i] = ProcessTuple(processes_spec[i][0], processes_spec[i][1])\n\n # Input was NOT provided on command line, so get it from the process\n if input is None:\n process = processes_spec[i].process\n process_input = []\n for process_input_state in process.processInputStates:\n process_input.extend(process_input_state.value)\n processes_spec[i] = ProcessTuple(process, process_input)\n # Input was provided on command line, so assign that to input item of tuple\n else:\n # Assign None as input to processes implemented by controller (controller provides their input)\n # (e.g., prediction processes implemented by EVCMechanism)\n if processes_spec[i].process._isControllerProcess:\n processes_spec[i] = ProcessTuple(processes_spec[i].process, None)\n else:\n # MODIFIED 2/8/17 NEW:\n # Replace input item in tuple with one from command line\n # Note: check if origin mechanism for current process is same as any previous one;\n # if it is, use that one (and don't increment index for input\n # otherwise, assign input and increment input_index\n try:\n input_index_curr = orig_mechs_already_processed.index(processes_spec[i][0].originMechanisms[0])\n except ValueError:\n input_index += 1\n processes_spec[i] = ProcessTuple(processes_spec[i].process, input[input_index_curr])\n input_index_curr = input_index\n # MODIFIED 2/8/17 END\n\n # Validate input\n if (processes_spec[i].input is not None and\n not isinstance(processes_spec[i].input,(numbers.Number, list, np.ndarray))):\n raise SystemError(\"Second item of entry {0} ({1}) must be an input value\".\n format(i, processes_spec[i].input))\n\n process = processes_spec[i].process\n process_input = 
processes_spec[i].input\n\n # # MODIFIED 2/8/17 OLD: [MOVED ASSIGNMENT OF self.variable TO _instantiate_graph()\n # # SINCE THAT IS WHERE SYSTEM'S ORIGIN MECHANISMS ARE IDENTIFIED]\n # self.variable.append(process_input)\n # # MODIFIED 2/8/17 END\n\n # IMPLEMENT: THIS IS WHERE LEARNING SPECIFIED FOR A SYSTEM SHOULD BE IMPLEMENTED FOR EACH PROCESS IN THE\n # SYSTEM; NOTE: IF THE PROCESS IS ALREADY INSTANTIATED WITHOUT LEARNING\n # (FIRST CONDITIONAL BELOW), MAY NEED TO BE RE-INSTANTIATED WITH LEARNING\n # (QUESTION: WHERE TO GET SPECS FOR PROCESS FOR RE-INSTANTIATION??)\n\n # If process item is a Process object, assign process_input as default\n if isinstance(process, Process):\n if process_input is not None:\n process._assign_defaults(variable=process_input, context=context)\n # If learning_rate is specified for system but not for process, then apply to process\n # # MODIFIED 3/21/17 OLD:\n # if self.learning_rate and not process.learning_rate:\n # # FIX: assign_params WANTS TO CREATE A ParamaterState ON process FOR learning_rate\n # process.assign_params(request_set={LEARNING_RATE:self.learning_rate})\n # # MODIFIED 3/21/17 NEW:[learning_rate SHOULD BE NOT BE RE-ASSIGNED FOR PROCESS, BUT RATHER ON EXECUTE]\n # if self.learning_rate is not None and process.learning_rate is None:\n # process.learning_rate = self.learning_rate\n # # MODIFIED 3/21/17 END\n\n # Otherwise, instantiate Process\n else:\n if inspect.isclass(process) and issubclass(process, Process):\n # FIX: MAKE SURE THIS IS CORRECT\n # Provide self as context, so that Process knows it is part of a System (and which one)\n # Note: this is used by Process._instantiate_pathway() when instantiating first Mechanism\n # in Pathway, to override instantiation of projections from Process.input_state\n process = Process(default_input_value=process_input,\n learning_rate=self.learning_rate,\n context=self)\n elif isinstance(process, dict):\n # IMPLEMENT: HANDLE Process specification dict here;\n # include process_input as ??param, and context=self\n raise SystemError(\"Attempt to instantiate process {0} in kwProcesses of {1} \"\n \"using a Process specification dict: not currently supported\".\n format(process.name, self.name))\n else:\n raise SystemError(\"Entry {0} of kwProcesses ({1}) must be a Process object, class, or a \"\n \"specification dict for a Process\".format(i, process))\n\n # # process should now be a Process object; assign to processList\n # self.processList.append(process)\n\n # Assign the Process a reference to this System\n process.systems.append(self)\n if process.learning:\n self.learning = True\n\n # Get max of Process phaseSpecs\n self._phaseSpecMax = int(max(math.floor(process._phaseSpecMax), self._phaseSpecMax))\n\n # Iterate through mechanism tuples in Process' mech_tuples\n # to construct self._all_mech_tuples and mechanismsDict\n # FIX: ??REPLACE WITH: for sender_mech_tuple in Process._mech_tuples\n for sender_mech_tuple in process._mech_tuples:\n\n sender_mech = sender_mech_tuple.mechanism\n\n # THIS IS NOW DONE IN _instantiate_graph\n # # Add system to the Mechanism's list of systems of which it is member\n # if not self in sender_mech_tuple[MECHANISM].systems:\n # sender_mech.systems[self] = INTERNAL\n\n # Assign sender mechanism entry in self.mechanismsDict, with mech_tuple as key and its Process as value\n # (this is used by Process._instantiate_pathway() to determine if Process is part of System)\n # If the sender is already in the System's mechanisms dict\n if sender_mech_tuple.mechanism in 
self.mechanismsDict:\n existing_mech_tuple = self._allMechanisms._get_tuple_for_mech(sender_mech)\n if not sender_mech_tuple is existing_mech_tuple:\n # Contents of tuple are the same, so use the tuple in _allMechanisms\n if (sender_mech_tuple.phase == existing_mech_tuple.phase and\n sender_mech_tuple.params == existing_mech_tuple.params):\n pass\n # Contents of tuple are different, so raise exception\n else:\n if sender_mech_tuple.phase != existing_mech_tuple.phase:\n offending_tuple_field = 'phase'\n offending_value = PHASE_ITEM\n else:\n offending_tuple_field = 'process_input'\n offending_value = PARAMS_ITEM\n raise SystemError(\"The same mechanism in different processes must have the same parameters:\"\n \"the {} ({}) for {} in {} does not match the value({}) in {}\".\n format(offending_tuple_field,\n sender_mech_tuple.mechanism,\n sender_mech_tuple[offending_value],\n process,\n existing_mech_tuple[offending_value],\n self.mechanismsDict[sender_mech_tuple.mechanism]\n ))\n # Add to entry's list\n self.mechanismsDict[sender_mech].append(process)\n else:\n # Add new entry\n self.mechanismsDict[sender_mech] = [process]\n if not sender_mech_tuple in self._all_mech_tuples:\n self._all_mech_tuples.append(sender_mech_tuple)\n\n process._allMechanisms = MechanismList(process, tuples_list=process._mech_tuples)\n\n # # MODIFIED 2/8/17 OLD: [SEE ABOVE]\n # self.variable = convert_to_np_array(self.variable, 2)\n # # MODIFIED 2/8/17 END\n #\n # # Instantiate processList using process_tuples, and point self.processes to it\n # # Note: this also points self.params[kwProcesses] to self.processes\n self.process_tuples = processes_spec\n self._processList = ProcessList(self, self.process_tuples)\n self.processes = self._processList.processes" ]
[ "0.5822994", "0.5640463", "0.5550681", "0.5531895", "0.545077", "0.5334903", "0.5310665", "0.52848184", "0.5238615", "0.51830214", "0.5171756", "0.516098", "0.515843", "0.5146447", "0.5145911", "0.51302344", "0.5127587", "0.51138264", "0.5095258", "0.50653327", "0.50255686", "0.50125045", "0.50044644", "0.49692857", "0.49652842", "0.4955127", "0.49510884", "0.4944789", "0.4938533", "0.49183825" ]
0.66021925
0
Create a bash script to run the jobs locally on the GPUs. Requires pmemd.cuda_SPFP to be installed and in the $PATH bash environment variable.
def run_GPUs_bash(self, folders, run=True):
    if type(folders) == str:
        folders = glob(folders)
    elif type(folders) == list:
        pass
    else:
        raise ValueError('folders must be of type str or list')

    bash_cmd = "export CUDA_VISIBLE_DEVICES=0\n"
    bash_cmd += "export curr_dir=$(pwd)\n"

    num_folders = len(folders)
    if num_folders > self.available_gpus:
        raise ValueError("Cannot run jobs of {} folders as only {} GPUs are available".format(num_folders, self.available_gpus))

    for folder in folders:
        bash_cmd += 'cd {}\n'.format(folder)
        bash_cmd += """nohup pmemd.cuda_SPFP -O -i Production.in \
-c seed.ncrst -p structure.prmtop -r Production.rst \
-x Production.nc &

((CUDA_VISIBLE_DEVICES++))

cd ${curr_dir}

"""

    # Write bash script to file
    with open('run.sh', 'w') as f:
        f.write(bash_cmd)

    # Run bash script
    if run:
        output = subprocess.check_output(['bash', './run.sh', '&'])
        logger.info('Output of run process was\n{}\n'.format(output))
        return output
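For illustration, a minimal sketch of the run.sh this method would emit when called with two job folders. The folder names job_0 and job_1 are placeholders and the comments are added for clarity; the commands themselves follow directly from the string concatenation in the method above.

export CUDA_VISIBLE_DEVICES=0
export curr_dir=$(pwd)
# first job inherits CUDA_VISIBLE_DEVICES=0 at launch
cd job_0
nohup pmemd.cuda_SPFP -O -i Production.in -c seed.ncrst -p structure.prmtop -r Production.rst -x Production.nc &

((CUDA_VISIBLE_DEVICES++))

cd ${curr_dir}

# second job inherits CUDA_VISIBLE_DEVICES=1 at launch
cd job_1
nohup pmemd.cuda_SPFP -O -i Production.in -c seed.ncrst -p structure.prmtop -r Production.rst -x Production.nc &

((CUDA_VISIBLE_DEVICES++))

cd ${curr_dir}

Each nohup'd pmemd.cuda_SPFP process copies its environment at launch time, so incrementing CUDA_VISIBLE_DEVICES after each launch pins successive jobs to successive GPUs without disturbing jobs that are already running.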
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gpu_cuda_code() -> None:\n if get_from_environ(\"DISABLE_GPU_FOR_TESTING\") is not None:\n print(\"GPU payload disabled for testing\")\n return\n\n # if the command exists it can run on the hardware below\n proc = subprocess.Popen([\"nvidia-smi\"], stdout=subprocess.PIPE)\n stdout, _ = proc.communicate()\n str_stdout = stdout.decode()\n assert \"NVIDIA-SMI\" in str_stdout, str_stdout\n assert proc.returncode == 0\n # search the history for the CUDA implementation", "def launch_run( # pylint: disable=bad-continuation\n run_path, py_script, session_id, gpu=None, do_nohup=True, optim=False, end_by=None\n):\n err_path = os.path.join(run_path, \"err\")\n out_path = os.path.join(run_path, \"out\")\n wrap_err_path = os.path.join(run_path, \"nohup.err\" if do_nohup else \"sh.err\")\n wrap_out_path = os.path.join(run_path, \"nohup.out\" if do_nohup else \"sh.out\")\n cfg_path = os.path.join(run_path, \"cfg.yaml\")\n start_path = os.path.join(run_path, \".__start\")\n end_path = os.path.join(run_path, \".__end\")\n crash_path = os.path.join(run_path, \".__crash\")\n env_vars = \"\"\n\n with open(cfg_path) as handler:\n title = yaml.load(handler, Loader=yaml.SafeLoader)[\"title\"]\n\n if gpu is not None:\n env_vars = f\"CUDA_VISIBLE_DEVICES={gpu} {env_vars:s}\"\n\n if end_by is not None:\n env_vars += f\" ENDBY={end_by}\"\n\n flags = \"-u -OO\" if optim else \"-u\"\n\n py_cmd = f\"python {flags} {py_script:s} {cfg_path:s} --session-id {session_id}\"\n\n if do_nohup:\n cmd = (\n f\" date +%s 1> {start_path:s} 2>/dev/null &&\"\n f\" nohup sh -c '{env_vars:s} {py_cmd:s}\"\n f\" 2>{err_path:s} 1>{out_path:s}\"\n f\" && date +%s > {end_path:s}\"\n f\" || date +%s > {crash_path:s}'\"\n f\" 1> {wrap_out_path} 2> {wrap_err_path}\"\n f\" & echo $!\"\n )\n else:\n cmd = (\n f\" date +%s 1> {start_path:s} 2>/dev/null &&\"\n f\" sh -c '{env_vars:s} {py_cmd:s}\"\n f\" 2>{err_path:s} 1>{out_path:s}\"\n f\" && date +%s > {end_path:s}\"\n f\" || date +%s > {crash_path:s}'\"\n f\" 1>{wrap_out_path} 2>{wrap_err_path}\"\n f\" & echo $!\"\n )\n\n print(f\"[{time.strftime(time.ctime())}] Command to be run:\\n{cmd:s}\")\n sys.stdout.flush()\n\n proc = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True\n )\n (out, err) = proc.communicate()\n err = err.decode(\"utf-8\").strip()\n if err:\n print(f\"[{time.strftime(time.ctime())}] Some error: {clr(err, 'red'):s}.\")\n pid = int(out.decode(\"utf-8\").strip())\n print(f\"[{time.strftime(time.ctime())}] New PID is {pid:d}.\")\n sys.stdout.flush()\n return pid, gpu, title, py_cmd", "def locate_cuda():\n nvcc_bin = 'nvcc'\n if sys.platform.startswith(\"win\"):\n nvcc_bin = 'nvcc.exe'\n\n # check env variables CUDA_HOME, CUDAHOME, CUDA_PATH.\n found = False\n for env_name in ['CUDA_PATH', 'CUDAHOME', 'CUDA_HOME']:\n if env_name not in os.environ:\n continue\n found = True\n home = os.environ[env_name]\n nvcc = os.path.join(home, 'bin', nvcc_bin)\n break\n if not found:\n # otherwise, search the PATH for NVCC\n nvcc = find_in_path(nvcc_bin, os.environ['PATH'])\n if nvcc is None:\n logging.warning('The nvcc binary could not be located in your '\n '$PATH. 
Either add it to '\n 'your path, or set $CUDA_HOME to enable CUDA extensions')\n return None\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {'home': home,\n 'nvcc': nvcc,\n 'include': os.path.join(home, 'include'),\n 'lib64': os.path.join(home, 'lib64')}\n cuda_ver = os.path.basename(os.path.realpath(home)).split(\"-\")[1].split(\".\")\n major, minor = int(cuda_ver[0]), int(cuda_ver[1])\n cuda_ver = 10 * major + minor\n assert cuda_ver >= 70, f\"too low cuda ver {major}.{minor}\"\n print(f\"cuda_ver: {major}.{minor}\")\n arch = get_cuda_arch(cuda_ver)\n sm_list = get_cuda_sm_list(cuda_ver)\n compute = get_cuda_compute(cuda_ver)\n post_args = [f\"-arch=sm_{arch}\"] + \\\n [f\"-gencode=arch=compute_{sm},code=sm_{sm}\" for sm in sm_list] + \\\n [f\"-gencode=arch=compute_{compute},code=compute_{compute}\",\n \"--ptxas-options=-v\", \"-O2\"]\n print(f\"nvcc post args: {post_args}\")\n if HALF_PRECISION:\n post_args = [flag for flag in post_args if \"52\" not in flag]\n\n if sys.platform == \"win32\":\n cudaconfig['lib64'] = os.path.join(home, 'lib', 'x64')\n post_args += ['-Xcompiler', '/MD', '-std=c++14', \"-Xcompiler\", \"/openmp\"]\n if HALF_PRECISION:\n post_args += [\"-Xcompiler\", \"/D HALF_PRECISION\"]\n else:\n post_args += ['-c', '--compiler-options', \"'-fPIC'\",\n \"--compiler-options\", \"'-std=c++14'\"]\n if HALF_PRECISION:\n post_args += [\"--compiler-options\", \"'-D HALF_PRECISION'\"]\n for k, val in cudaconfig.items():\n if not os.path.exists(val):\n logging.warning('The CUDA %s path could not be located in %s', k, val)\n return None\n\n cudaconfig['post_args'] = post_args\n return cudaconfig", "def launch_training_job(model_dir,job_name, params, implementation_dir):\n # Create a new folder in implementation corresponding to the model\n implementation_dir = os.path.join(implementation_dir, os.path.basename(os.path.normpath(model_dir)))\n if not os.path.exists(implementation_dir):\n os.makedirs(implementation_dir)\n \n implementation_hyperparams_dir = os.path.join(implementation_dir, job_name)\n if not os.path.exists(implementation_hyperparams_dir):\n os.makedirs(implementation_hyperparams_dir)\n \n params.implementation_dir = implementation_hyperparams_dir + \"/\"\n \n # Write parameters in json file\n json_path = os.path.join(implementation_hyperparams_dir, 'params.json')\n params.save(json_path)\n\n # Launch training with this config\n cmd = \"{python} {model_dir}/train_C3D.py --params={json_path}\".format(python=PYTHON, model_dir=model_dir, json_path=json_path)\n #print(cmd)\n \n\n #NOT GENERALIZABLE -- READ IN TEMPLATE AND APPEND?\n f = open(os.path.join(implementation_hyperparams_dir, ('run_' + job_name + '.test')), 'w+')\n f.write(\"#!/bin/bash\\n\")\n f.write(\"\\n\")\n f.write(\"#SBATCH --job-name=iterate{}\\n\".format(job_name))\n f.write(\"#SBATCH --nodes=1\\n\")\n f.write(\"#SBATCH --mem=100GB\\n\") \n f.write(\"#SBATCH --time=12:00:00\\n\")\n f.write(\"#SBATCH --gres=gpu:1 -c1\\n\")\n f.write(\"#SBATCH --cpus-per-task=1\\n\")\n f.write(\"#SBATCH --error={}.out\\n\".format(model_dir + \"/\" + job_name))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"module purge\\n\")\n f.write(\"module load python3/intel/3.5.3\\n\")\n f.write(\"module load pillow/intel/4.0.0\\n\")\n f.write(\"module load scikit-learn/intel/0.18.1\\n\")\n f.write(\"module load pytorch/python3.5/0.2.0_3\\n\")\n f.write(\"module load numpy/intel/1.13.1 \\n\")\n f.write(\"module load cuda/8.0.44\\n\")\n f.write(\"module load jupyter-kernels/py3.5\\n\")\n f.write(\"module load 
mysql/5.7.17\\n\")\n f.write(\"module load zeromq/intel/4.2.0\\n\")\n f.write(\"module load intel/17.0.1\\n\")\n f.write(\"module load zlib/intel/1.2.8\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(cmd)\n f.close()\n\n file=(implementation_hyperparams_dir +'/run_' + job_name + '.test')\n sbatch_call = \"sbatch \" + file\n print(sbatch_call)\n call(sbatch_call, shell=True)", "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def script_generator(self):\n\n self._get_free_tcp_port()\n\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n\n if not os.access(py, os.X_OK):\n py = \"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n \n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s -m torch.distributed.launch \"%(py)\n content += \"--nproc_per_node=%s \"%(self.setting['train_num_gpu'])\n content += \"--master_port %s \"%(self.dist_train_port)\n content += \"%s %s --launcher pytorch \"%(train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--resume_from latest.pth \"\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n # return content\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def mpii_setup():\n base_command = (\n 'python mpii.py '\n '--train_file %train_file% '\n '--test_file %test_file% '\n '--arch %arch% '\n '--feat_keys %feat_keys% '\n '--out_dir %out_dir% '\n '--learning_rate %learning_rate% '\n '--lstm_hidden_dim %lstm_hidden_dim% '\n '--image_dir /localhome/kwaki/frames '\n '--cuda_device 0 '\n '--hantman_mini_batch=10 '\n '--hantman_perframeloss=WEIGHTED_MSE '\n '--seq_len=5000 '\n '--total_epochs=100 '\n '--hantman_perframe_weight=100.0 '\n '--hantman_struct_weight=1.0 '\n '--hantman_tp=10.0 '\n '--hantman_fp=0.25 '\n '--hantman_fn=20.0 '\n '--reweight --normalize'\n )\n\n # main parameters\n # 'val_file': '/nrs/branson/kwaki/data/20180328_mpiicooking2',\n main_params = {\n 'train_file': '/nrs/branson/kwaki/data/20180328_mpiicooking2/temp_data/hdf5/train.hdf5',\n 'test_file': '/nrs/branson/kwaki/data/20180328_mpiicooking2/temp_data/hdf5/test.hdf5',\n 'arch': 'bidirconcat',\n 'feat_keys': 'vgg',\n 'out_dir': '',\n 'learning_rate': '',\n 'lstm_hidden_dim': ''\n }\n\n # learning_rates = [0.01, 0.001, 0.0001]\n # hidden_dims = [64, 128, 256, 512]\n learning_rates = [0.0001]\n hidden_dims = [256]\n\n output_dir = '/nrs/branson/kwaki/outputs/20180403_mpii_sweep_test'\n # output_dir = '/nrs/branson/kwaki/outputs/20180411_mpii_tests'\n\n return base_command, main_params, learning_rates, hidden_dims, output_dir", "def initialize_multigpu_train(\n rdzv_endpoint: str,\n rank: int,\n local_rank: int,\n gpu_ids: List[int],\n world_size: int,\n ):\n\n host, port = rdzv_endpoint.split(\":\")\n os.environ[\"MASTER_ADDR\"] = host\n os.environ[\"MASTER_PORT\"] = port\n os.environ[\"LOCAL_WORLD_SIZE\"] = str(len(gpu_ids))\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n os.environ[\"LOCAL_RANK\"] = 
str(local_rank)\n os.environ[\"RANK\"] = str(rank)", "def UpdateScriptForSmallGpuMem(vm: virtual_machine.BaseVirtualMachine) -> None:\n if nvidia_driver.GetGpuMem(vm) < 80000 and nvidia_driver.QueryNumberOfGpus(\n vm) >= 8:\n # A100 40G fails out of memory when creating dummy_eval_data on one GPU.\n data_script = f'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks/resnet/implementations/mxnet/common/data.py'\n vm_util.ReplaceText(vm, r\"mx\\.Context\\('gpu'\\)\",\n 'mx.gpu(hvd.local_rank())', data_script)", "def generate_disttrain_scipts(self):\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n\n if not os.access(py, os.X_OK):\n py = \"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n \n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s -m torch.distributed.launch \"%(py)\n content += \"--nproc_per_node=%s \"%(self.setting['train_num_gpu'])\n content += \"--master_port %s \"%(self.dist_train_port)\n content += \"%s %s --launcher pytorch \"%(train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n # return content\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def prepare_parafly_slurm_job_script(sBasename_job, sBasename_parafly, sDirectory_job, sEmail, iWalltime_in = None, nNode_in = None, nThread_in=None, sJob_name_in =None, sPython_env_in =None, sQueue_in=None):\n if iWalltime_in is not None:\n iWalltime = iWalltime_in \n else:\n iWalltime = 2\n if nNode_in is not None:\n iNode = nNode_in \n else:\n iNode = 1\n if nThread_in is not None:\n nThread = nThread_in \n else:\n nThread = 40\n \n if sJob_name_in is not None:\n sJob_name = sJob_name_in \n else:\n sJob_name = 'parafly'\n if sPython_env_in is not None:\n sPython_env = sPython_env_in \n else:\n sPython_env = 'base'\n \n if sQueue_in is not None:\n sQueue = sQueue_in \n else:\n sQueue = 'short'\n \n sWalltime =\"{:0d}\".format(iWalltime )\n sNode = \"{:0d}\".format(iNode )\n sThread = \"{:0d}\".format(nThread )\n \n os.chdir(sDirectory_job)\n \n ofs = open(sBasename_job,\"w\") #write mode \n sLine = '#!/bin/bash' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --account=esmd' + '\\n'\n ofs.write( sLine ) \n\n #sLine = '#SBATCH --begin=now+1minutes' + '\\n'\n #ofs.write( sLine ) \n\n sLine = '#SBATCH --cpus-per-task=1 ' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --dependency=singleton ' + '\\n'\n ofs.write( sLine )\n sLine = '#SBATCH --error=stderr_%j.err' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --job-name=' + sJob_name + ' # create a name for your job' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-type=ALL' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-user=' + sEmail + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --nodes=' + sNode + ' # node count' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --ntasks=' + sThread + ' # total number of tasks' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --output=stdout_%j.out' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --partition=' + sQueue + '\\n' #can be improved here\n ofs.write( sLine ) \n sLine = '#SBATCH --time=' + sWalltime +':00:00 
# total run time limit (HH:MM:SS)' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'module purge' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load parafly/2013' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load anaconda3/2019.03' + '\\n'\n ofs.write( sLine ) \n sLine = 'source /share/apps/anaconda3/2019.03/etc/profile.d/conda.sh' + '\\n'\n ofs.write( sLine ) \n sLine = 'unset PYTHONHOME' + '\\n'\n ofs.write( sLine ) \n sLine = 'conda activate ' + sPython_env + '\\n'\n ofs.write( sLine ) \n\n sLine = 'ParaFly -c ' + sBasename_parafly + ' -CPU ' + sThread + ' -failed_cmds rerun.txt' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \" Job \" ' + '${SLURM_JOBID}' + ' is launched' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'conda deactivate' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \"Finished\"' + '\\n'\n ofs.write( sLine ) \n ofs.close() \n \n return", "def generate_singletrain_scipts(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n\n if not os.access(py, os.X_OK):\n py = \"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s %s %s \"%(py, train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def submit_jobs(args, udf_command):\n hosts = []\n thread_list = []\n server_count_per_machine = 0\n\n # Get the host addresses of the cluster.\n ip_config = args.ip_config\n with open(ip_config) as f:\n for line in f:\n result = line.strip().split()\n if len(result) >= 3:\n ip = result[0]\n host = result[2]\n hosts.append((ip, host))\n else:\n raise RuntimeError(\"Format error of ip_config.\")\n server_count_per_machine = args.num_servers\n assert args.num_parts == len(hosts), \\\n 'The number of graph partitions has to match the number of machines in the cluster.'\n\n tot_num_clients = args.num_trainers * (1 + args.num_samplers) * len(hosts)\n # launch server tasks\n server_cmd = 'DGL_ROLE=server DGL_NUM_SAMPLER=' + str(args.num_samplers)\n server_cmd = server_cmd + ' ' + 'OMP_NUM_THREADS=' + str(args.num_server_threads)\n server_cmd = server_cmd + ' ' + 'DGL_NUM_CLIENT=' + str(tot_num_clients)\n server_cmd = server_cmd + ' ' + 'DGL_CONF_PATH=' + str(args.part_config)\n server_cmd = server_cmd + ' ' + 'DGL_IP_CONFIG=' + str(args.ip_config)\n server_cmd = server_cmd + ' ' + 'DGL_NUM_SERVER=' + str(args.num_servers)\n for i in range(len(hosts)*server_count_per_machine):\n _, pod_name = hosts[int(i / server_count_per_machine)]\n cmd = server_cmd + ' ' + 'DGL_SERVER_ID=' + str(i)\n cmd = cmd + ' ' + udf_command\n cmd = 'cd ' + str(args.workspace) + '; ' + cmd\n kubexec_multi(cmd, pod_name, thread_list)\n # launch client tasks\n client_cmd = 'DGL_DIST_MODE=\"distributed\" DGL_ROLE=client DGL_NUM_SAMPLER=' + str(args.num_samplers)\n client_cmd = client_cmd + ' ' + 'DGL_NUM_CLIENT=' + str(tot_num_clients)\n client_cmd = client_cmd + ' ' + 'DGL_CONF_PATH=' + str(args.part_config)\n client_cmd = client_cmd + ' ' + 'DGL_IP_CONFIG=' + str(args.ip_config)\n client_cmd = client_cmd + ' ' + 'DGL_NUM_SERVER=' 
+ str(args.num_servers)\n if os.environ.get('OMP_NUM_THREADS') is not None:\n client_cmd = client_cmd + ' ' + 'OMP_NUM_THREADS=' + os.environ.get('OMP_NUM_THREADS')\n if os.environ.get('PYTHONPATH') is not None:\n client_cmd = client_cmd + ' ' + 'PYTHONPATH=' + os.environ.get('PYTHONPATH')\n\n torch_cmd = '-m torch.distributed.launch'\n torch_cmd = torch_cmd + ' ' + '--nproc_per_node=' + str(args.num_trainers)\n torch_cmd = torch_cmd + ' ' + '--nnodes=' + str(len(hosts))\n torch_cmd = torch_cmd + ' ' + '--node_rank=' + str(0)\n torch_cmd = torch_cmd + ' ' + '--master_addr=' + str(hosts[0][0])\n torch_cmd = torch_cmd + ' ' + '--master_port=' + str(1234)\n for node_id, tu in enumerate(hosts):\n _, pod_name = tu\n new_torch_cmd = torch_cmd.replace('node_rank=0', 'node_rank='+str(node_id))\n if 'python3' in udf_command:\n new_udf_command = udf_command.replace('python3', 'python3 ' + new_torch_cmd)\n elif 'python2' in udf_command:\n new_udf_command = udf_command.replace('python2', 'python2 ' + new_torch_cmd)\n else:\n new_udf_command = udf_command.replace('python', 'python ' + new_torch_cmd)\n cmd = client_cmd + ' ' + new_udf_command\n cmd = 'cd ' + str(args.workspace) + '; ' + cmd\n kubexec_multi(cmd, pod_name, thread_list)\n\n for thread in thread_list:\n thread.join()", "def createjob(args):\n ncell = args.ncell\n nmg = args.nmg\n nsi = args.nsi\n nvac = args.nvac\n a0 = args.a0\n temp = args.temp\n nseeds = args.nseeds\n seeds = args.seeds\n nsteps = args.nsteps\n foldername_append = args.foldername_append\n pot = args.pot\n submit = args.submit\n submitdebug = args.submitdebug\n submittime_hours = args.submittime_hours\n test = args.test\n testfiles = args.testfiles\n nodes = args.nodes\n verbose = args.verbose\n\n\n ### check if ase runner/quippy/lammpps-data formats are known\n ase_formats = mu.ase_get_known_formats_class(verbose=True)\n ase_formats.check_if_default_formats_known(copy_and_adapt_formatspy_anyhow=False)\n\n # definex ffsocket inet/unix\n if nodes == 1:\n ffsocket = \"unix\"\n elif nodes > 1:\n ffsocket = \"inet\"\n else:\n sys.exit(\"Number of nodes has to be positive!\")\n\n\n # define ntasks, neval\n lmp_par = 2 # = OMP_NUM_THREADS\n ntasks = cores = nodes * 28\n ipi_inst = 4 # for sure best on fidis\n neval = ipi_inst*2 # was alwasy better, for ompi and impi\n\n ##### get the seed(s).\n if type(seeds) == bool:\n seeds = random.sample(range(1, 999999), nseeds)\n print('seeds',seeds)\n if test == True:\n nseeds = 1\n seeds = [1]\n print('seeds',seeds)\n nseeds = len(seeds)\n\n ##### a few checks\n scripts = mu.scripts()\n mypot = mu.mypot(pot)\n if submit is True or submitdebug is True:\n hostcheck = os.environ[\"myhost\"]\n if hostcheck == \"\":\n sys.exit('host unknown 87')\n\n\n ##### here only chck if the potential can be set up. 
(in.lmp)\n ##### the same command is then executed for every kmc folder\n ace = mu.ase_calculate_ene(pot=pot,\n potpath=False,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket)\n\n ##### if test\n if test == True:\n nsteps = 50\n\n file_ipi_input_runner = scripts + \"/i-pi-mc_scripts/input-runner.xml\"\n\n\n ####################################\n # get directory\n ####################################\n if verbose:\n print(\"get directory\")\n pcsi = nsi/ncell**3.*100\n pcmg = nmg/ncell**3.*100\n pcvac = nvac/ncell**3.*100\n if args.cubic == True:\n pc = \"cubic\"\n else:\n pc = \"primitive\"\n directory = str(ncell)+\"x\"+str(ncell)+\"x\"+str(ncell)+\"_\"+pc+\"_\"+pot+\"_\"+\\\n str(temp)+\"K_\"+\\\n str(nvac)+\"Vac_\"+str(nmg)+\"Mg_\"+str(nsi)+\"Si__\"+\\\n str(round(pcvac,3))+\"pctVac_\"+str(round(pcmg,3))+\"pctMg_\"+str(round(pcsi,3))+\"pctSi\"\n if foldername_append != \"\":\n directory = directory+\"_\"+foldername_append\n\n ###############################################\n # make the structure\n ###############################################\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,create_fake_vacancy = True,cubic=args.cubic)\n atomsc = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,cubic=args.cubic)\n\n # make the atomic structure\n # this was to play ... not necessary now?\n if False:\n nndist = a0/np.sqrt(2.)\n\n from ase.io import read as ase_read\n from ase.io import write as ase_write\n\n ###############################################\n # get the amount of 1NN in a relly large cell\n ###############################################\n atomsc_fakevac_i = ase_read('dataxx.extxyz3',index=\":\",format='extxyz') # works, cell ist not changed\n #atomsc_fakevac_i = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=10,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=3.,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=8.5,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #sys.exit()\n\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('ipi')\n atomsc_fakevac_i = ase_read('dataxx.ipi2',index=\":\",format='ipi') # works, cell ist not changed\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('quippy')\n atomsc_fakevac_i = ase_read('dataxx.quippy.xyz2',index=\":\",format='quippy') # works, cell ist not changed\n\n\n\n filename = '../sim.xyz'\n filename = '../simulation.pos_0.xyz'\n mu.count_amount_1NN_around_vacancies(filename,cutoffa=nndist,cutoffb=a0,skin=0.1,format='ipi')\n sys.exit()\n\n def mysave_quippy_xyz(atomsc_fakevac,text=False):\n if type(text) == bool:\n sys.exit('define text')\n atomsc_fakevac.write('data.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data.xyz',format=\"extxyz\",append=True)\n atomsc_fakevac.write('data'+text+'.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data'+text+'.xyz',format=\"extxyz\",append=True)\n return\n\n # create Al with single vacancy\n atomsc_fakevac = 
mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n #print('from ....',(atomsc_fakevac.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac.positions)[i])\n print('NN_1_indices (orig ):',NN_1_indices)\n print('NN_2_indices (orig ):',NN_2_indices)\n #sys.exit()\n atomsc_fakevac.write('dataxx.quippy.xyz',format='quippy',append=True)\n atomsc_fakevac.write('dataxx.poscar',format='vasp',append=True)\n atomsc_fakevac.write('dataxx.ipi',format='ipi',append=True) # works, currently so implemented that it canges cell\n atomsc_fakevac.write('dataxx.xyz',format='xyz',append=True)\n atomsc_fakevac.write('dataxx.extxyz',format='extxyz',append=True)\n atomsc_fakevac.write('dataxx.lammps-data',format='lammps-data',append=True)\n atomsc_fakevac.write('dataxx.lammps-runner',format='lammps-runner',append=True)\n\n atomsc_fakevac_a = ase_read('dataxx.extxyz',format='extxyz') # works, cell ist not changed\n atomsc_fakevac_a.write('dataxx.extxyz2',format='extxyz',append=True) # works, cell is not changed\n\n atomsc_fakevac_b = ase_read('dataxx.xyz',format='xyz') # not working # but this should work\n atomsc_fakevac_b.write('dataxx.xyz2',format='xyz',append=True) # this is working\n\n atomsc_fakevac_c = ase_read('dataxx.ipi',format='ipi') # works, currently so implemented that it canges cell\n #print('ipi cell',atomsc_fakevac_c.get_cell())\n\n atomsc_fakevac_c.write('dataxx.ipi2',format='ipi',append=True) # works, just writes the cell it gests.\n atomsc_fakevac_c.write('dataxx.ipi2_poscar',format='vasp',append=True) # works, just writes the cell it gests.\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_c,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (ipi ):',NN_1_indices)\n print('NN_2_indices (ipi ):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_c.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_c.positions)[i])\n\n atomsc_fakevac_cc = ase_read('dataxx.ipi2_poscar',format='vasp') # works, currently so implemented that it canges cell\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2',format='vasp',append=True)\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2_ipi',format='ipi',append=True) # works, just writes the cell it gests.\n #print('ipi cell2 (ext):',atomsc_fakevac_cc.get_cell())\n #print()\n #print('now quippy')\n atomsc_fakevac_d = ase_read('dataxx.quippy.xyz',format='quippy')\n #print('quippy cell (ext)',atomsc_fakevac_d.get_cell())\n atomsc_fakevac_d.write('dataxx.quippy.xyz2',format='quippy',append=True)\n atomsc_fakevac_d.write('dataxx.quippy.xyz2_extxyz',format='extxyz',append=True)\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_d,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (quippy):',NN_1_indices)\n print('NN_2_indices (quippy):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_d.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_d.positions)[i])\n path = \"/home/glensk/kmc/run_michele/Si6Mg6V1.1_/simulation.pos_libatom_2struct.xyz\"\n atomsc_fakevac_e = ase_read(path,format='quippy')\n\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_e,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (kmc ):',NN_1_indices)\n print('NN_2_indices (kmc ):',NN_2_indices)\n sys.exit()\n\n NN_1_indices = 
mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=nndist,skin=0.1)\n NN_1_2_indices_tmp = mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=a0,skin=0.1)\n print('NN_1_indices :',NN_1_indices)\n NN_2_indices = np.sort(np.array(mu.diff(NN_1_2_indices_tmp,NN_1_indices)))\n print('NN_2_indices :',NN_2_indices)\n NN_1_2_indices = np.concatenate((NN_1_indices, NN_2_indices ))\n print('NN_1_2_indices:',NN_1_2_indices)\n\n\n # fill only 1NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n for ii in NN_1_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n\n # fill only 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n for ii in NN_2_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n\n # fill 1NN and 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n for ii in NN_1_2_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n\n # dif compositions in 1NN shell\n filling = [ 2,4,6,8,10]\n for fi in filling:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n for idx,ii in enumerate(NN_1_indices):\n if idx < fi: ch = \"Mg\"\n else: ch = \"Si\"\n atomsc_fakevac[ii].symbol = ch\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n\n\n sys.exit()\n\n #mu.ase_get_known_formats(show=True, add_missing_formats=False, copy_formats=False, verbose=False,show_formatspy=True)\n for i in [ 'Mg', 'Si' ]:\n for ii in [ 0,1,2,3,4,5]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=i+'_'+str(ii))\n\n\n sys.exit()\n\n\n # show the input variables\n print('--------------------------- check the input --------------------------------')\n print('JOBS (nseeds) ',nseeds,'(defined by -nseeds / or -seeds)')\n print('seeds ',seeds)\n print('nsteps ',nsteps)\n print()\n print('ncell ',ncell,\"(\",atomsc.get_number_of_atoms(),\"atoms )\")\n print('nsi ',nsi, \"(\",pcsi,\"at%)\")\n print('nmg ',nmg,\"(\",pcmg,\"at%)\")\n print('nvac ',nvac,\"(\",pcvac,\"at%)\")\n print('a0 ',a0,\"angstrom\")\n print('temp ',temp,\"K\")\n print()\n print('mypot.pot ',mypot.pot)\n print('mypot.potpath ',mypot.potpath)\n print()\n print('directory ',directory)\n print('submit ',submit)\n print('submitdebug ',submitdebug)\n print()\n print('nodes ',nodes)\n print('ffsocket ',ffsocket)\n #print('python ver ',sys.version_info[0])\n #print()\n print('--------------------------- check the input --------------------------------')\n if submit == True or submitdebug == True:\n mu.get_from_prompt_Yy_orexit(\"Are the ine input variables ok? 
[y]es: \")\n\n # make the directory\n if os.path.isdir(directory):\n mu.get_from_prompt_Yy_orexit(\"This main directory exists already, shall I add jobs? [y]es: \")\n mu.mkdir(directory)\n\n # create README.md\n IPI_COMMAND = os.environ[\"IPI_COMMAND\"]\n LAMMPS_COMMAND = os.environ[\"LAMMPS_COMMAND\"]\n mu.create_READMEtxt(directory,add=[\"# to start manually (1): python \"+IPI_COMMAND+\" input-runner.xml\",\"# to start manually (2):\"+LAMMPS_COMMAND+\" < in.lmp\"])\n\n for seed in seeds:\n\n # make jobdirectory\n jobdir = directory+'/seed'+str(seed)\n print('jobdir',jobdir)\n if os.path.exists(jobdir):\n sys.exit(\"jobdirectory \"+str(jobdir)+\" already exists!\")\n mu.mkdir(jobdir)\n\n # get data.lmp and data.ipi\n atomsc.write(jobdir+'/data.runnerformat.lmp',format='lammps-runner')\n atomsc_fakevac.write(jobdir+'/data.ipi',format='ipi')\n atomsc_fakevac.write(jobdir+'/data.extxyz',format='extxyz')\n #atomsc_fakevac.write(jobdir+'/data_fakevac.ipi',format='ipi')\n\n if testfiles == True:\n atomsc.write(jobdir+'/data.lmp',format='lammps-data')\n atomsc.write(jobdir+'/data.POSCAR',format='vasp')\n atomsc.write(jobdir+'/data.xyz',format='xyz')\n atomsc.write(jobdir+'/data.extxyz',format='extxyz')\n atomsc.write(jobdir+'/data.espresso-in',format='espresso-in')\n\n # create in.lmp\n ace = mu.ase_calculate_ene(pot=pot,potpath=mypot.potpath,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n address = socket.gethostname()+\"_\"+os.path.basename(jobdir)\n print('address',address)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket,address=address)\n mu.lammps_write_inputfile(folder=jobdir,filename='in.lmp',positions='data.runnerformat.lmp',ace=ace)\n\n # create input-runner.xml (should be made without copying)\n mu.create_ipi_kmc_inputfile(jobdir,filename=\"input-runner.xml\",nsteps=nsteps,stride=100,seed=seed,a0=a0,ncell=ncell,nsi=nsi,nmg=nmg,nvac=nvac,neval=neval,temp=temp,nodes=nodes,address=address,testrun=test,cubic=args.cubic)\n\n # create submit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/submit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True)\n\n # create osubmit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/osubmit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=False)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=jobdir,submitskript=\"submit-ipi-kmc.sh\")\n\n # get submit-ipi-kmc.sh_all3 (should be made without copying)\n if nseeds == 3:\n mu.create_submitskript_ipi_kmc(directory+\"/submit-ipi-kmc.sh_all3\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True,\n LOOPFOLDER=True)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n if submit == True:\n mu.submitjob(submit_to_que=True,submit_to_debug_que=False,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n\n\n print('done')\n return", "def setupGPUSwept(solver):\n solver.gpuBlock = (slice(0,solver.sharedShape[0],1),)+solver.gpuBlock\n getGPUReadBlockSwept(solver) #Finish creating gpuReadBlock here\n blockShape =[element.stop for element 
in solver.gpuBlock]\n blockShape[-1] += int(2*solver.blocksize[0]) #Adding 2 blocks in the column direction\n # Creating local GPU array with split\n grid = (int((blockShape[2])/solver.blocksize[0]),int((blockShape[3])/solver.blocksize[1])) #Grid size\n #Creating constants\n bsp = lambda x: int(numpy.prod(blockShape[x:])) #block shape product returned as an integer\n const_dict = ({\"NV\":blockShape[1],'SX':blockShape[2],'SY':blockShape[3],\"VARS\":bsp(2),\"TIMES\":bsp(1),\"MPSS\":solver.maxPyramidSize,\"MOSS\":solver.maxOctSize,\"OPS\":solver.operating,\"ITS\":solver.intermediate})\n solver.GPUArray = mallocGPUArray(blockShape) #Allocated GPU\n solver.localGPUArray = numpy.zeros(blockShape)\n #Building CUDA source code\n solver.gpu = io.buildGPUSource(solver.gpu)\n io.copyConstants(solver.gpu,const_dict) #This copys cpu constants not global constants\n solver.cpu.set_globals(*solver.globals,source_mod=solver.gpu)\n # Make GPU geometry\n solver.Up.initializeGPU(solver.gpu.get_function(\"UpPyramid\"),solver.blocksize,(grid[0],grid[1]-1))\n solver.Oct.initializeGPU(solver.gpu.get_function(\"Octahedron\"),solver.blocksize,(grid[0],grid[1]-1))\n solver.Down.initializeGPU(solver.gpu.get_function(\"DownPyramid\"),solver.blocksize,(grid[0],grid[1]-1))\n solver.Yb.initializeGPU(solver.gpu.get_function(\"YBridge\"),solver.blocksize,grid)\n solver.Xb.initializeGPU(solver.gpu.get_function(\"XBridge\"),solver.blocksize,grid)", "def make_worker_run_script(master_private_ip: str, run_command: str):\n return (f'sudo -H -u ubuntu bash -c '\n f'\\'source /home/ubuntu/.bashrc && export PYTHONPATH=. && '\n 'MKL_NUM_THREADS=1 OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 '\n f'{run_command} '\n f'--master_host {master_private_ip} '\n f'--relay_socket_path {REMOTE_MASTER_SOCKET_PATH} '\n f'>> /home/ubuntu/user_data.log 2>&1\\'')", "def worker(session, args):\n \n #print(\"Hello \" + args.jobid + \"!\")\n session.login(args.vcnc)\n #\n # Lookup the grid job on the vcnc\n # Exit if you don't find it.\n response = grid.get(session, args.jobid)\n if (response['status_code'] != 200):\n return (1, response)\n #\n # Extract the PeerCache workspace name.\n workspace = json.loads(response['body'])['job_spec']['workspace_name']\n #\n # Check that it's still there and the same (TODO)\n\n #\n # Spawn a vp. Our wrapper script ensures we are in the installation\n # directory. Note that 'run(..)' is new in Python 3.5\n #\n if args.mount:\n mount_point = args.mount\n else:\n mount_point = '/tmp/velstor/'+args.jobid\n try:\n os.makedirs(mount_point)\n except FileExistsError as e:\n pass\n \n #\n # Mount the VP\n #\n vp_cmd = [\n './bin/vp'\n , '--mount={}'.format(mount_point)\n , '--mentor={}'.format(args.vpm)\n , '--workspace={}'.format(workspace)\n ]\n result = subprocess.run(vp_cmd)\n #\n # Start an xterm.\n #\n # We start an xterm whether or not the vp mount fails. If everything\n # is working correctly, the vp mount fails because there already\n # is a vp open. If so, then opening another terminal window\n # at the mount point is a feature.\n #\n # If things aren't working, then the user has to fix the underlying\n # problem and also manually close the terminal windows. Hopefully\n # this will not be the common case.\n #\n subprocess.run(['xterm', '-e', 'cd {}; bash'.format(mount_point)])\n #\n # We want to automagically unmount the VP when the user is finished.\n # But how do we know when the user is finished? 
There are better\n # (but expensive and complicated) and worse (but simpler) ways to \n # do this.\n #\n # What we do here is: the user is done when (1) the original xterm\n # window has been closed and also (2) there are no processes whose\n # current directory is within the mount point. \n #\n # We achieve that by doing a lazy ( -z ) fuse unmount, but only if\n # we are the the original terminal (that is, the terminal that\n # originally successfully mounted the vp).\n #\n if result.returncode == 0:\n subprocess.run(['fusermount', '-uz', '{}'.format(mount_point)])\n #\n # Always report success\n #\n return 0, ''", "def run(run_py, func):\n args = eval(\"test_args.{}\".format(func))\n print(args)\n\n res = {}\n\n default_args = {}\n for arg, value in args.items():\n default_args[arg] = value[0]\n\n current_args = dict2argstr(default_args)\n cmd = \"export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7; \\\n python -m paddle.distributed.launch --selected_gpus=0,1,2,3 {} {}\".format(\n run_py, current_args)\n status = os.system(cmd)\n if status != 0:\n res[cmd] = \"FAIL\"\n else:\n res[cmd] = \"SUCCESS\"\n cmd = \"rm -rf checkpoints\"\n os.system(cmd)\n\n for arg, value in args.items():\n if len(value) <= 1:\n continue\n current_args_dict = copy.deepcopy(default_args)\n for item in value[1:]:\n current_args_dict[arg] = item\n current_args = dict2argstr(current_args_dict)\n cmd = \"export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7; \\\n python -m paddle.distributed.launch --selected_gpus=0,1,2,3 {} {}\".format(\n run_py, current_args)\n status = os.system(cmd)\n if status != 0:\n res[cmd] = \"FAIL\"\n else:\n res[cmd] = \"SUCCESS\"\n cmd = \"rm -rf checkpoints\"\n os.system(cmd)\n\n total_num = len(res)\n fail_num = 0\n for cmd, status in res.items():\n if status == \"FAIL\":\n fail_num += 1\n print(\"-\" * 30)\n print(\"Failure Rate: {} / {}\".format(str(fail_num), str(total_num)))\n print(\"-\" * 30)\n print(\"Detail:\")\n for cmd, status in res.items():\n print(\"{} : {}\".format(status, cmd))", "def create_slurm_file(\n slurm_filepath: Path, batch_size: int, num_batches: int, time_limit: int\n):\n slurm_string = f\"\"\"#!/usr/bin/bash\n#SBATCH --job-name=pctsp\n#SBATCH --partition=cpu-batch\n#SBATCH --ntasks=10\n#SBATCH --cpus-per-task=1\n#SBATCH --mem-per-cpu=4000\n#SBATCH --time={time_limit}:00:00\n#SBATCH --array=0-{num_batches-1}\n\n## Loop over each batch ##\nstart=$(($SLURM_ARRAY_TASK_ID * {batch_size}))\nsrun --ntasks=1 python scripts/batch_model.py $start {batch_size} \\\n\"\"\"\n slurm_filepath.write_text(slurm_string)", "def execute(gpu, exp_batch, exp_alias, suppress_output=True, number_of_workers=12):\n try:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(gpu)\n g_conf.VARIABLE_WEIGHT = {}\n\n # At this point the log file with the correct naming is created.\n # You merge the yaml file with the global configuration structure.\n merge_with_yaml(os.path.join('configs', exp_batch, exp_alias + '.yaml'))\n set_type_of_process('train')\n\n # Set the process into loading status.\n coil_logger.add_message('Loading', {'GPU': gpu})\n\n # Put the output to a separate file if it is the case\n if suppress_output:\n if not os.path.exists('_output_logs'):\n os.mkdir('_output_logs')\n sys.stdout = open(os.path.join('_output_logs', exp_alias + '_' +\n g_conf.PROCESS_NAME + '_' + str(os.getpid()) + \".out\"), \"a\",\n buffering=1)\n sys.stderr = open(os.path.join('_output_logs',\n exp_alias + '_err_'+g_conf.PROCESS_NAME + '_'\n + str(os.getpid()) + \".out\"),\n \"a\", buffering=1)\n\n if 
coil_logger.check_finish('train'):\n coil_logger.add_message('Finished', {})\n return\n\n # Preload option\n if g_conf.PRELOAD_MODEL_ALIAS is not None:\n checkpoint = torch.load(os.path.join('_logs', g_conf.PRELOAD_MODEL_BATCH,\n g_conf.PRELOAD_MODEL_ALIAS,\n 'checkpoints',\n str(g_conf.PRELOAD_MODEL_CHECKPOINT)+'.pth'))\n\n\n # Get the latest checkpoint to be loaded\n # returns none if there are no checkpoints saved for this model\n checkpoint_file = get_latest_saved_checkpoint()\n if checkpoint_file is not None:\n checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,\n 'checkpoints', str(get_latest_saved_checkpoint())))\n iteration = checkpoint['iteration']\n best_loss = checkpoint['best_loss']\n best_loss_iter = checkpoint['best_loss_iter']\n print ('iteration: ', iteration, 'best_loss: ', best_loss)\n else:\n iteration = 0\n best_loss = 10000.0\n best_loss_iter = 0\n\n\n # Define the dataset. This structure is has the __get_item__ redefined in a way\n # that you can access the positions from the root directory as a in a vector.\n full_dataset = os.path.join(os.environ[\"COIL_DATASET_PATH\"], g_conf.TRAIN_DATASET_NAME)\n\n # By instantiating the augmenter we get a callable that augment images and transform them into tensors.\n augmenter = Augmenter(g_conf.AUGMENTATION)\n\n # Instantiate the class used to read the dataset\n dataset = CoILDataset(full_dataset, transform=augmenter, preload_name=str(g_conf.NUMBER_OF_HOURS)+'hours_'+g_conf.TRAIN_DATASET_NAME)\n print (\"Loaded dataset\")\n \n # Creates the sampler, this part is responsible for managing the keys. It divides\n # all keys depending on the measurements and produces a set of keys for each bach.\n # define the sampling strategy for mini-batch, different samplers can be found in 'splitter.py'\n data_loader = select_balancing_strategy(dataset, iteration, number_of_workers)\n\n # Instatiate the network architecture\n model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)\n model.cuda()\n\n optimizer = optim.Adam(model.parameters(), lr=g_conf.LEARNING_RATE) # adabound and adamio can also be used here\n\n if checkpoint_file is not None or g_conf.PRELOAD_MODEL_ALIAS is not None:\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n accumulated_time = checkpoint['total_time']\n loss_window = coil_logger.recover_loss_window('train', iteration)\n else: \n # We accumulate iteration time and keep the average speed\n accumulated_time = 0\n loss_window = []\n\n # freeze the perception module weights if required\n # for m in model.perception.parameters():\n # m.requires_grad = False\n \n # total trainable parameters\n model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n total_params = sum([np.prod(p.size()) for p in model_parameters])\n print ('trainable parameters: ', total_params)\n\n # multi-gpu\n print ('number of gpus: ', torch.cuda.device_count())\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n\n criterion = Loss(g_conf.LOSS_FUNCTION)\n\n print ('Start Training')\n\n st = time.time()\n for data in data_loader:\n\n # use this for early stopping if the validation loss is not coming down\n if g_conf.FINISH_ON_VALIDATION_STALE is not None and \\\n check_loss_validation_stopped(iteration, g_conf.FINISH_ON_VALIDATION_STALE):\n break\n\n \"\"\"\n ####################################\n Main optimization loop\n ####################################\n \"\"\"\n\n iteration += 1\n\n if iteration % 1000 == 0:\n 
adjust_learning_rate_auto(optimizer, loss_window)\n \n # additional learning rate scheduler - cyclic cosine annealing (https://arxiv.org/pdf/1704.00109.pdf)\n # adjust_learning_rate_cosine_annealing(optimizer, loss_window, iteration)\n\n capture_time = time.time()\n controls = data['directions']\n model.zero_grad()\n branches = model(torch.squeeze(data['rgb'].cuda()),\n dataset.extract_inputs(data).cuda())\n loss_function_params = {\n 'branches': branches,\n 'targets': dataset.extract_targets(data).cuda(),\n 'controls': controls.cuda(),\n 'inputs': dataset.extract_inputs(data).cuda(),\n 'branch_weights': g_conf.BRANCH_LOSS_WEIGHT,\n 'variable_weights': g_conf.VARIABLE_WEIGHT\n }\n loss, _ = criterion(loss_function_params)\n loss.backward()\n optimizer.step()\n \"\"\"\n ####################################\n Saving the model if necessary\n ####################################\n \"\"\"\n\n if is_ready_to_save(iteration):\n if torch.cuda.device_count() > 1:\n state_dict_save = model.module.state_dict()\n else:\n state_dict_save = model.state_dict()\n\n state = {\n 'iteration': iteration,\n 'state_dict': state_dict_save,\n 'best_loss': best_loss,\n 'total_time': accumulated_time,\n 'optimizer': optimizer.state_dict(),\n 'best_loss_iter': best_loss_iter\n }\n torch.save(state, os.path.join('_logs', exp_batch, exp_alias\n , 'checkpoints', str(iteration) + '.pth'))\n\n \"\"\"\n ################################################\n Adding tensorboard logs.\n Making calculations for logging purposes.\n These logs are monitored by the printer module.\n #################################################\n \"\"\"\n coil_logger.add_scalar('Loss', loss.data, iteration)\n coil_logger.add_image('Image', torch.squeeze(data['rgb']), iteration)\n if loss.data < best_loss:\n best_loss = loss.data.tolist()\n best_loss_iter = iteration\n\n # Log a random position\n position = random.randint(0, len(data) - 1)\n\n if torch.cuda.device_count() > 1:\n output = model.module.extract_branch(torch.stack(branches[0:4]), controls)\n else:\n output = model.extract_branch(torch.stack(branches[0:4]), controls)\n error = torch.abs(output - dataset.extract_targets(data).cuda())\n\n accumulated_time += time.time() - capture_time\n\n coil_logger.add_message('Iterating',\n {'Iteration': iteration,\n 'Loss': loss.data.tolist(),\n 'Images/s': (iteration * g_conf.BATCH_SIZE) / accumulated_time,\n 'BestLoss': best_loss, 'BestLossIteration': best_loss_iter,\n 'Output': output[position].data.tolist(),\n 'GroundTruth': dataset.extract_targets(data)[\n position].data.tolist(),\n 'Error': error[position].data.tolist(),\n 'Inputs': dataset.extract_inputs(data)[\n position].data.tolist()},\n iteration)\n loss_window.append(loss.data.tolist())\n coil_logger.write_on_error_csv('train', loss.data)\n print(\"Iteration: %d Loss: %f\" % (iteration, loss.data))\n st = time.time()\n\n coil_logger.add_message('Finished', {})\n \n except KeyboardInterrupt:\n coil_logger.add_message('Error', {'Message': 'Killed By User'})\n\n except RuntimeError as e:\n\n coil_logger.add_message('Error', {'Message': str(e)})\n\n except:\n traceback.print_exc()\n coil_logger.add_message('Error', {'Message': 'Something Happened'})", "def main():\n\n # Force scripts to not use graphical output\n env = dict()\n env.update(os.environ)\n\n if \"DISPLAY\" not in os.environ:\n # No DISPLAY, set suitable default matplotlib backend as pyplot is used\n env[\"MPLBACKEND\"] = \"Agg\"\n\n if \"ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\" not in os.environ:\n 
env[\"ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\"] = str(multiprocessing.cpu_count())\n\n # Prevent user site packages from interfering with SCT dependencies (See issue #3067)\n env[\"PYTHONNOUSERSITE\"] = \"True\"\n\n command = os.path.basename(sys.argv[0])\n pkg_dir = os.path.dirname(sct.__file__)\n\n script = os.path.join(pkg_dir, \"scripts\", \"{}.py\".format(command))\n assert os.path.exists(script)\n\n cmd = [sys.executable, script] + sys.argv[1:]\n\n mpi_flags = os.environ.get(\"SCT_MPI_MODE\", None)\n if mpi_flags is not None:\n if mpi_flags == \"yes\": # compat\n mpi_flags = \"-n 1\"\n cmd = [\"mpiexec\"] + mpi_flags.split() + cmd\n\n os.execvpe(cmd[0], cmd[0:], env)", "def main(specification_dir, out_dir, num_gpus, exps_per_gpu):\n\n # 1. Load the specifications\n specs = load_specifications(specification_dir)\n \n # 2. Create the output directory\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n if os.listdir(out_dir):\n logger.warning(\"The output directory {} is not empty. Are you sure you want to continue?\".format(out_dir))\n # time.sleep(3)\n\n # 3. Create the workers with specific environment variables\n num_workers = num_gpus * exps_per_gpu\n\n with NonDaemonPool(num_workers) as pool:\n logger.info(\"Created {} workers\".format(num_workers))\n \n # Create the available device queue.\n m = multiprocessing.Manager()\n available_devices = m.Queue()\n for g in range(num_gpus):\n for _ in range(exps_per_gpu):\n available_devices.put(g)\n\n\n # 4. Create and distribute the workload\n workload = list(sorted([\n (spec, J(out_dir, spec[\"name\"]), available_devices) for spec in specs\n ], key=lambda x: (1 + 10000*x[0]['depth'])*x[0]['width']))\n \n logger.info(\"Running {} jobs accross {} GPUs\".format(len(workload), num_gpus))\n\n # 5. 
Launch the workers.\n logger.info(\"Launching the workers using `run_experiment`.\")\n list(pool.imap_unordered(\n launch_experiment_on_device,\n workload\n ))\n # pool.join()\n \n logger.info(\"Success, all experiments completed!\")", "def main():\n # Manual seed for reproducibility\n torch.manual_seed(363636)\n\n # Global instances\n global args, use_cuda, device\n # Instantiating the parser\n args = parser.parse_args()\n # Global CUDA flag\n use_cuda = args.cuda and torch.cuda.is_available()\n # Defining device and device's map locationo\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n print('chosen device: ', device)\n\n # Defining loss function and printing CUDA information (if available)\n if use_cuda:\n print(\"PyTorch version: \")\n print(torch.__version__)\n print(\"CUDA Version: \")\n print(torch.version.cuda)\n print(\"cuDNN version is: \")\n print(cudnn.version())\n cudnn.benchmark = True\n criterion = nn.CrossEntropyLoss().cuda()\n else:\n criterion = nn.CrossEntropyLoss()\n\n # Dataloaders for CIFAR, ImageNet and MNIST\n if args.dataset == 'CIFAR100':\n\n normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],\n std=[x / 255.0 for x in [63.0, 62.1, 66.7]])\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}\n\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100(root=args.data_path, train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.075),\n transforms.ToTensor(),\n normalize,\n Cutout(n_holes=1, length=16),\n ]), download=True),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100(root=args.data_path, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.val_batch_size, shuffle=False, **kwargs)\n\n elif args.dataset == 'ImageNet':\n\n traindir = os.path.join(args.data_path, 'train')\n valdir = os.path.join(args.data_path, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(args.image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n\n image_size = args.image_size\n val_dataset = datasets.ImageFolder(\n valdir,\n transforms.Compose([\n transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n normalize,\n ]))\n val_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=args.val_batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n elif args.dataset == 'MNIST':\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}\n\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.data_path, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n val_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.data_path, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n 
batch_size=args.val_batch_size, shuffle=True, **kwargs)\n\n elif args.dataset == 'CIFAR10':\n\n normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],\n std=[x / 255.0 for x in [63.0, 62.1, 66.7]])\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}\n\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root=args.data_path, train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize,\n ]), download=True),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root=args.data_path, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.val_batch_size, shuffle=False, **kwargs)\n\n # original grid = [(1.0, 1.0), (1.9, 1.0), (1.7, 1.1), (1.6, 1.1), (1.4, 1.2), (1.2, 1.3), (1.0, 1.4)]\n\n grid = [(args.grid[i], args.grid[i+1]) for i in range(0, len(args.grid), 2)]\n\n for coeff in grid:\n alpha = coeff[0] ** args.phi\n beta = coeff[1] ** args.phi\n grid_search(train_loader, val_loader, criterion, alpha, beta)", "def run_child_process(\n train_func: Callable,\n output_path: str,\n rdzv_endpoint: str,\n rank: int,\n local_rank: int,\n gpu_ids: List[int],\n world_size: int,\n ):\n\n # initialize start method\n mp.set_start_method(method=None, force=True)\n\n gpus_arg_idx = sys.argv.index(\"--gpus\")\n for _ in range(2):\n sys.argv.pop(gpus_arg_idx)\n if \"--enable-hpo\" in sys.argv:\n sys.argv.remove(\"--enable-hpo\")\n set_arguments_to_argv([\"-o\", \"--output\"], output_path)\n set_arguments_to_argv(\"--rdzv-endpoint\", rdzv_endpoint)\n\n MultiGPUManager.initialize_multigpu_train(rdzv_endpoint, rank, local_rank, gpu_ids, world_size)\n\n threading.Thread(target=MultiGPUManager.check_parent_processes_alive, daemon=True).start()\n\n train_func()", "def script_generator(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('evaluate_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n \n content += \"%s %s %s --work_dir %s --validate %s &> train.log \\n\"%(py, \n train_py,\n self.setting['config_file'],\n self.run_dir,\n ex_options)\n content += \"touch evaluate.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def cli(ctx, backend, networks, large_ops, tc_autotune, tc_cachedir, tc_at_generations,\n tc_at_population, tvm_driver, cuda_profile):\n tc_cachedir = os.path.expanduser(tc_cachedir)\n runner = ctx.ensure_object(core.Runner)\n frontend = Frontend(\n backend, OPS if large_ops else MS_OPS, {\n 'tc_autotune': tc_autotune,\n 'tc_cachedir': tc_cachedir,\n 'tc_at_generations': tc_at_generations,\n 'tc_at_population': tc_at_population,\n 'cuda_profile': cuda_profile,\n 'tvm_driver': str(tvm_driver)\n })\n\n return runner.run(frontend, backend, networks)", "def gen_cluster_script(\n crop,\n scheduler,\n batch_ids=None,\n *,\n mode=\"array\",\n num_procs=None,\n num_threads=None,\n num_nodes=None,\n num_workers=None,\n mem=None,\n mem_per_cpu=None,\n gigabytes=None,\n time=None,\n hours=None,\n minutes=None,\n seconds=None,\n conda_env=True,\n launcher=\"python\",\n setup=\"#\",\n 
shell_setup=\"\",\n mpi=False,\n temp_gigabytes=1,\n output_directory=None,\n debugging=False,\n **kwargs,\n):\n\n scheduler = scheduler.lower() # be case-insensitive for scheduler\n\n if scheduler not in (\"sge\", \"pbs\", \"slurm\"):\n raise ValueError(\"scheduler must be one of 'sge', 'pbs', or 'slurm'.\")\n\n if mode not in (\"array\", \"single\"):\n raise ValueError(\"mode must be one of 'array' or 'single'.\")\n\n # parse the number of threads\n if num_threads is None:\n if num_workers is None:\n # default to 1 thread per core for no workers\n num_threads = num_procs\n else:\n # default to 1 thread per worker\n num_threads = round(num_procs / num_workers)\n\n # parse the time requirement\n if hours is minutes is seconds is None:\n if time is not None:\n if isinstance(time, (int, float)):\n hours = time\n minutes, seconds = 0, 0\n elif isinstance(time, str):\n hours, minutes, seconds = time.split(\":\")\n else:\n hours, minutes, seconds = 1, 0, 0\n else:\n if time is not None:\n raise ValueError(\n \"Cannot specify both time and hours, minutes, seconds.\"\n )\n hours = 0 if hours is None else int(hours)\n minutes = 0 if minutes is None else int(minutes)\n seconds = 0 if seconds is None else int(seconds)\n\n if scheduler == \"slurm\":\n # only supply specified header options\n # TODO: same with PBS and SGE\n\n if num_nodes is not None:\n kwargs[\"nodes\"] = num_nodes\n if num_procs is not None:\n kwargs[\"cpus-per-task\"] = num_procs\n\n if gigabytes is not None:\n if mem is not None:\n raise ValueError(\"Cannot specify both gigabytes and mem.\")\n mem = gigabytes\n\n if mem is not None:\n if isinstance(mem, int):\n mem = f\"{mem}G\"\n kwargs[\"mem\"] = mem\n\n if mem_per_cpu is not None:\n if isinstance(mem_per_cpu, int):\n mem_per_cpu = f\"{mem_per_cpu}G\"\n kwargs[\"mem-per-cpu\"] = mem_per_cpu\n\n else:\n # pbs, sge\n # parse memory to gigabytes\n if (gigabytes is not None) and (mem is not None):\n raise ValueError(\"Cannot specify both gigabytes and mem.\")\n\n if mem is not None:\n # take gigabytes from mem\n gigabytes = int(mem)\n\n if output_directory is None:\n from os.path import expanduser\n\n home = expanduser(\"~\")\n output_directory = os.path.join(home, \"Scratch\", \"output\")\n\n if conda_env is True:\n # automatically set conda environment to be the\n # same as the one that's running this function\n conda_env = os.environ.get(\"CONDA_DEFAULT_ENV\", False)\n if conda_env:\n # but only if we are in a conda environment\n if (\"conda activate\" in shell_setup) or (\n \"mamba activate\" in shell_setup\n ):\n # and user is not already explicitly activating\n conda_env = False\n\n if isinstance(conda_env, str):\n # should now be a string\n shell_setup += f\"\\nconda activate {conda_env}\"\n elif conda_env is not False:\n raise ValueError(\n \"conda_env must be either ``False``, \"\n f\"``True`` or a string, not {conda_env}\"\n )\n\n crop.calc_progress()\n\n if kwargs:\n if scheduler == \"slurm\":\n header_options = \"\\n\".join([\n f\"#SBATCH --{k}\"\n if (v is None or v is True) else\n f\"#SBATCH --{k}={v}\"\n for k, v in kwargs.items()\n ])\n elif scheduler == \"pbs\":\n header_options = \"\\n\".join([\n f\"#PBS -l {k}\"\n if (v is None or v is True) else\n f\"#PBS -l {k}={v}\"\n for k, v in kwargs.items()\n ])\n elif scheduler == \"sge\":\n header_options = \"\\n\".join([\n f\"#$ -l {k}\"\n if (v is None or v is True) else\n f\"#$ -l {k}={v}\"\n for k, v in kwargs.items()\n ])\n else:\n header_options = \"\"\n\n if num_threads is None:\n if mpi:\n # assume single thread 
per rank\n num_threads = 1\n else:\n if num_workers is None:\n # assume all multithreading over all cores\n num_threads = num_procs\n else:\n # assume each worker has equal number of threads\n num_threads = max(1, num_procs // num_workers)\n\n if num_workers is not None:\n if num_workers * num_threads != num_procs:\n warnings.warn(\n f\"num_workers * num_threads ({num_workers} * {num_threads}) \"\n f\"!= num_procs ({num_procs}), may not be computationally \"\n \"efficient.\"\n )\n\n # get absolute path\n full_parent_dir = str(pathlib.Path(crop.parent_dir).expanduser().resolve())\n\n opts = {\n \"hours\": hours,\n \"minutes\": minutes,\n \"seconds\": seconds,\n \"gigabytes\": gigabytes,\n \"name\": crop.name,\n \"parent_dir\": full_parent_dir,\n \"num_procs\": num_procs,\n \"num_threads\": num_threads,\n \"num_nodes\": num_nodes,\n \"num_workers\": num_workers,\n \"launcher\": launcher,\n \"setup\": setup,\n \"shell_setup\": shell_setup,\n \"pe\": \"mpi\" if mpi else \"smp\",\n \"temp_gigabytes\": temp_gigabytes,\n \"output_directory\": output_directory,\n \"working_directory\": full_parent_dir,\n \"header_options\": header_options,\n \"debugging\": debugging,\n }\n\n if batch_ids is not None:\n # grow specific ids\n opts[\"batch_ids\"] = tuple(batch_ids)\n array_mode = \"partial\"\n elif crop.num_results == 0:\n # grow all ids\n opts[\"batch_ids\"] = range(1, crop.num_batches + 1)\n array_mode = \"all\"\n else:\n # find missing ids and grow them\n opts[\"batch_ids\"] = crop.missing_results()\n array_mode = \"partial\"\n\n # build the script!\n\n if scheduler == \"sge\":\n script = _SGE_HEADER\n if mode == \"array\":\n script += _SGE_ARRAY_HEADER\n elif scheduler == \"pbs\":\n script = _PBS_HEADER\n if mode == \"array\":\n script += _PBS_ARRAY_HEADER\n elif scheduler == \"slurm\":\n script = _SLURM_HEADER\n if mode == \"array\":\n script += _SLURM_ARRAY_HEADER\n\n script += _BASE\n\n if mode == \"array\":\n opts[\"run_start\"] = 1\n\n if array_mode == \"all\":\n opts[\"run_stop\"] = crop.num_batches\n if scheduler == \"sge\":\n script += _CLUSTER_SGE_GROW_ALL_SCRIPT\n elif scheduler == \"pbs\":\n script += _CLUSTER_PBS_GROW_ALL_SCRIPT\n elif scheduler == \"slurm\":\n script += _CLUSTER_SLURM_GROW_ALL_SCRIPT\n\n elif array_mode == \"partial\":\n opts[\"run_stop\"] = len(opts[\"batch_ids\"])\n if scheduler == \"sge\":\n script += _CLUSTER_SGE_GROW_PARTIAL_SCRIPT\n elif scheduler == \"pbs\":\n script += _CLUSTER_PBS_GROW_PARTIAL_SCRIPT\n elif scheduler == \"slurm\":\n script += _CLUSTER_SLURM_GROW_PARTIAL_SCRIPT\n\n elif mode == \"single\":\n if batch_ids is None:\n # grow all missing, but compute the list dynamically\n # this allows the job to be restarted\n opts[\"batch_ids\"] = \"crop.missing_results()\"\n script += _BASE_CLUSTER_GROW_SINGLE\n\n script += _BASE_CLUSTER_SCRIPT_END\n script = script.format(**opts)\n\n if (scheduler == \"pbs\") and len(opts[\"batch_ids\"]) == 1:\n # PBS can't handle arrays jobs of size 1...\n script = script.replace(\"#PBS -J 1-1\\n\", \"\").replace(\n \"$PBS_ARRAY_INDEX\", \"1\"\n )\n\n return script", "def _UpdateScripts(benchmark_spec, vm):\n benchmark = benchmark_spec.benchmark\n vm = vm or benchmark_spec.vms[0]\n\n config_sed = []\n config_sed += [(r'DGXSYSTEM=.*', fr'DGXSYSTEM=\\\"{DGXSYSTEM}\\\"')]\n gpus_per_node = nvidia_driver.QueryNumberOfGpus(vm)\n config_sed.append((\n r'DGXNGPU=.*', fr'DGXNGPU={gpus_per_node}\\n'\n fr'export CUDA_VISIBLE_DEVICES={\",\".join([str(gpu_number) for gpu_number in range(gpus_per_node)])}'\n ))\n config_sed += 
[(r'DGXNSOCKET=.*',\n fr'DGXNSOCKET={vm.CheckLsCpu().socket_count}')]\n config_sed += [(r'DGXSOCKETCORES=.*',\n fr'DGXSOCKETCORES={vm.CheckLsCpu().cores_per_socket}')]\n\n model = 'maskrcnn' if MASK in benchmark else benchmark\n framework = 'mxnet' if RESNET in benchmark else 'pytorch'\n script_path = (\n fr'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks/{model}/'\n fr'implementations/{framework}')\n\n config_files = [CONFIG]\n\n if MASK in benchmark:\n config_sed = _GetChangesForMask(config_sed)\n config_files = ['config_DGXA100.sh']\n\n elif RESNET in benchmark:\n config_sed = _GetChangesForResnet(config_sed)\n config_files = ['config_DGXA100_common.sh', 'config_DGXA100.sh']\n UpdateScriptForSmallGpuMem(vm)\n\n elif BERT in benchmark:\n config_sed = _GetChangesForBert(config_sed)\n config_files = ['config_DGXA100_common.sh', 'config_DGXA100_1x8x56x1.sh']\n\n vm.RemoteCommand(\n f'cd {script_path} && '\n f'sed \"{SedPairsToString(config_sed)}\" '\n f'{\" \".join(config_files)} > {CONFIG} && '\n f'chmod 755 {CONFIG} && '\n f'sed -i \"2 i source {CONFIG}\" run_and_time.sh && '\n f'sed -i \"2 i source {CONFIG}\" run_with_docker.sh')", "def main():\n # Fix random seed.\n torch.manual_seed(0)\n\n # Create checkpoint directory.\n try:\n os.mkdir('checkpoints')\n except FileExistsError:\n pass\n\n # Make preparations.\n args = get_args()\n logger = get_logger()\n data_train, data_val, data_test = get_data(args.batch_size,\n args.num_workers)\n model = get_model()\n\n # Log command arguments.\n logger.info(' '.join(sys.argv))\n logger.info(vars(args))\n\n # Send the model to the GPU, if enabled and available.\n if args.cuda:\n model = model.cuda()\n\n # Create the loss function and optimizer.\n loss_function = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(),\n lr=args.learning_rate,\n momentum=args.momentum)\n\n # Load checkpoint, if given.\n if args.checkpoint:\n load_checkpoint(args.checkpoint, model, optimizer)\n\n # Loop epochs.\n for epoch in range(args.num_epochs):\n logger.info(f'Epoch {epoch}:')\n\n mean_loss = train(model, loss_function, optimizer, data_train)\n logger.info(f' - [training] mean loss: {mean_loss:.3f}')\n\n accuracy = evaluate(model, data_val)\n logger.info(f' - [validation] accuracy: {accuracy:.3f}')\n\n torch.save([model.state_dict(), optimizer.state_dict()],\n os.path.join('checkpoints', f'{epoch}.pth'))\n\n # Run final evaluation on the test data.\n logger.info('Test:')\n accuracy = evaluate(model, data_test)\n logger.info(f' - [test] accuracy: {accuracy:.3f}')", "def create_job(jobrun, vcf_filenames):\n if jobrun == \"cluster\":\n \"\"\"\n Supports only PBS clusters for now.\n \"\"\"\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n for i in pbs_scripts:\n print \"Running: qsub %s\" % i\n 
#os.system(\"qsub %s\" % i)\n\n elif jobrun == \"parallel-local\":\n \"\"\"\n Generate a Command list of each job and run it in parallel on different cores available on local system\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n f3 = open(command_file, 'w+')\n\n\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n\n\n for i in pbs_scripts:\n f3.write(\"bash %s\\n\" % i)\n f3.close()\n with open(command_file, 'r') as fpp:\n for lines in fpp:\n lines = lines.strip()\n command_array.append(lines)\n fpp.close()\n print len(command_array)\n if args.numcores:\n num_cores = int(num_cores)\n else:\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(run_command)(command) for command in command_array)\n\n elif jobrun == \"parallel-single-cluster\":\n print \" \"\n else:\n \"\"\"\n Generate a Command list of each job and run it on local system one at a time\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n os.system(\"bash %s\" % command_file)" ]
[ "0.58565634", "0.5829015", "0.56408805", "0.5608197", "0.5572243", "0.5511401", "0.5470014", "0.54624295", "0.5433461", "0.5427186", "0.54209", "0.54072654", "0.54013383", "0.5391396", "0.5360414", "0.5359389", "0.5348898", "0.5342166", "0.5309496", "0.5298081", "0.52940387", "0.5284247", "0.52790684", "0.52756774", "0.52711403", "0.52569705", "0.52453804", "0.5243406", "0.5222519", "0.5220015" ]
0.67427
0
Create symlinks between all the files inside the generator folders provided as a glob string and the input folder destination
def move_generators_to_input(self, generator_folder_glob):
    spawn_folder_names = []
    generator_folders = glob(generator_folder_glob)
    for i, folder in enumerate(generator_folders):
        base_name = 'e01s{:02d}_{}f0000'.format(i + 1, os.path.basename(folder))
        input_destination = os.path.join(self.input_folder, base_name)
        data_destination = os.path.join(self.data_folder, base_name)
        create_folder(input_destination)
        create_folder(data_destination)
        spawn_folder_names.append(input_destination)
        create_symlinks(
            files=os.path.join(folder, '*'),
            dst_folder=os.path.relpath(input_destination)
        )
    return spawn_folder_names
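For comparison with the document above, here is a minimal self-contained sketch of the same symlinking step using only the standard library. The record's create_folder and create_symlinks helpers are project-specific and not shown, so this sketch substitutes os.makedirs and os.symlink directly; the function name symlink_glob_into and the example paths are hypothetical.

import os
from glob import glob

def symlink_glob_into(src_glob, dst_folder):
    # Make the destination folder, then symlink every file matching src_glob into it.
    os.makedirs(dst_folder, exist_ok=True)
    for src in glob(src_glob):
        dst = os.path.join(dst_folder, os.path.basename(src))
        if os.path.lexists(dst):
            os.remove(dst)  # replace a stale link instead of failing
        os.symlink(os.path.abspath(src), dst)

# Usage (hypothetical paths):
# symlink_glob_into('generators/run01/*', 'input/e01s01_run01f0000')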
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_franny_symlinks(src_dirs, out_dir):\n\n for path, dirs, files in chain.from_iterable(os.walk(path)\n for path in src_dirs):\n print('Looking in %s' % path)\n for sta in ['NS12', 'NS13', 'NS14']:\n for filename in fnmatch.filter(files, '*.%s*' % sta):\n net = filename.split('.')[-7]\n chan = filename.split('.')[-4]\n if chan[-1] == 'N':\n new_chan = 'EH1'\n elif chan[-1] == 'E':\n new_chan = 'EH2'\n else:\n continue\n mseed_nm = filename.split('/')[-1]\n new_mseed = string.replace(mseed_nm, chan, new_chan)\n old_path = os.path.join(path, filename)\n new_path = '%s/%s/%s/%s.D/%s' % (out_dir, net,\n sta, new_chan, new_mseed)\n\n print('Creating symlink for file %s at %s'\n % (old_path, new_path))\n spwd = '*blackmore89'\n cmnd = 'sudo -S ln %s %s' % (old_path, new_path)\n os.system('echo %s | %s' % (spwd, cmnd))\n return", "def makeLinks(files, folderName='SimFiles'):\n\n from os import symlink\n from os import chdir\n\n groups = { 1 : [\"Cos0.5\",\"Cos0.7\"],\n 2 : [\"Cos0.6\",\"Cos0.9\"],\n 3 : [\"Cos0.8\",\"Cos1.0\"]}\n\n for filename in files:\n for group,angles in groups.iteritems():\n if any(x in filename for x in angles):\n chdir(folderName + str(group))\n symlink('../'+filename, filename)\n chdir('../')", "def cpsym(src,dest):\n \n src = os.path.normpath(src)\n dest = os.path.normpath(dest)\n \n if not os.path.exists(src):\n return\n \n for dirpath,dirnames,filenames in os.walk(src):\n rel_dirpath = os.path.relpath(dirpath,src)\n dest_dirpath = os.path.join(dest,rel_dirpath)\n mkdir(dest_dirpath,isfull=True)\n \n for filename in filenames:\n src_filename = os.path.join(dirpath,filename)\n rel_filename = os.path.relpath(src_filename,src)\n \n dest_filename = os.path.join(dest,rel_filename)\n try:\n os.symlink(src_filename,dest_filename)\n except OSError:\n pass", "def convert_relative_symlinks(template_dir, out_dir):\n for root, dirs, files in os.walk(out_dir):\n for filename in files:\n filepath = os.path.join(root, filename)\n if os.path.islink(filepath):\n linkto = os.readlink(filepath)\n if linkto.startswith('.'):\n os.remove(filepath)\n start_dir = os.path.relpath(root, out_dir)\n os.symlink(os.path.join(template_dir, start_dir, filename), filepath)", "def symlink_images(output_path):\n image_dest = Path(output_path / \"assets/images/integrations\")\n image_dest.mkdir(mode=0o755, parents=True, exist_ok=True)\n\n processed = set()\n for img_file in sorted(INTEGRATIONS_PATH.glob(\"**/img/*.png\")):\n if not img_file.is_file():\n continue\n\n dest_path = image_dest / img_file.name\n\n if img_file.name in processed:\n # print(f\"WARNING image file {img_file} has duplicate name\")\n continue\n processed.add(img_file.name)\n\n dest_path.symlink_to(img_file)", "def symlink_input(filegroup_resource_path, temp_dir, strip_prefix=None,\n copy=False):\n assert os.path.isdir(temp_dir)\n manifest = runfiles.Create()\n with open(manifest.Rlocation(filegroup_resource_path)) as f:\n input_filenames = f.read().splitlines()\n for name in input_filenames:\n orig_name = manifest.Rlocation(name)\n assert os.path.exists(orig_name), name\n dest_name = name\n for prefix in (strip_prefix or []):\n if dest_name.startswith(prefix):\n dest_name = dest_name[len(prefix):]\n break\n temp_name = join(temp_dir, dest_name)\n os.makedirs(os.path.dirname(temp_name), exist_ok=True)\n if copy:\n shutil.copy(orig_name, temp_name)\n else:\n os.symlink(orig_name, temp_name)", "def transfer_files_from_dir_link(\n source_filepath_list: Iterable[str],\n destination_filepath_list: Iterable[str],\n force_overwrite: 
bool = False,\n do_relative_link: bool = False\n) -> None:\n hide_progress_bar = logger.getEffectiveLevel() > logging.INFO\n for src, dst in tqdm(zip(source_filepath_list, destination_filepath_list), disable=hide_progress_bar):\n os.makedirs(path.dirname(dst), exist_ok=True)\n if force_overwrite and path.lexists(dst):\n os.remove(dst)\n try: # on windows, symlink requires some privileges, and may crash if not\n if do_relative_link:\n src = path.relpath(src, path.dirname(dst))\n os.symlink(src, dst)\n except OSError as e:\n logger.critical('unable to create symlink on image directory, due to privilege restrictions.')\n raise e", "def create_symlink_dir(src_dir, src_list, dst):\n if not src_list:\n return\n message = \"creating symlink directory at {dst} with files {src_list}\".format(\n dst=dst,\n src_list=pformat(src_list))\n logging.info(message)\n if not os.path.exists(dst):\n os.makedirs(dst)\n for src_file in src_list:\n if not src_file:\n continue\n source = os.path.join(src_dir, src_file)\n destination = os.path.join(dst, src_file)\n if os.path.lexists(destination):\n continue\n try:\n os.symlink(source, destination)\n except Exception as e:\n msg = format_debug(e)\n logging.error(e)", "def generateLink(folder, filename):\n if not folder.endswith('/'):\n folder += \"/\"\n\n return folder + os.path.basename(filename)", "def test_create_symlink_file(self):\n pass", "def _create_symlink(self, source_path, main):\n main_file = os.path.realpath(os.path.join(source_path, main))\n if not os.path.isfile(main_file):\n main_file += '.js'\n if not os.path.isfile(main_file):\n print('\\tWARNING: Could not create symlink for {}, no such file.'.format(main_file))\n return\n main_file_name = os.path.basename(main_file)\n with change_working_directory(os.path.realpath(self.symlink_dir)) as cd:\n file_path = os.path.join(cd, main_file_name)\n self.created(file_path)\n if os.path.islink(file_path):\n os.remove(file_path)\n symlink(main_file, main_file_name)", "def create_symlinks(target_dir: os.PathLike, symlinks_to_create: List[os.PathLike]):\n for src_path in symlinks_to_create:\n trg_path = os.path.join(target_dir, os.path.basename(src_path))\n\n if os.path.islink(src_path):\n # Let's not create symlinks to symlinks\n # Since dropping the current symlink will break the experiment\n os.symlink(os.readlink(src_path), trg_path)\n else:\n print(f'Creating a symlink to {src_path}, so try not to delete it occasionally!')\n os.symlink(src_path, trg_path)", "def _create_links(self):\n for line in self.iter_files_to_install():\n arcname, link = line.split()\n if link == 'False':\n continue\n self.files.append(create_link(arcname, link, self.prefix))", "def make_links(self):\n for filepath in list(self):\n self.make_link(filepath)", "def create_links(list_of_paths, dest_dir, print_cfg_ipol=False):\n ms = False\n for i, f in enumerate(list_of_paths):\n\n if isinstance(f, tuple): # we have the ms image\n # tif ms\n ms = True\n symlink_p(f[1], os.path.join(dest_dir, 'im_ms_%02d.tif' % (i+1)))\n\n # preview ms\n tmp = copy_file_matching_pathname('PREVIEW_*.JPG', os.path.dirname(f[1]), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1)))\n # enhance contrast\n # os.system(\"/home/carlo/code/s2p/bin/qauto %s %s\" % (tmp, tmp)\n else:\n print('MS PREVIEW not found for %s' % f[1], file=sys.stderr)\n f = f[0] # the path to ms preview is not needed anymore\n\n # pan preview (if no ms preview)\n if not os.path.isfile(os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1))):\n tmp = 
copy_file_matching_pathname('PREVIEW_*.JPG', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1)))\n # os.system(\"/home/carlo/code/s2p/bin/qauto %s %s\" % (tmp, tmp))\n else:\n print('PAN PREVIEW not found for %s' % f, file=sys.stderr)\n\n # dim\n tmp = copy_file_matching_pathname('DIM_*.XML', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'dim_%02d.xml' % (i+1)))\n\n # rpc\n tmp = copy_file_matching_pathname('RPC_*.XML', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'rpc_%02d.xml' % (i+1)))\n\n # tif panchro\n symlink_p(f, os.path.join(dest_dir, 'im_panchro_%02d.tif' % (i+1)))\n\n # dzi 8 bits\n dzi8_found = False\n dzi8 = '%s_8BITS.dzi' % f[:-8] # remove extension '.JP2.TIF' (8 chars)\n files8 = '%s_8BITS_files' % f[:-8]\n if os.path.isfile(dzi8) and os.path.isdir(files8):\n symlink_p(dzi8, os.path.join(dest_dir, 'im_panchro_8BITS_%02d.dzi' % (i+1)))\n symlink_p(files8, os.path.join(dest_dir, 'im_panchro_8BITS_%02d_files' % (i+1)))\n dzi8_found = True\n\n # dzi 16 bits\n dzi16_found = False\n dzi16 = '%s_16BITS.dzi' % f[:-8] # remove extension '.JP2.TIF' (8 chars)\n files16 = '%s_16BITS_files' % f[:-8]\n if os.path.isfile(dzi16) and os.path.isdir(files16):\n symlink_p(dzi16, os.path.join(dest_dir, 'im_panchro_16BITS_%02d.dzi' % (i+1)))\n symlink_p(files16, os.path.join(dest_dir, 'im_panchro_16BITS_%02d_files' % (i+1)))\n dzi16_found = True\n\n # print warning if neither 8bit nor 16bit dzi was found\n if (not dzi8_found) and (not dzi16_found):\n print('WARNING: no dzi file found for img %s' % f, file=sys.stderr)\n\n if print_cfg_ipol:\n print_cfg.main(dest_dir, len(list_of_paths), ms)", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def main(file_paths: Optional[List[Path]] = None):\n if file_paths:\n for file_path in file_paths:\n move_and_symlink_file(file_path)\n\n else:\n create_home_directories()\n create_home_directory_symbolic_links()", "def _PopulateDeploymentDir(deployment_dir, paths):\n for path in paths:\n destination = os.path.join(deployment_dir, os.path.basename(path))\n os.symlink(path, destination)", "def copy_and_link(file_name):\n if os.path.normpath(output_path) != os.getcwd():\n write_to_runner(f\"mv {file_name} {output_path} \\n\")\n write_to_runner(f\"ln -s {output_path}/{file_name} . 
\\n\")", "def copy_files(self, dest_dir: str, symlink: bool = True):\n\n # Convert dir to pathlib.Path\n dest_dir = pathlib.Path(dest_dir)\n\n # Make directory if it does not exist.\n if not dest_dir.is_dir():\n dest_dir.mkdir(parents=True)\n\n # Symlink/copy in exe\n from_file = self.wrf_hydro_exe\n to_file = dest_dir.joinpath(from_file.name)\n if symlink:\n to_file.symlink_to(from_file)\n else:\n shutil.copy(str(from_file), str(to_file))", "def create_links(self, name):\n for target, linknames in self._link_map.iteritems():\n for linkname in linknames:\n self._api.path.mock_copy_paths(target, linkname)\n self._api.python(\n name,\n self._resource,\n args = [\n '--link-json',\n self._api.json.input({str(target) : linkname\n for target, linkname in self._link_map.iteritems()\n }),\n ],\n infra_step=True)", "def use_tmpdir_for_files(basenames, src_dir, link_dir):\n script = list()\n unique = os.path.abspath(src_dir).replace('/', '_')\n root = tempfile.gettempdir()\n tmp_dir = os.path.join(root, 'falcon', unique)\n script.append('mkdir -p %s' %tmp_dir)\n for basename in basenames:\n src = os.path.join(src_dir, basename)\n dst = os.path.join(tmp_dir, basename)\n rm_cmd = 'rm -f %s' %basename\n # Wait on lock for up to 10 minutes, in case of very large files.\n rsync_cmd = \"flock -w 600 %s.lock -c 'rsync -av %s %s'\" %(dst, src, dst)\n ln_cmd = 'ln -sf %s %s' %(dst, basename)\n script.extend([rm_cmd, rsync_cmd, ln_cmd])\n return script", "def create_paths(manager, parentpath=\"extractor_test_results/HoG/\"):\n \n paths_to_create = [\"data/features_all\", \"data/features_filled\",\n \"data/pair/both\", \"hog_images\", \"hog_plots\",\n \"orig_frames\", \"processed_frames\", \"evaluation\"]\n \n for path in paths_to_create:\n manager.make_folder(parentpath + path)", "def update_symlinks(n):\n\tif n > 0: return\n\tsymlink_dir = sc.text_image_symlink_dir.absolute()\n\tfor tpi, info in sorted(index.items(), key=lambda t: t[0]):\n\t\tsymlink = symlink_dir / info['url']\n\t\tif symlink.is_symlink():\n\t\t\tif symlink.resolve() == info['file']:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tsymlink.unlink()\n\t\tif not symlink.parent.exists():\n\t\t\tsymlink.parent.mkdir(parents=True)\n\t\tsymlink.symlink_to(info['file'])", "def create_home_directory_symbolic_links():\n file_paths = (\n path\n for path in repo_home.rglob(\"*\")\n if path.is_file() and not path.is_symlink()\n )\n\n for file_path in file_paths:\n sym_link_path = translate_home_path(file_path)\n\n if sym_link_path.is_file() and not sym_link_path.is_symlink():\n backup_file(sym_link_path)\n sym_link_path.unlink()\n\n if sym_link_path.is_symlink():\n sym_link_path.unlink()\n\n print(f\"Creating Symlink: {sym_link_path} -> {file_path}\")\n sym_link_path.symlink_to(file_path)", "def link_fastqs(fastq_in, fastq_out):\n if not os.path.exists(os.path.dirname(fastq_out)):\n os.mkdir(os.path.dirname(fastq_out))\n if not os.path.exists(fastq_out):\n os.symlink(fastq_in, fastq_out) \n\n \n\n #88888888888888888888888888888888888888888888888888\n #\n # R e a d t r i m m i n g\n #\n #88888888888888888888888888888888888888888888888888", "def link_resources(ctx):\n\n for resource in RESOURCES:\n\n command = \"ln -s -r -f -T {res}/{resource} {proj}/{resource}\".format(\n res=RESOURCE_DIR,\n proj=PROJECT_DIR,\n resource=resource)\n\n print(\"Running\")\n print(command)\n print(\"-----------------------------\")\n ctx.run(command)", "def _makeSymlink ( target, source, env ) :\n if len(target) != 1 :\n fail ( \"unexpected number of targets for symlink: 
\"+str(target) )\n if len(source) != 1 :\n fail ( \"unexpected number of sources for symlink: \"+str(source) )\n\n target = str(target[0])\n source = str(source[0].abspath)\n trace ( \"Executing symlink `%s' -> `%s'\" % ( target, source ), \"makeSymlink\", 3 )\n\n os.symlink ( source, target )", "def task():\n if os.path.isdir(orig):\n for fP in [ fP for fP in glob.glob(os.path.join(orig, '*-*/*')) if \\\n os.path.isdir(fP) ]:\n if not os.path.exists(dest + fP[len(orig):]):\n os.makedirs(dest + fP[len(orig):])\n for fP in [ fP for fP in glob.glob(os.path.join(orig, '*-*/*/%s.log' %fmt.get_date())) if \\\n os.path.isfile(fP) ]:\n convert(fP, dest + fP[len(orig):])", "def copy_files(self):\n for (source_name, target_name) in self.FILES_TO_LINK:\n src = os.path.expanduser(source_name)\n tgt = os.path.expanduser(target_name)\n cmd = 'cp -rf {src} {tgt}'.format(src=src, tgt=tgt)\n\n print(cmd)\n if not self.dry_run:\n run(cmd)" ]
[ "0.71133333", "0.65588665", "0.65161", "0.64801997", "0.6456491", "0.6369632", "0.6306505", "0.627164", "0.6246197", "0.619943", "0.6190338", "0.6169204", "0.6164171", "0.6154397", "0.6095598", "0.60587347", "0.6024996", "0.59829164", "0.5939647", "0.59356904", "0.59115773", "0.5903005", "0.5883701", "0.58687365", "0.5837082", "0.579112", "0.5779693", "0.5718065", "0.57169795", "0.5715201" ]
0.72281915
0
Move the Production.nc files inside each of the folders passed as a glob to their own subfolders inside the self.app.data_folder directory.
def move_trajs_to_folder(self, input_folders):
    if type(input_folders) == str:
        input_folders = glob(input_folders)
    elif type(input_folders) == list:
        pass
    else:
        raise ValueError('input_folders must be of type str or list')
    data_folder = os.path.abspath(self.data_folder)
    for folder in input_folders:
        dst_folder = os.path.join(data_folder, os.path.basename(folder))
        create_folder(dst_folder)
        os.rename(
            src=os.path.abspath(os.path.join(folder, 'Production.nc')),
            dst=os.path.join(dst_folder, 'Production.nc')
        )
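A minimal sketch of the move this record describes, assuming one Production.nc per matched folder and substituting os.makedirs for the record's create_folder helper; the function name move_production_files and the example glob are hypothetical.

import os
from glob import glob

def move_production_files(input_glob, data_folder):
    # Give each matched folder its own subfolder under data_folder and move its Production.nc there.
    data_folder = os.path.abspath(data_folder)
    for folder in glob(input_glob):
        dst_folder = os.path.join(data_folder, os.path.basename(folder))
        os.makedirs(dst_folder, exist_ok=True)
        os.rename(
            os.path.join(os.path.abspath(folder), 'Production.nc'),
            os.path.join(dst_folder, 'Production.nc'),
        )

# Usage (hypothetical glob):
# move_production_files('runs/e01s*', 'data')

Note that os.rename only works within a single filesystem; shutil.move is the safer choice if data_folder may sit on a different mount.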
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data_in_folder(self):\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in range(0, idx_max-1):\n data = []\n for f in self.filenames[idx:idx+64]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def move_files(self, file_dict: Dict[str, List[str]]) -> NoReturn:\n\n for folder in file_dict:\n target_folder = os.path.join(self.out_folder, folder)\n mkdirr(target_folder)\n for file_path in file_dict[folder]:\n annotation_file_name = (\n os.path.basename(file_path)\n .replace(\"png\", \"json\")\n .replace(\"jpg\", \"json\")\n )\n annotation_file_path = os.path.join(\n self.annotation_folder, annotation_file_name\n )\n\n copy_file(file_path, os.path.join(target_folder, DATA_FOLDER))\n copy_file(\n annotation_file_path, os.path.join(target_folder, ANNOTATION_FOLDER)\n )", "def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")", "def main(inputfolder):\n inputfolder = realpath(inputfolder)\n for data in DATASET:\n for fol in FOLDERS:\n actfile = join(inputfolder, data, data+'.txt')\n logger.info('Changing data in: %s' % actfile)\n filedata = []\n with open(actfile) as fin:\n for line in fin:\n id, y = map(int, line.strip().split('\\t'))\n if y == -1000:\n y = 0\n path = join(inputfolder, 'data'+str(data), action, 'original', str(id)+'.jpg')\n filedata.append((path, y))\n path = join(inputfolder, 'data'+str(data), action, 'original', str(id+1)+'.jpg')\n filedata.append((path, y))\n with open(actfile, 'w') as fout:\n for path, y in filedata:\n fout.write('%s %d\\n' % (path, y))", "def _move_files(topdatadir, startdate, model_forcing):\n\n curdate = startdate\n subdir = f\"{topdatadir}/cf_{model_forcing}\"\n subdir += 
f\"_{curdate.year:04d}{curdate.month:02d}\"\n files = glob.glob(f\"{subdir}/*.NC\")\n for filename in files:\n shutil.move(filename, os.path.join(topdatadir, os.path.basename(filename)))\n shutil.rmtree(subdir)", "def test_6e_move_data_btw_folders(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif (GST.default_folder_to_be_used):\n if not (default_folders_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare default directories\")\n elif (not GST.dir1_exists) or (not GST.dir2_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare dirs\")\n elif not GST.moving_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare moving data tests.\")\n self.dismiss_dialogs()\n function = js_func[\"move_file\"] % (GST.gs_file_paths[\"file_to_move_to_folder_source_path\"], GST.gs_file_paths[\"move_to_folder_target_path\"])\n try:\n self.send_request(function, \"move_file()\")\n except Exception as e:\n raise MoveException(\"Failed to move the data between folders. \\n\" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise MoveException(\"Failed to move the data between folders. \\n\" + response)", "def _prepare_simulation_subfolder(self, directory_strains):\n\t\tif not os.path.exists(directory_strains):\n\t\t\tos.mkdir(directory_strains)\n\t\tfor filename in self._directory_template_filenames:\n\t\t\tsrc = os.path.join(self._directory_template, filename)\n\t\t\tdst = os.path.join(directory_strains, filename)\n\t\t\tshutil.copy(src, dst)", "def _set_dirs(self, datafolder):\n self.List_of_dir = []\n self.List_of_files = dict()\n folders = os.listdir(datafolder)\n folders.sort()\n for i in folders:\n if os.path.isdir(os.path.join(datafolder,i)) and i != '.ipynb_checkpoints': # ignore .ipynb_checkpoints, allowing the generator to work in Amazon\n self.List_of_dir.append(os.path.join(datafolder,i))\n self.List_of_files[os.path.join(datafolder,i)]=[]\n for file in os.listdir(os.path.join(datafolder, i, 'Input')):\n if file.split('.')[-1] == 'hdf5':\n self.List_of_files[os.path.join(datafolder,i)].append(file.split('.')[-2])\n self._nb_dir = len(self.List_of_dir)", "async def _copy_folder_files(self, src_dir, dest_dir):\n for dir_item in os.listdir(src_dir):\n src_path = os.path.join(src_dir, dir_item)\n if os.path.isfile(src_path):\n await self._copy_file_with_hook(dir_item, src_path, os.path.join(dest_dir, dir_item))", "def distributeDataset(destinationFolder, testFolder, trainFolder):\n \n # Set up directories for test and training data sets\n if not os.path.exists(testFolder):\n os.makedirs(testFolder)\n if not os.path.exists(trainFolder):\n os.makedirs(trainFolder)\n\n # Generate list of directories\n dirs = []\n for i in range(0,8):\n dirs.append(os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS\\\\sd04\\\\png_txt\\\\figs_\" + str(i)))\n\n # Extract Test data\n files = os.listdir(dirs[0])\n\n for filename in files:\n shutil.copy(os.path.join(dirs[0], filename), testFolder)\n shutil.rmtree(dirs[0])\n\n # Extract Train data\n for i in range(1,8):\n\n files = os.listdir(dirs[i])\n for filename in files:\n shutil.copy(os.path.join(dirs[i], filename), trainFolder)\n shutil.rmtree(dirs[i])\n shutil.rmtree(os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS\"))", "def reconstruct_folder(data_root_paths, pixel_size, na, 
emission_wavelengths, excitation_wavelengths,\n affine_data_paths, otf_data_fname, dmd_pattern_data_fpath,\n channel_inds=None, crop_image=False, img_centers=None,\n crop_sizes=None, use_scmos_cal=False, scmos_calibration_file=None, widefield_only=False,\n nangles=3, nphases=3, npatterns_ignored=0, saving=True,\n zinds_to_use=None, tinds_to_use=None, xyinds_to_use=None,\n save_tif_stack=True, **kwargs):\n\n nfolders = len(data_root_paths)\n if nfolders == 0:\n raise ValueError(\"No folder paths were provided.\")\n\n ncolors = len(emission_wavelengths)\n if ncolors == 0:\n raise ValueError(\"No wavelength channels were provided.\")\n\n if channel_inds is None:\n channel_inds = list(range(ncolors))\n\n # ensure crop_sizes is a list the same size as number of folders\n if not isinstance(crop_sizes, list):\n crop_sizes = [crop_sizes]\n\n if len(crop_sizes) == 1 and nfolders > 1:\n crop_sizes = crop_sizes * nfolders\n\n if len(img_centers) == 1 and nfolders > 1:\n img_centers = img_centers * nfolders\n\n # ############################################\n # load affine data\n # ############################################\n affine_xforms = []\n for p in affine_data_paths:\n with open(p, 'rb') as f:\n affine_xforms.append(pickle.load(f)['affine_xform'])\n\n # ############################################\n # load DMD patterns frequency and phase data\n # ############################################\n frqs_dmd = np.zeros((ncolors, nangles, 2))\n phases_dmd = np.zeros((ncolors, nangles, nphases))\n for kk in range(ncolors):\n ppath = dmd_pattern_data_fpath[kk]\n xform = affine_xforms[kk]\n\n with open(ppath, 'rb') as f:\n pattern_data = pickle.load(f)\n\n # DMD intensity frequency and phase (twice electric field frq/phase)\n frqs_dmd[kk] = 2 * pattern_data['frqs']\n phases_dmd[kk] = 2 * pattern_data['phases']\n dmd_nx = pattern_data['nx']\n dmd_ny = pattern_data['ny']\n\n # ############################################\n # load OTF data\n # ############################################\n with open(otf_data_fname, 'rb') as f:\n otf_data = pickle.load(f)\n otf_p = otf_data['fit_params']\n\n if len(otf_p) == 1:\n otf_fn = lambda f, fmax: 1 / (1 + (f / fmax * otf_p[0]) ** 2) * \\\n psf.circ_aperture_otf(f, 0, na, 2 * na / fmax)\n else:\n otf_fn = lambda f, fmax: 1 / (\n 1 + (f / fmax * otf_p[0]) ** 2 + (f / fmax * otf_p[1]) ** 4 + (f / fmax * otf_p[2]) ** 6 +\n (f / fmax * otf_p[3]) ** 8) * psf.circ_aperture_otf(f, 0, na, 2 * na / fmax)\n # ############################################\n # load camera calibration file, if we need it\n # ############################################\n if use_scmos_cal:\n with open(scmos_calibration_file, 'rb') as f:\n data = pickle.load(f)\n gain_map = data['gains']\n offsets = data['offsets']\n #varmap = data['vars']\n\n # ############################################\n # SIM images\n # ############################################\n if not crop_image:\n crop_sizes = [np.nan] * len(data_root_paths)\n img_centers = [[np.nan, np.nan]] * len(data_root_paths)\n\n for rpath, crop_size, img_center in zip(data_root_paths, crop_sizes, img_centers):\n folder_path, folder = os.path.split(rpath)\n print(\"# ################################################################################\")\n print(\"analyzing folder: %s\" % folder)\n print(\"located in: %s\" % folder_path)\n\n tstamp = tools.get_timestamp()\n # path to store processed results\n if saving:\n sim_results_path = os.path.join(rpath, '%s_sim_reconstruction' % tstamp)\n if not os.path.exists(sim_results_path):\n 
os.mkdir(sim_results_path)\n print(\"save directory: %s\" % sim_results_path)\n\n # copy useful data files here\n for kk in range(ncolors):\n # copy affine data here\n _, fname = os.path.split(affine_data_paths[kk])\n fpath = os.path.join(sim_results_path, fname)\n shutil.copyfile(affine_data_paths[kk], fpath)\n\n # copy otf data here\n _, fname = os.path.split(otf_data_fname)\n fpath = os.path.join(sim_results_path, fname)\n shutil.copyfile(otf_data_fname, fpath)\n\n # copy DMD pattern data here\n _, fname = os.path.split(dmd_pattern_data_fpath[kk])\n fpath = os.path.join(sim_results_path, fname)\n shutil.copyfile(dmd_pattern_data_fpath[kk], fpath)\n\n # load metadata\n metadata, dims, summary = tools.parse_mm_metadata(rpath)\n start_time = datetime.datetime.strptime(summary['StartTime'], '%Y-%d-%m;%H:%M:%S.%f')\n nz = dims['z']\n nxy = dims['position']\n nt = dims['time']\n\n # use this construction as zinds can be different for different folders\n if zinds_to_use is None:\n zinds_to_use_temp = range(nz)\n else:\n zinds_to_use_temp = zinds_to_use\n nz_used = len(zinds_to_use_temp)\n\n if tinds_to_use is None:\n tinds_to_use_temp = range(nt)\n else:\n tinds_to_use_temp = tinds_to_use\n nt_used = len(tinds_to_use_temp)\n\n if xyinds_to_use is None:\n xyinds_to_use_temp = range(nxy)\n else:\n xyinds_to_use_temp = xyinds_to_use\n nxy_used = len(xyinds_to_use_temp)\n\n if pixel_size is None:\n pixel_size = metadata['PixelSizeUm'][0]\n\n # set up image size\n # load one file to check size\n fname = os.path.join(rpath, metadata['FileName'].values[0])\n im, _ = tools.read_tiff(fname, [metadata['ImageIndexInFile'].values[0]])\n _, ny_raw, nx_raw = im.shape\n if crop_image:\n # or pick ROI\n roi = tools.get_centered_roi(img_center, [crop_size, crop_size])\n\n # check points don't exceed image size\n if roi[0] < 0:\n roi[0] = 0\n if roi[1] > ny_raw:\n roi[1] = ny_raw\n if roi[2] < 0:\n roi[2] = 0\n if roi[3] > nx_raw:\n roi[3] = nx_raw\n else:\n roi = [0, ny_raw, 0, nx_raw]\n\n ny = roi[1] - roi[0]\n nx = roi[3] - roi[2]\n\n # arrays to save results\n imgs_sr = []\n imgs_os = []\n imgs_wf = []\n imgs_deconvolved = []\n counter = 1\n for kk in range(ncolors):\n sim_options = {'pixel_size': pixel_size, 'wavelength': emission_wavelengths[kk], 'na': na}\n\n # estimate otf\n fmax = 1 / (0.5 * emission_wavelengths[kk] / na)\n fx = tools.get_fft_frqs(nx, sim_options['pixel_size'])\n fy = tools.get_fft_frqs(ny, sim_options['pixel_size'])\n ff = np.sqrt(fx[None, :] ** 2 + fy[:, None] ** 2)\n otf = otf_fn(ff, fmax)\n otf[ff >= fmax] = 0\n\n # guess frequencies/phases\n frqs_guess = np.zeros((nangles, 2))\n phases_guess = np.zeros((nangles, nphases))\n for ii in range(nangles):\n for jj in range(nphases):\n # estimate frequencies based on affine_xform\n frqs_guess[ii, 0], frqs_guess[ii, 1], phases_guess[ii, jj] = \\\n affine.xform_sinusoid_params_roi(frqs_dmd[kk, ii, 0], frqs_dmd[kk, ii, 1],\n phases_dmd[kk, ii, jj], [dmd_ny, dmd_nx], roi, xform)\n\n # convert from 1/mirrors to 1/um\n frqs_guess = frqs_guess / pixel_size\n\n # analyze pictures\n for ii in tinds_to_use_temp:\n for bb in xyinds_to_use_temp:\n for aa in zinds_to_use_temp:\n tstart = time.process_time()\n\n identifier = \"%.0fnm_nt=%d_nxy=%d_nz=%d\" % (excitation_wavelengths[kk] * 1e3, ii, bb, aa)\n file_identifier = \"nc=%d_nt=%d_nxy=%d_nz=%d\" % (kk, ii, bb, aa)\n\n # where we will store results for this particular set\n if not widefield_only:\n sim_diagnostics_path = os.path.join(sim_results_path, identifier)\n if not 
os.path.exists(sim_diagnostics_path):\n os.mkdir(sim_diagnostics_path)\n\n # find images and load them\n raw_imgs = tools.read_dataset(metadata, z_indices=aa, xy_indices=bb, time_indices=ii,\n user_indices={\"UserChannelIndex\": channel_inds[kk],\n \"UserSimIndex\": list(range(npatterns_ignored, npatterns_ignored + nangles * nphases))})\n\n # error if we have wrong number of images\n if np.shape(raw_imgs)[0] != (nangles * nphases):\n raise ValueError(\"Found %d images, but expected %d images at channel=%d,\"\n \" zindex=%d, tindex=%d, xyindex=%d\" % (\n np.shape(raw_imgs)[0], nangles * nphases,\n channel_inds[kk], aa, ii, bb))\n\n # optionally convert from ADC to photons\n # todo: not very useful to do this way...\n if use_scmos_cal:\n imgs_sim = camera_noise.adc2photons(raw_imgs, gain_map, offsets)\n else:\n imgs_sim = raw_imgs\n\n # reshape to [nangles, nphases, ny, nx]\n imgs_sim = imgs_sim.reshape((nangles, nphases, raw_imgs.shape[1], raw_imgs.shape[2]))\n imgs_sim = imgs_sim[:, :, roi[0]:roi[1], roi[2]:roi[3]]\n\n # instantiate reconstruction object\n r = SimImageSet(sim_options, imgs_sim, frqs_guess, phases_guess=phases_guess, otf=otf,\n save_dir=sim_diagnostics_path, **kwargs)\n\n # if not saving stack, maybe want to handle in class?\n if saving and not save_tif_stack:\n fname = os.path.join(sim_results_path, \"sim_os_%s.tif\" % file_identifier)\n tools.save_tiff(r.imgs_os, fname, dtype='float32', datetime=start_time)\n\n fname = os.path.join(sim_results_path, \"widefield_%s.tif\" % file_identifier)\n tools.save_tiff(r.widefield, fname, dtype='float32', datetime=start_time)\n else:\n # store widefield and os\n imgs_os.append(r.imgs_os)\n imgs_wf.append(r.widefield)\n\n if not widefield_only:\n # do reconstruction\n r.reconstruct()\n r.plot_figs()\n\n if saving and not save_tif_stack:\n fname = os.path.join(sim_results_path, \"sim_sr_%s.tif\" % file_identifier)\n tools.save_tiff(r.img_sr, fname, dtype='float32', datetime=start_time)\n\n fname = os.path.join(sim_results_path, \"deconvolved_%s.tif\" % file_identifier)\n tools.save_tiff(r.widefield_deconvolution, fname, dtype='float32', datetime=start_time)\n else:\n # store sr and deconvolved\n imgs_sr.append(r.img_sr)\n imgs_deconvolved.append(r.widefield_deconvolution)\n\n # save reconstruction summary data\n r.save_result(os.path.join(sim_diagnostics_path, \"sim_reconstruction_params.pkl\"))\n\n tend = time.process_time()\n print(\"%d/%d from %s in %0.2fs\" % (counter, ncolors * nt_used * nxy_used * nz_used, folder, tend - tstart))\n\n counter += 1\n\n # #################################\n # save data for all reconstructed files\n # #################################\n if saving and save_tif_stack:\n\n # todo: want to include metadata in tif.\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'widefield.tif'))\n imgs_wf = np.asarray(imgs_wf)\n wf_to_save = np.reshape(imgs_wf, [ncolors, nt_used, nz_used, imgs_wf[0].shape[-2], imgs_wf[0].shape[-1]])\n tools.save_tiff(wf_to_save, fname, dtype='float32', axes_order=\"CTZYX\", hyperstack=True,\n datetime=start_time)\n\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'sim_os.tif'))\n imgs_os = np.asarray(imgs_os)\n sim_os = np.reshape(imgs_os, [ncolors, nt_used, nz_used, imgs_os[0].shape[-2], imgs_os[0].shape[-1]])\n tools.save_tiff(sim_os, fname, dtype='float32', axes_order=\"CTZYX\", hyperstack=True,\n datetime=start_time)\n\n if not widefield_only:\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'sim_sr.tif'))\n imgs_sr = np.asarray(imgs_sr)\n 
sim_to_save = np.reshape(imgs_sr, [ncolors, nt_used, nz_used, imgs_sr[0].shape[-2], imgs_sr[0].shape[-1]])\n tools.save_tiff(sim_to_save, fname, dtype='float32', axes_order=\"CTZYX\", hyperstack=True,\n datetime=start_time)\n\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'deconvolved.tif'))\n imgs_deconvolved = np.asarray(imgs_deconvolved)\n deconvolved_to_save = np.reshape(imgs_deconvolved, [ncolors, nt_used, nz_used, imgs_deconvolved[0].shape[-2],\n imgs_deconvolved[0].shape[-1]])\n tools.save_tiff(deconvolved_to_save, fname, dtype='float32', axes_order='CTZYX', hyperstack=True,\n datetime=start_time)\n\n return imgs_sr, imgs_wf, imgs_deconvolved, imgs_os", "def batch_dicom_to_nrrd(self, dicom_root, nrrd_root):\n dicom_files_dirs = glob.glob(dicom_root + '/*')\n for dicom_subject in dicom_files_dirs:\n subject = re.search(self.KEY_WORD_FLODER, dicom_subject).group()\n nrrd_subject = nrrd_root + '/' + subject\n self.dicom_to_nrrd(dicom_subject, nrrd_subject)", "def setup_ncfile_list(self):\n self.ncfilelist = []\n for file in os.listdir(self.dirpath_netcdf):\n if file.endswith('.nc'):\n self.ncfilelist.append(osp.join(self.dirpath_netcdf, file))", "def move_generators_to_input(self, generator_folder_glob):\n spawn_folder_names = []\n generator_folders = glob(generator_folder_glob)\n for i, folder in enumerate(generator_folders):\n base_name = 'e01s{:02d}_{}f0000'.format(i + 1, os.path.basename(folder))\n input_destination = os.path.join(self.input_folder, base_name)\n data_destination = os.path.join(self.data_folder, base_name)\n create_folder(input_destination)\n create_folder(data_destination)\n spawn_folder_names.append(input_destination)\n create_symlinks(\n files=os.path.join(folder, '*'),\n dst_folder=os.path.relpath(input_destination)\n )\n return spawn_folder_names", "def merge_folders():\r\n from shutil import copyfile\r\n # Merge all folders into main folder\r\n grp_img_dir = os.listdir('Group_Test_Images')\r\n \r\n for grp_img_folder in grp_img_dir:\r\n image_folders = os.listdir('Group_Test_Images'+'/'+grp_img_folder)\r\n \r\n for img_label in image_folders:\r\n new_directory = 'Group_Test_Images'+'/'+img_label\r\n \r\n try:\r\n os.makedirs(new_directory)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n \r\n file_names = os.listdir('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label)\r\n \r\n for file in file_names:\r\n copyfile('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label+'/'+file, new_directory+'/'+file)", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def organizeDir(self):\n # Classify every file in dir\n for file in os.listdir(self.path):\n curPath = self.path + file\n self.moveFile(curPath)", "def copy_files():\n\n # Load the Knifey-Spoony dataset.\n # This is very fast as it only gathers lists of the files\n # and does not actually load the images into memory.\n dataset = load()\n\n # Copy the files to separate training- and test-dirs.\n dataset.copy_files(train_dir=train_dir, test_dir=test_dir)", "def main():\n os.chdir('FilesToSort')\n extension_to_category = {}\n for filename in os.listdir('.'):\n if os.path.isdir(filename):\n continue\n extension = filename.split('.')[-1]\n make_subdirectories(extension, extension_to_category)\n shutil.move(filename, extension_to_category[extension])", "def 
combine_world_model_train_data(problem, final_data_dir, old_data_dirs):\n for data_dir in old_data_dirs:\n suffix = os.path.basename(data_dir)\n # Glob train files in old data_dir\n old_train_files = tf.gfile.Glob(\n problem.filepattern(data_dir, tf.estimator.ModeKeys.TRAIN))\n for fname in old_train_files:\n # Move them to the new data_dir with a suffix\n # Since the data is read based on a prefix filepattern, adding the suffix\n # should be fine.\n new_fname = os.path.join(final_data_dir,\n os.path.basename(fname) + \".\" + suffix)\n if tf.gfile.Exists(new_fname):\n tf.gfile.Remove(new_fname)\n tf.gfile.Copy(fname, new_fname)", "def __concatonate_files_controller(self):\n\n # find all barcode file paths\n barcode_directories = []\n for root, directory, files in os.walk(self.input_directory):\n for name in directory:\n barcode_directories.append( os.path.join(root, name) )\n\n # iterate through each barcode directory, item is the file path\n for item in barcode_directories:\n file = os.listdir(item)[0]\n path = item\n\n new_file_name = self.__return_new_file_name(file_name=file, file_path=path)\n self.__concatonate_files(new_file_name=new_file_name, parent_folder=path)\n self.__write_logs_to_file(new_file_name)", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def split_folder(data_dir, train_pct, val_pct):\n\n random.seed(1)\n\n IMG_SUFFIX = '*_sat.jpg'\n MASK_SUFFIX = '*_msk.png'\n\n glob_imgs = os.path.join(data_dir,IMG_SUFFIX)\n glob_masks = os.path.join(data_dir, MASK_SUFFIX)\n\n img_paths = np.array(sorted(glob.glob(glob_imgs)))\n mask_paths = np.array(sorted(glob.glob(glob_masks)))\n \n num_imgs = len(img_paths)\n index_lst = list(range(num_imgs))\n\n random.shuffle(index_lst)\n\n train_idx_bound = int(train_pct * num_imgs)\n train_imgs = img_paths[index_lst[:train_idx_bound]]\n train_masks = mask_paths[index_lst[:train_idx_bound]]\n\n val_idx_bound = int((train_pct + val_pct) * num_imgs)\n val_imgs = img_paths[index_lst[train_idx_bound: val_idx_bound]]\n val_masks = mask_paths[index_lst[train_idx_bound: val_idx_bound]]\n\n test_imgs = img_paths[index_lst[val_idx_bound:]]\n test_masks = mask_paths[index_lst[val_idx_bound:]]\n\n # Write the lists to their own directories\n copy_list_to_dir(train_imgs, \"train\")\n print(\"Moved images into: train\")\n copy_list_to_dir(train_masks, \"train\")\n print(\"Moved masks into: train\")\n copy_list_to_dir(val_imgs, \"val\")\n print(\"Moved images into: val\")\n copy_list_to_dir(val_masks, \"val\")\n print(\"Moved masks into: val\")\n copy_list_to_dir(test_imgs, \"test\")\n print(\"Moved images into: test\")\n copy_list_to_dir(test_masks, \"test\")\n print(\"Moved masks into: test\")", "def run(self):\n for lof in self.data_files:\n if lof[0]:\n base = getattr(self, 'install_' + lof[0])\n else:\n base = getattr(self, 'install_base')\n dir = convert_path(lof[1])\n if not os.path.isabs(dir):\n dir = os.path.join(base, dir)\n elif self.root:\n dir = change_root(self.root, dir)\n self.mkpath(dir)\n\n files = lof[2]\n if len(files) == 0:\n # If there are no files listed, the user must be\n # trying to create an empty directory, so add the\n # directory to the list of output files.\n self.outfiles.append(dir)\n else:\n # Copy files, adding them to the list of output files.\n for f in files:\n f = convert_path(f)\n (out, _) = self.copy_file(f, dir)\n #print 
\"DEBUG: \", out # dbg\n self.outfiles.append(out)\n \n\n return self.outfiles", "def exec_combine(self, curr_step):\n assert(self.curr_step_idx > 0 and self.dlist is not None), \"Step Error: Must call init before combine\" \n \n #verify raw data & dlist\n self.B_VER(self.sess_path, self.dlist)\n\n #move & preprocess each folder\n raw_datadir = self.sess_path\n dest_datadir = self.sess_path\n foldername = curr_step.get(\"foldername\", \"main\")\n for i, folder in enumerate(self.dlist):\n self.data_utils.MOVE(raw_datadir, folder, dest_datadir, flist=[], preview=False, op='combine_choosename|' + foldername)\n self.dlist = [foldername]\n self.default_vis(curr_step)", "def collect_and_rename() -> None:\n image_source_folder = 'image_dir'\n label_source_folder = 'annotation_dir'\n image_target_folder = 'images'\n label_target_folder = 'labels'\n for i, (subdir, _, files) in enumerate(os.walk(image_source_folder), -1):\n # it walks the parent folder first, not a file\n if i == -1: \n continue\n subdir_name = subdir.split('\\\\')[1]\n for file_name in files:\n with open(f'{image_source_folder}/{subdir_name}/{file_name}') as image_file, \\\n open(f'{label_source_folder}/{subdir_name}/{file_name}'.split('.')[0] + '.txt') as label_file:\n shutil.copy2(image_file.name, f'{image_target_folder}/{\"%06d\" % i}.jpg')\n shutil.copy2(label_file.name, f'{label_target_folder}/{\"%06d\" % i}.txt')\n print(f'Processed {i} images')", "def populate_train_test_val_dirs_randomly(root_dir=(os.getcwd()), val_ratio=0.15, test_ratio=0.05):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = root_dir # The folder to copy images from\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n np.random.shuffle(all_file_names)\n\n train_file_names, val_file_names, test_file_names = np.split(np.array(all_file_names),\n [int(len(all_file_names) * (\n 1 - val_ratio + test_ratio)),\n int(len(all_file_names) * (1 - test_ratio))])\n ''' Print the file distribution amongst the folders '''\n print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names), len(test_file_names))\n\n print(train_file_names)\n\n ''' Copy-Pasting Images '''\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/train/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/train/BlurryImages')\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/val/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/val/BlurryImages')\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/test/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/test/BlurryImages')", "def convert_dataset(src_dir, dest_dir):\n subdirs = get_subdirs(src_dir)\n detector = dlib.simple_object_detector(MODEL_PATH)\n for img_dir in tqdm(subdirs):\n\tprint(img_dir)\n jpegs = get_img_paths_in_dir(img_dir)\n target_dir = dest_dir + img_dir.split('/')[-1]\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n for src_path in jpegs:\n target_path = target_dir + '/' + src_path.split('/')[-1]\n img = io.imread(src_path)\n dets = detector(img)\n bounding_boxes = get_bounding_boxes(dets)\n if bounding_boxes:\n square_box = find_square_box(bounding_boxes[0])\n if is_valid(square_box, img):\n box = bounding_boxes[0]\n square_box = find_square_box(box)\n cropped_img = 
crop_frame(img, square_box)\n PIL_img = PIL.Image.fromarray(cropped_img)\n resized_img = PIL_img.resize((54,54), PIL.Image.BILINEAR)\n\t\t resized_img.save(target_path)\n print(target_path)\n # grey_img = resized_img.convert('L')\n # grey_img.save(target_path)" ]
[ "0.6202731", "0.6148555", "0.6145362", "0.61036813", "0.6101068", "0.60603684", "0.5980905", "0.5971773", "0.59518343", "0.5937501", "0.5891356", "0.5873109", "0.58078676", "0.5791588", "0.57874477", "0.57657754", "0.57594436", "0.57572466", "0.5714175", "0.56791556", "0.56619835", "0.56578916", "0.5647847", "0.5643549", "0.5641105", "0.5597024", "0.5595936", "0.55745155", "0.55650014", "0.5562634" ]
0.7128477
0
Fit the adaptive model onto the trajectories
def fit_model(self):
    logger.info('Fitting model')
    if self.traj_dict is None:
        self.traj_dict = self.get_traj_dict()
    self.model.fit(self.traj_dict.values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit():\n pass", "def fit(self, X):", "def fit(self):\n converge = False\n while not converge:\n converge = True\n for xi, yi in zip(self.X, self.y):\n yhat = self.classify(xi)\n if yhat != yi:\n converge = False\n # update model\n self.W += self.lr * yi * xi\n self.b += self.lr * yi * 1", "def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)", "def fit(self):\n raise NotImplementedError", "def fit(self, x):\n pass", "def fit_model(train_ts_dis, data, init_prior = [.5,.5], bias = True, mode = \"biasmodel\"):\r\n if mode == \"biasmodel\":\r\n #Fitting Functions\r\n def bias_fitfunc(rp, tsb, df):\r\n init_prior = [.5,.5]\r\n model = BiasPredModel(train_ts_dis, init_prior, ts_bias = tsb, recursive_prob = rp)\r\n model_likelihoods = []\r\n for i in df.index:\r\n c = df.context[i]\r\n trial_choice = df.subj_ts[i]\r\n conf = model.calc_posterior(c)\r\n model_likelihoods.append(conf[trial_choice])\r\n return np.array(model_likelihoods)\r\n \r\n def bias_errfunc(params,df):\r\n rp = params['rp']\r\n tsb = params['tsb']\r\n #minimize\r\n return abs(np.sum(np.log(bias_fitfunc(rp,tsb,df)))) #single value\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('rp', value = .6, min = 0, max = 1)\r\n if bias == True:\r\n fit_params.add('tsb', value = 1, min = 0)\r\n else:\r\n fit_params.add('tsb', value = 1, vary = False, min = 0)\r\n out = lmfit.minimize(bias_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(out)\r\n return out.values\r\n \r\n elif mode == \"midline\":\r\n #Fitting Functions\r\n def midline_errfunc(params,df):\r\n eps = params['eps'].value\r\n context_sgn = np.array([max(i,0) for i in df.context_sign])\r\n choice = df.subj_ts\r\n #minimize\r\n return -np.sum(np.log(abs(abs(choice - (1-context_sgn))-eps)))\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('eps', value = .1, min = 0, max = 1)\r\n midline_out = lmfit.minimize(midline_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(midline_out)\r\n return midline_out.values", "def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self):\n # Initialize parameter estimates\n if self.estimator is not None:\n param_estimates = self.estimator(self.xf, self.yf)\n else: param_estimates = None\n self.popt, self.pcov = curve_fit(self.model, self.xf, self.yf, \n p0=param_estimates)\n self.fit_history.append({\"popt\" : self.popt, \"pcov\" : self.pcov})", "def fit(self, X, y):\n self.model = self._initialize_model(X, y)\n self.model.optimize()", "def fit(self, y):\n if isinstance(y, np.ndarray) and y.ndim == 2:\n y = [y]\n y_all = np.concatenate(y)\n self.mean_ = y_all.mean(axis=0, keepdims=True)\n y = [yi - self.mean_ for yi in y]\n n = y[0].shape[1]\n T = 
[yi.shape[0] for yi in y]\n model = FA(self.n_factors, svd_method='lapack')\n model.fit(y_all)\n\n self.R_ = np.diag(model.noise_variance_)\n self.C_ = model.components_.T\n self.d_ = np.zeros(n)\n self.tau_ = self.tau_init + self.rng.rand(self.n_factors)\n # Allocated and reuse these\n C = self.C_\n R = self.R_\n big_K = {Ti: calc_big_K(Ti, self.n_factors, self.tau_, self.var_n) for Ti in set(T)}\n y_cov = {Ti: block_dot_B(block_dot_A(C, big_K[Ti], Ti), C.T, Ti) + make_block_diag(R, Ti)\n for Ti in set(T)}\n big_d = {Ti: np.tile(self.d_, Ti) for Ti in set(T)}\n big_y = [yi.ravel() for yi in y]\n ll_pre = log_likelihood(big_d, y_cov, big_y, T)\n if self.verbose:\n print(\"FA log likelihood:\", ll_pre)\n\n converged = False\n for ii in range(self.max_iter):\n ll = self._em_iter(y, big_K)\n if abs(ll - ll_pre) / np.amax([abs(ll), abs(ll_pre), 1.]) <= self.tol:\n converged = True\n break\n ll_pre = ll\n if not converged:\n warnings.warn(\"EM max_iter reached.\", ConvergenceWarning)\n return self", "def fitModel(self, params:lmfit.Parameters=None):\r\n if params is None:\r\n params = self.params\r\n self.initializeRoadRunnerModel()\r\n if self.parametersToFit is not None:\r\n self.optimizer = Optimizer.optimize(self.calcResiduals, params,\r\n self._fitterMethods, logger=self.logger,\r\n numRestart=self._numRestart)\r\n self.minimizerResult = self.optimizer.minimizerResult\r\n # Ensure that residualsTS and fittedTS match the parameters\r\n self.updateFittedAndResiduals(params=self.params)", "def fit(self):\n raise NotImplementedError # pragma: no cover", "def fit(self, x):\n raise NotImplementedError()", "def fit(self, x, y): \n # *** START CODE HERE ***\n y = y.reshape(y.shape[0], 1)\n y_0 = (1 - y).reshape(y.shape)\n m = y.shape[0]\n m_0 = np.asscalar(np.sum(y_0))\n m_1 = np.asscalar(np.sum(y))\n # Find phi, mu_0, mu_1, and sigma\n phi = np.sum(y) / m\n mu_0 = (np.sum(np.multiply(y_0, x), axis = 0, keepdims = True) / m_0) #.reshape(y.shape)\n mu_1 = np.sum(np.multiply(y, x), axis = 0, keepdims=True) / m_1\n sigma = getsigma(x, mu_0, mu_1, m, y, y_0)\n # Write theta in terms of the parameters\n sigma_inv = np.linalg.inv(sigma)\n log_phi = np.log(np.exp(-1 * np.log(phi)) - 1)\n theta_0 = (np.dot(np.dot(mu_0, sigma_inv), mu_0.T) - np.dot(np.dot(mu_1, sigma_inv), mu_1.T)) / 2 - log_phi\n self.theta = np.concatenate((theta_0, np.dot(sigma_inv, (mu_1 - mu_0).T)))\n # Compute cost\n x_0 = np.zeros((x.shape[0], 1)) + 1\n x_train = np.concatenate((x_0.T, x.T))\n h_theta = sigmoid(np.dot(self.theta.T, x_train)).T\n cost = - np.sum(np.dot(y.T, np.log(h_theta - (h_theta - 0.5) * self.eps)) + (np.dot(y_0.T, np.log(1 - h_theta + (h_theta - 0.5) * self.eps)))) / m\n if self.verbose:\n print(\"Cost: \" + str(cost))\n # *** END CODE HERE ***", "def _fit(self, y, X, fh):\n _, forecasters = self._check_forecasters()\n\n # get training data for meta-model\n if X is not None:\n y_train, y_test, X_train, X_test = temporal_train_test_split(\n y, X, test_size=self.test_size\n )\n else:\n y_train, y_test = temporal_train_test_split(y, test_size=self.test_size)\n X_train, X_test = None, None\n\n # fit ensemble models\n fh_test = ForecastingHorizon(y_test.index, is_relative=False)\n self._fit_forecasters(forecasters, y_train, X_train, fh_test)\n\n if self.method == \"feature-importance\":\n self.regressor_ = check_regressor(\n regressor=self.regressor, random_state=self.random_state\n )\n X_meta = pd.concat(self._predict_forecasters(fh_test, X_test), axis=1)\n X_meta.columns = pd.RangeIndex(len(X_meta.columns))\n\n # 
fit meta-model (regressor) on predictions of ensemble models\n # with y_test as endog/target\n self.regressor_.fit(X=X_meta, y=y_test)\n\n # check if regressor is a sklearn.Pipeline\n if isinstance(self.regressor_, Pipeline):\n # extract regressor from pipeline to access its attributes\n self.weights_ = _get_weights(self.regressor_.steps[-1][1])\n else:\n self.weights_ = _get_weights(self.regressor_)\n\n elif self.method == \"inverse-variance\":\n # get in-sample forecasts\n if self.regressor is not None:\n Warning(f\"regressor will not be used because ${self.method} is set.\")\n inv_var = np.array(\n [\n 1 / np.var(y_test - y_pred_test)\n for y_pred_test in self._predict_forecasters(fh_test, X_test)\n ]\n )\n # standardize the inverse variance\n self.weights_ = list(inv_var / np.sum(inv_var))\n else:\n raise NotImplementedError(\n f\"Given method {self.method} does not exist, \"\n f\"please provide valid method parameter.\"\n )\n\n self._fit_forecasters(forecasters, y, X, fh)\n return self", "def fit(self, X,y):\n pass" ]
[ "0.6692488", "0.6590946", "0.6487492", "0.646762", "0.64532554", "0.64378124", "0.6380393", "0.6379069", "0.63625103", "0.63625103", "0.63625103", "0.6333218", "0.6333218", "0.6333218", "0.6333218", "0.6333218", "0.6333218", "0.6333218", "0.6333218", "0.6333218", "0.6333218", "0.6314133", "0.6295259", "0.6279434", "0.6267226", "0.6240726", "0.62129223", "0.6209191", "0.6200563", "0.6162548" ]
0.72130895
0
get one line from the socket
def get_line(sock):
    # from red/pie getLine (redis)
    # yy=atpic.log.setname(xx,'get_line')
    line = b""
    while True:
        next_byte = sock.recv(1) # read a byte
        if next_byte == b"\r": # if it's end of line, break
            break
        line += next_byte # otherwise, istick it with the rest
    sock.recv(1) # Consume the remaining \n character
    # atpic.log.debug(yy,'line',line)
    return line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_line(self):\r\n line = \"\"\r\n while not line[-2:] == \"\\r\\n\":\r\n char = self.sock.recv(1)\r\n if not char:\r\n raise SocketClosedException\r\n line += char\r\n return line.strip()", "def recvline(self):\n return self.recvtil('\\n')", "def read_line(sock_stream):\n line = sock_stream.readline().strip()\n print('READ: {}'.format(line))\n return line", "def _read_line(self):\n line = ''\n while True:\n c = self.s.read()\n if c == b'':\n raise EvseTimeoutError\n line += c.decode('ascii')\n if c == b'\\r':\n break\n return line", "def readline(self):\n\t\t# Much slower than built-in method!\n\t\tlf = 0\n\t\twhile True:\n\t\t\tlf = string.find(self.inbuf, '\\n')\n\t\t\tif lf >= 0:\n\t\t\t\tbreak\n\t\t\tr = self.sock.recv(4096)\n\t\t\tif not r: \n\t\t\t\t# connection broken\n\t\t\t\tbreak\n\t\t\tself.inbuf = self.inbuf + r\n\t\tlf = lf + 1\n\t\tdata = self.inbuf[:lf]\n\t\tself.inbuf = self.inbuf[lf:]\n\t\treturn data", "def readline(self):\n\t\t# Much slower than built-in method!\n\t\tlf = 0\n\t\twhile True:\n\t\t\tlf = string.find(self.inbuf, '\\n')\n\t\t\tif lf >= 0:\n\t\t\t\tbreak\n\t\t\tr = self.sock.recv(4096)\n\t\t\tif not r: \n\t\t\t\t# connection broken\n\t\t\t\tbreak\n\t\t\tself.inbuf = self.inbuf + r\n\t\tlf = lf + 1\n\t\tdata = self.inbuf[:lf]\n\t\tself.inbuf = self.inbuf[lf:]\n\t\treturn data", "def receive_sock_line(sock):\n buf = \"\"\n while buf.find(\"\\n\") <= -1:\n buf += sock.recv(2**10).decode()\n return buf.strip()", "def __read_line(self):\n ret = b\"\"\n while True:\n try:\n pos = self.__read_buffer.index(CRLF)\n ret = self.__read_buffer[:pos]\n self.__read_buffer = self.__read_buffer[pos + len(CRLF):]\n break\n except ValueError:\n pass\n try:\n nval = self.sock.recv(self.read_size)\n self.__dprint(nval)\n if not len(nval):\n break\n self.__read_buffer += nval\n except (socket.timeout, ssl.SSLError):\n raise Error(\"Failed to read data from the server\")\n\n if len(ret):\n m = self.__size_expr.match(ret)\n if m:\n raise Literal(int(m.group(1)))\n\n m = self.__respcode_expr.match(ret)\n if m:\n if m.group(1) == b\"BYE\":\n raise Error(\"Connection closed by server\")\n if m.group(1) == b\"NO\":\n self.__parse_error(m.group(2))\n raise Response(m.group(1), m.group(2))\n return ret", "def getLine(self):\r\n # This is important: \r\n # The data that is transmitted over the socket (the entire contents \r\n # of one protocol message will be put into one string of bytes that\r\n # is terminated by exactly one newline character 0x0a at the end.\r\n # \r\n # This string of bytes is what I refer to as the \"line\"\r\n #\r\n # Therefore the entire message data (the contents of ProtocolMsg.blob)\r\n # which can contain any arbitrary byte sequence (even chat messages are \r\n # considered a blob since they are UTF-8 text with arbitrary formatting \r\n # chars) will be properly encoded for transmission in such a way that \r\n # it will not contain any 0x0a bytes anymore.\r\n #\r\n # This is implemented in the functions encodeLF() and decodeLF()\r\n #\r\n # getLine() is called right before transmitting it over the socket\r\n # to produce the \"line\" and the exact inverse operation on the \r\n # receiving side will happen in __init__() when a new message object \r\n # is constructed from the incoming encoded line string. 
\r\n return \"%s %s\\n\" % (self.command, encodeLF(self.blob))", "def readline( self ):\n self.readbuf += self.read( 1024 )\n if '\\n' not in self.readbuf:\n return None\n pos = self.readbuf.find( '\\n' )\n line = self.readbuf[ 0 : pos ]\n self.readbuf = self.readbuf[ pos + 1: ]\n return line", "def readline(self):\n returnIndex = self._RX_buf.index(\"\\n\") # \\r\\n technically\n if returnIndex != -1:\n s = self._RX_buf[0:returnIndex + 1]\n self._RX_buf = self._RX_buf[returnIndex + 1:]\n return s # bytes(s, encoding='ascii') # s\n else:\n return 0x04 # ''", "def readline(self) -> bytes | None:", "def read_next_line(data_socket):\r\n current_byte = next_byte(data_socket)\r\n found_line = b''\r\n while current_byte != b'\\x0a':\r\n found_line += current_byte\r\n current_byte = next_byte(data_socket)\r\n return found_line", "def read_block(sock):\r\n lines = []\r\n line = \"\"\r\n while True:\r\n res = sock.recv(1)\r\n line += res\r\n if res == \"\":\r\n return None\r\n if res == \"\\n\":\r\n line = line.strip()\r\n if line == \"\":\r\n return lines\r\n lines.append(line)\r\n line = \"\"", "def get_response():\n line = FROMPIPE.readline()\n result = \"\"\n while True:\n result += line\n line = FROMPIPE.readline()\n # print(f\"Line read: [{line}]\")\n if line == '\\n':\n return result", "def readline( shell ):\n global readbuf\n readbuf += read( shell, 1024 )\n if '\\n' not in readbuf:\n return None\n pos = readbuf.find( '\\n' )\n line = readbuf[ 0: pos ]\n readbuf = readbuf[ pos + 1: ]\n return line", "def readline(self):\n while(True):\n rxcount = self.in_waiting \n if rxcount > 0: \n for pos, i in enumerate(self.buffer):\n # look for the \\n\n if i == 10: \n line=''\n linebuf = self.buffer[:pos]\n self.buffer = self.buffer[pos+1:]\n for c in linebuf:\n line += chr(c)\n return line", "async def readline(self) -> bytes:\n ...", "def _get_line(self):\n line = self.file.readline(self.maxline + 1)\n if len(line) > self.maxline:\n print(f\"ERROR: got more than {self.maxline} bytes\")\n if not line:\n print(\"Received EOF\")\n if line[-2:] == CRLF:\n line = line[:-2]\n elif line[-1:] in CRLF:\n line = line[:-1]\n return line + CRLF", "def readline(self):\n sep = b'\\n'\n seplen = len(sep)\n try:\n line = yield from self.readuntil(sep)\n except IncompleteReadError as e:\n return e.partial\n except LimitOverrunError as e:\n if self._buffer.startswith(sep, e.consumed):\n del self._buffer[:e.consumed + seplen]\n else:\n self._buffer.clear()\n self._maybe_resume_transport()\n raise ValueError(e.args[0])\n return line", "def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()", "def readline(self) -> bytes:\r\n if not len(self.buffer):\r\n return b\"\"\r\n return self.buffer.pop(0)", "def recv(self, bufsize=1024, timeout=None):\r\n if timeout is not None:\r\n self._socket.settimeout(timeout)\r\n ans = ''\r\n while True:\r\n data = self._socket.recv(bufsize)\r\n logger.debug('Data received: %s ...', repr(data))\r\n ans += data.decode(\"utf-8\")\r\n ans = ans.split(xsct_line_end)\r\n if len(ans) > 1:\r\n return ans[0]", "def parseLine(data_socket):\r\n nextByte = next_byte(data_socket);\r\n lineMessage = b'';\r\n while nextByte != b'\\x0a':\r\n lineMessage += nextByte\r\n nextByte = next_byte(data_socket)\r\n lineMessage += b'\\n'\r\n return lineMessage.decode()", "def readline(self) -> bytes:\n 
...", "def readline(self) -> bytes:\n ...", "def handle(self):\n line = b\"\"\n try:\n while True:\n raw = self.request.recv(1024)\n if not raw:\n return\n raw = bytearray(raw)\n while True:\n splitter = raw.find(b\"\\r\")\n if splitter > -1:\n line = raw[1:splitter]\n raw = raw[splitter + 1 :]\n else:\n break\n\n self.handle_line(line.decode())\n except Exception as exc:\n _LOGGER.error(\n \"TCP: Handle: last line %s gave error: %s\", line.decode(), str(exc)\n )\n return", "def readline(self) -> Optional[bytes]:\n ...", "def _readline(self):\n\n eol = b'\\r'\n leneol = len(eol)\n line = bytearray()\n while True:\n c = self.ser.read(1)\n if c:\n line += c\n if line[-leneol:] == eol:\n break\n else:\n break\n return bytes(line)", "def getDataFromSocket(session):\n \n dat = \"\"\n while 1:\n message = session.recv(4096).decode()\n last=len(message)\n if message[last-1] == \"\\n\":\n dat=dat+message[:-1]\n return dat\n else:\n dat=dat+message" ]
[ "0.803798", "0.78673536", "0.78072685", "0.75777656", "0.75243616", "0.75243616", "0.7488651", "0.71590877", "0.71435183", "0.7124684", "0.71031475", "0.7009135", "0.7001291", "0.69961816", "0.6950862", "0.6938723", "0.6819079", "0.67579776", "0.67553467", "0.6735071", "0.66757995", "0.6637992", "0.66144854", "0.65902984", "0.6585481", "0.6585481", "0.65761685", "0.6575278", "0.6546799", "0.6498436" ]
0.83299303
0
command is one of 'incr' or 'decr'
def incrdecr(con,command,key,value=1):
    # yy=atpic.log.setname(xx,'incrdecr')
    thecommand="{command} {key} {value}\r\n".format(command=command,key=key,value=value)
    con.send(thecommand.encode('utf-8'))
    line=get_line(con)
    # atpic.log.debug(yy,line)
    if line==b'NOT_FOUND':
        return None
    else:
        return int(line.strip())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _incrdecr(self, cmd, key, delta):\n\t\tcheck_key(key)\n\t\tserver, key = yield self._get_server_for(key)\n\t\tif not server:\n\t\t\treturn\n\n\t\tcmd = \"%s %s %d\\r\\n\" % (cmd, key, delta)\n\n\t\ttry:\n\t\t\tyield server.sendall(cmd)\n\t\t\tline = yield server.read_line()\n\t\t\traise StopIteration(int(line))\n\t\texcept tcp.ConnectionClosedException:\n\t\t\tserver.mark_dead()", "def handle_incr(self, api, command):\n key = self._sandboxed_key(api.sandbox_id, command.get('key'))\n if not (yield self.check_keys(api, key)):\n returnValue(self._too_many_keys(command))\n amount = command.get('amount', 1)\n try:\n value = yield self.redis.incr(key, amount=amount)\n except Exception, e:\n returnValue(self.reply(command, success=False, reason=unicode(e)))\n returnValue(self.reply(command, value=int(value), success=True))", "def _add_cmd(self, cmd):\n if cmd.gate == Allocate:\n self._active_qubits += 1\n elif cmd.gate == Deallocate:\n self._active_qubits -= 1\n elif cmd.gate == Measure:\n for qureg in cmd.qubits:\n for qubit in qureg:\n self.main_engine.set_measurement_result(qubit, 0)\n elif self._is_rotation(cmd):\n self._num_rotations += 1\n self._rotations.append(self._decompose_rotation(cmd))", "def get_next_command( self, ):\n self.ix_command += 1\n if self.ix_command >= len( self.command_list ):\n ret = None\n else:\n ret = self.command_list[ self.ix_command ]\n# print( f\"command = { self.ix_command} {ret} \", flush = True )\n return ret", "def incr(self, key, delta=1):\n\t\treturn self._incrdecr(\"incr\", key, delta)", "def test_incrdecr(self):\n\t\tyield self.conn.set(\"an_integer\", 42)\n\n\t\tself.assertEqual((yield self.conn.incr(\"an_integer\", 1)), 43)\n\t\tself.assertEqual((yield self.conn.decr(\"an_integer\", 1)), 42)", "def incr(self, key, delta=1, callback=None):\n self._incrdecr(\"incr\", key, delta, callback=callback)", "def incr(self, key, delta=1):\r\n if delta < 0:\r\n return self._incrdecr(\"decr\", key, -delta)\r\n else:\r\n return self._incrdecr(\"incr\", key, delta)", "def test_pos_operate_increment_nonexistent_bin(self):\n key = (\"test\", \"demo\", 1)\n llist = [{\"op\": aerospike.OPERATOR_INCR, \"bin\": \"my_age\", \"val\": 5}]\n\n self.as_connection.operate(key, llist)\n\n (key, _, bins) = self.as_connection.get(key)\n\n assert bins == {\"my_age\": 5, \"age\": 1, \"name\": \"name1\"}", "def process_commands(cmds, program_counter):\n\n jmp_offset = cmds[program_counter]\n\n if jmp_offset > 2:\n cmds[program_counter] -= 1\n else:\n cmds[program_counter] += 1\n\n return jmp_offset + program_counter", "async def on_command(self, ctx: vbu.Context):\n\n command = ctx.command\n command_name = command.name\n\n async with self.bot.database() as db:\n current_count = await db(\"SELECT count FROM command_counter WHERE command_name=$1\", command_name)\n\n # Make sure we get a current count\n if current_count:\n current_count = current_count[0]['count']\n else:\n current_count = 0\n\n await db(\"INSERT INTO command_counter (command_name, count) VALUES ($1, $2) ON CONFLICT (command_name) DO UPDATE SET count = $2\", command_name, current_count + 1)\n\n self.bot.logger.info(f\"Logging command completion: {ctx.command.name}\")", "def execute(self, cmd: Command) -> int:\n try:\n return self.cmds[cmd.id]\n except KeyError:\n if cmd.val:\n self.state[cmd.key] = cmd.val\n self.cmds[cmd.id] = self.state[cmd.key]\n return self.cmds[cmd.id]", "def incr_operand(self):\n pass", "def addCommand(self, command):\r\n self.noOfCommands = self.noOfCommands+1\r\n with open(self.jobFile, 
\"a\") as jobWriter:\r\n jobWriter.write(command + \"\\n\")", "def test_pos_operate_increment_nonexistent_key(self):\n key = (\"test\", \"demo\", \"non_existentkey\")\n llist = [{\"op\": aerospike.OPERATOR_INCR, \"bin\": \"age\", \"val\": 5}]\n\n self.as_connection.operate(key, llist)\n\n (key, _, bins) = self.as_connection.get(key)\n\n assert bins == {\"age\": 5}\n\n self.as_connection.remove(key)", "def update_cmd(self, key, update_value):\r\n\t\tif self._iscommand(key):\r\n\t\t\tCOMMAND_NAME[key] = update_value\r\n\t\t\tself._writer(self._str_converter(COMMAND_NAME))\r\n\t\telse:\r\n\t\t\tprint(key, 'no existe')\r\n\t\t\treturn 'ERROR'", "def cmd(self, cmd):\n return cmd", "def _hincrby(self, hashkey, attribute, command, type_, increment):\n redis_hash = self._get_hash(hashkey, command, create=True)\n attribute = self._encode(attribute)\n previous_value = type_(self._decode(redis_hash.get(attribute, '0')))\n redis_hash[attribute] = self._encode(previous_value + increment)\n return type_(redis_hash[attribute])", "def write_arithmetic(self, command):\n if command == \"add\":\n self.add_command()\n elif command == \"sub\":\n self.sub_command()\n elif command == \"neg\":\n self.neg_command()\n elif command == \"eq\":\n self.eq_command()\n self.__label_num += 1\n elif command == \"gt\":\n self.gt_command()\n self.__label_num += 1\n elif command == \"lt\":\n self.lt_command()\n self.__label_num += 1\n elif command == \"and\":\n self.and_command()\n elif command == \"or\":\n self.or_command()\n elif command == \"not\":\n self.not_command()", "def is_incr(self, idx):\n return self.args[0].is_positive()", "def is_incr(self, idx):\n return False", "def is_incr(self, idx):\n return False", "def is_incr(self, idx):\n return False", "def _change_cmd(self, cmd_number: int, new_cmd: str):\n if cmd_number is 0:\n self.command_group.cmd0 = str(new_cmd)\n elif cmd_number is 1:\n self.command_group.cmd1 = str(new_cmd)\n elif cmd_number is 2:\n self.command_group.cmd2 = str(new_cmd)\n else:\n assert False\n\n if self.command_group.is_cmd_runner_command(new_cmd):\n self._initialize_runner(new_cmd)\n\n logging.info(\"Setting {0} to {1}\".format(str(cmd_number), str(new_cmd)))\n cybld_helpers.print_seperator_lines()", "def hincrby(self, key, field, num):\n return self._command(b'HINCRBY', key, field, num)", "def command(self, cmd):\n cmd = cmd.encode(encoding='UTF-8')\n if cmd[-1] != b'\\n':\n # make sure the command ends with \\n, otherwise the client will\n # block\n cmd += b'\\n'\n yield from self._cmds.put(cmd)\n resp = yield from self._get_response()\n return resp", "def testIncrementDecrement(self):\n\n memcache.incr('unknown_key')\n assert memcache.get('unknown_key') == None\n memcache.set('counter', 0)\n assert memcache.get('counter') == 0\n memcache.incr('counter')\n assert memcache.get('counter') == 1\n memcache.incr('counter', delta=2)\n assert memcache.get('counter') == 3\n memcache.decr('counter')\n assert memcache.get('counter') == 2\n memcache.decr('counter', 2)\n assert memcache.get('counter') == 0\n memcache.incr('second_counter', initial_value=10)\n assert memcache.get('second_counter') == 11\n memcache.decr('third_counter', initial_value=10)\n assert memcache.get('third_counter') == 9\n\n # This should cause an error message, because zero deltas are not\n # allowed.\n memcache.incr('counter', delta=0)\n\n memcache.set('lcounter', long(20))\n assert memcache.get('lcounter') == long(20)\n memcache.incr('lcounter')\n assert memcache.get('lcounter') == long(21)", "def advance(self):\n if 
self.has_more_commands():\n self.counter += 1", "def cmd(self, command):\n self._commands.append(command)", "def is_incr(self, idx) -> bool:\n return False" ]
[ "0.7011128", "0.660569", "0.6230415", "0.61279076", "0.6108874", "0.602091", "0.5920075", "0.5847304", "0.5772811", "0.57724774", "0.574264", "0.57074124", "0.5691151", "0.567981", "0.5631203", "0.5577918", "0.5520286", "0.5517095", "0.5509518", "0.5490961", "0.54887533", "0.54887533", "0.54887533", "0.5483494", "0.54745346", "0.54468334", "0.5442469", "0.5425527", "0.54048836", "0.538353" ]
0.76081705
0
scatterplot of color 'color' for the points of L
def plot_points(L, color):
    X = list()
    Y = list()
    for p in L:
        X.append(p[0])
        Y.append(p[1])
    plt.scatter(X, Y, c=color)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scatterPlot2():\n N = 100\n x = np.random.rand(N)\n y = np.random.rand(N)\n colors = np.random.rand(N)\n\n plt.scatter(x, y, c=colors, alpha=0.5)\n plt.show()", "def plot_colors(rgb_colors):\n nr_dots = len(rgb_colors)\n\n dots = []\n x = []\n y = []\n for i in range(nr_dots):\n x.append(i + 20)\n y.append(i + 20)\n \n #plt.scatter(x, y, c=cmap, s=50)\n plt.scatter(x, y, c=rgb_colors, s=50)\n\n plt.show()", "def plot_scatter_and_linreg(df, col='b'):\n lr = LinearRegression()\n lr.fit(df['x'].reshape(-1, 1), df['y'])\n df.plot(kind='scatter', x='x', y='y', c=col, s=50)\n x_pred = np.linspace(df['x'].min(), df['x'].max(), 10)\n y_pred = lr.predict(x_pred.reshape(-1, 1))\n plt.plot(x_pred, y_pred, ls=':', c=col)\n\n plt.title(df.name)", "def scatter_plot(x_all, labels):\n \n color = labels.iloc[:,1].apply(lambda x: \"green\" if x == 0 else \"red\" if x == 1 else \"blue\")\n \n plt.figure(figsize = (6,4))\n plt.scatter(x_all.iloc[:,1], x_all.iloc[:,2], c = color)\n plt.xlabel('Title Polarity', fontsize = 15)\n plt.ylabel('Content Polarity', fontsize = 15)", "def plot_scatter_points(self):\n self.plot(1)", "def ScatterPlot(data, labels, gamma):\n\n fig, ax = plt.subplots(figsize=(16, 8))\n for i, label in enumerate(labels):\n plt.scatter(data[i, 0], data[i, 1], label=label,\n color=['red' if label < 0 else 'green'])\n plt.axvline(gamma / 2, linestyle='--', color='indigo', alpha=0.3)\n plt.axvline(-gamma / 2, linestyle='--', color='indigo', alpha=0.3)\n plt.show()", "def scatter_list(self, l):\n pass", "def plot_points(self, _pts, color='b', marker='o'):\n xs, ys, zs = _pts[:,0], _pts[:,1], _pts[:,2]\n self.fig_ax.scatter(xs, ys, zs, color=color, marker=marker)\n plt.draw()", "def scatterplot():\r\n #get the data for the plots\r\n reddata = np.array([[1,1],[1,3],[4,2]])\r\n bluedata = np.array([[0,1],[0,5],[1,2],[2,3],[3,4]])\r\n yellowdata = np.array([[1,4],[2,2],[3,5],[6,2]])\r\n #convert the data to a pd DataFrame\r\n df = pd.DataFrame(reddata, columns=[\"x\",\"y\"])\r\n df1 = pd.DataFrame(bluedata, columns=[\"x\",\"y\"])\r\n df2 = pd.DataFrame(yellowdata, columns=[\"x\",\"y\"])\r\n #create the plot\r\n ax = df.plot.scatter(x=\"x\",y=\"y\",label=\"Red Group\",color=\"Red\",title=\"Scatter Plot in Three Colors\",xlim=(-1,7),ylim=(0,6))\r\n ax1 = df1.plot.scatter(x=\"x\",y=\"y\",label=\"Blue Group\",color=\"Blue\",ax=ax)\r\n ax2 = df2.plot.scatter(x=\"x\",y=\"y\",label=\"Yellow Group\",color=\"Yellow\",ax=ax)\r\n #get the figure from the axes and save it\r\n fig = ax.get_figure()\r\n fig.savefig(\"my_scatter_plot.png\")", "def color_marked_scatter_plot(data,marker_column,labels,dim=2):\n\n color_list=['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']\n marker_list=['o', 'X', '8', 's', 'p', 'h', 'H', 'D', 'd', 'P']\n\n kinds=pd.Series(marker_column).drop_duplicates().sort_values().values\n\n color_dict=dict(zip(kinds,color_list[0:len(kinds)]))\n marker_dict=dict(zip(kinds,marker_list[0:len(kinds)]))\n\n data_marker=pd.DataFrame(data).assign(marker=marker_column)\n data_marked=[data_marker[data_marker['marker']==kind] for kind in kinds]\n\n fig = plt.figure()\n\n if dim==2:\n for i in range(0,len(data_marked)):\n plt.scatter(data_marked[i].iloc[:,0],data_marked[i].iloc[:,1],\n color=color_dict[kinds[i]],\n marker=marker_dict[kinds[i]])\n plt.xlabel(labels[0])\n plt.ylabel(labels[1])\n elif dim==3:\n ax = fig.add_subplot(111, projection='3d')\n for i in range(0,len(data_marked)):\n ax.scatter(data_marked[i].iloc[:,0],data_marked[i].iloc[:,1],\n data_marked[i].iloc[:,2],\n 
color=color_dict[kinds[i]],\n marker=marker_dict[kinds[i]])\n ax.set_xlabel(labels[0])\n ax.set_ylabel(labels[1])\n ax.set_zlabel(labels[2])\n else:\n print('Not implemented.')\n plt.show()\n return fig,color_dict,marker_dict", "def plot_scatter(x, y):\n\tplt.scatter(x, y)", "def scatterplot(loc: List[CrimeStatistics]) -> None: \n # return None #stub\n #template based on visualization\n \n x = enrollment_list(loc)\n y = crime_list(loc)\n \n \n pyplot.scatter(x,y)\n pyplot.xlabel(\"Enrollment\")\n pyplot.ylabel(\"Total crime per campus\")\n pyplot.title(\"correlation between enrollment and crimes committed\")\n \n \n \n pyplot.show()\n print(linregress(x,y))\n \n \n return None", "def show_scatter(self):\n plt.scatter(self.a1[:, 0], self.a1[:, 1], c=\"red\", alpha=0.5, s=10)\n plt.scatter(self.a2[:, 0], self.a2[:, 1], c=\"blue\", alpha=0.5, s=10)\n plt.scatter(0, 0, marker=\"D\", c=\"black\", alpha=0.8)\n plt.scatter(2, 2, marker=\"D\", c=\"black\", alpha=0.8)\n plt.show()", "def custom_scatterplot(ax, x, y, error, xlims, ylims, color='green', markerscale=100):\n\n markersize = error * markerscale\n\n ax.scatter(x, y, color=color, marker='o', s=markersize, alpha=0.5)\n\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n\n return ax", "def ad_sub_scatter(V,y,fig,ax,plotpos,x_label,y_label):\n ax = fig.add_subplot(plotpos)\n c = ['k','b','m','c']\n #s = [0.8,1,1,1]\n for i in range(len(V)):\n ax.scatter(V[i],y[i],color=c[i], s=5)\n ax.set_xlabel(x_label, fontsize = 16)\n ax.set_ylabel(y_label, fontsize = 16)\n ax.tick_params(axis='both' , labelsize = 12.0)\n return ax", "def plot_scatter_points_lines(self):\n self.plot(2)", "def scatterplot(\n template,\n tm: str,\n x_data: list,\n y_data: list,\n t: str,\n x_l: str,\n y_l: str,\n color: str = \"b\",\n marker: str = \"o\"\n ) -> None:\n\n # Determine the maximum x-axis value of the graph\n x_max = math.ceil(max(x_data))\n\n # Open a figure\n plot.figure()\n\n # Plot the scatter plot\n plot.rcParams.update({\"figure.figsize\":(7, 5), \"figure.dpi\":100})\n plot.scatter(x_data, y_data, c = color, marker = marker)\n plot.gca().set(title = t, ylabel = y_l, xlabel = x_l)\n plot.xlim(0, x_max)\n\n # # Show the plot\n # plot.show()\n\n # Save the figure\n save_plot(template, t, tm)\n\n return", "def plot_results(store):\n plt.figure()\n c = 0\n for i in store.keys():\n plt.scatter(i[0], -1*i[1], color=get_colour(store[i]))\n c += 1\n plt.show()", "def make_scatter():\n x = np.linspace(4, 8, 6)\n y = np.sin(x)\n plt.plot(x, y, 'o', color='black');\n plt.show()", "def exercise_4(self):\n student_data = self.student_data\n # Change the legend order in the scatter plot\n sns.scatterplot(x=\"absences\", y=\"G3\", \n data=student_data, \n hue=\"location\",\n hue_order = [\"Rural\"\n ,\"Urban\"])\n\n # Show plot\n plt.show()", "def plot_scatter(chimera, singles, chisum, lorder, figname, mincount):\n def scatit(dname, dname2, grd, i, j, l1, l2, lln, cur_name):\n \"\"\"\n scatterplot the specific plot\n \"\"\"\n lkeys = set(dname[l1].keys()) & set(dname2[l2].keys())\n xvec = []\n yvec = []\n for k in lkeys:\n if dname[l1][k] > mincount or dname2[l2][k] > mincount:\n xvec.append(dname[l1][k]+1)\n yvec.append(dname2[l2][k]+1)\n spr = spearmanr(xvec, yvec)\n #print \"*\" * 100\n #my addition - print to stdout the pairs and the values\n # print cur_name, l1, l2, spr[0] ,spr[1]\n im = grd[i*lln + j].hexbin(\n xvec, yvec, xscale = 'log', yscale = 'log', bins='log', mincnt=1,\n gridsize=(50,50))\n# grd.cbar_axes[i*lln+j].colorbar(im)\n\n grd[i*lln + j].text(10, 10e4, 
\"r=%.2f\"%(spr[0]), size=6, color='m')\n grd[i*lln + j].set_xlim([10e0, 10e5])\n grd[i*lln + j].set_ylim([10e0, 10e5])\n grd[i*lln + j].set_yscale('log')\n grd[i*lln + j].set_xscale('log')\n grd[i*lln + j].set_xticks([10e0, 10e2, 10e4])\n# grd[i*lln + j].set_xticklabels([k[0] for k in grd[i*lln + j].get_xticklabels()], rotation=45)\n grd[i*lln + j].set_yticks([10e0, 10e2, 10e4])\n grd[i*lln + j].set_ylabel(l1)\n grd[i*lln + j].set_xlabel(l2, rotation=45)\n# tight_layout()\n return spr\n lln = len(lorder)\n corrs = zeros((lln, lln))\n fig = figure(1, (8, 8), 300)\n rcParams.update({'font.size': 8})\n# f, axarr = subplots(lln, lln, sharex=True, sharey=True)\n grid = ImageGrid(fig, 111, # similar to subplot(111)\n nrows_ncols = (lln, lln), # creates 2x2 grid of axes\n axes_pad=0.1, # pad between axes in inch.\n aspect=True,\n# cbar_mode=\"each\"\n ) \n for i, l1 in enumerate(lorder):\n for j, l2 in enumerate(lorder):\n if i>j: # Print singles\n corrs[i, j] = scatit(\n singles, singles, grid, i, j, l1, l2, lln, \"sing-sing\")[0]\n elif i==j:\n corrs[i, j] =scatit(singles, chisum, grid, i, j, l1, l2, lln, \"sing-chim\")[0]\n else:\n corrs[i, j] = scatit(\n chimera, chimera, grid, i, j, l1, l2, lln, \"chim-chim\")[0]\n xlabel(l1)\n ylabel(l2)\n rcParams.update({'font.size': 8})\n for ax in fig.get_axes():\n ax.tick_params(which='minor', direction='out')\n savefig(figname, dpi=300)\n return corrs", "def scattered():\r\n c = 'A'\r\n i = 'FLR '\r\n data = chart_data(i, '2018-09-01', 12*5, c).set_index('date').sort_index()\r\n # print(data)\r\n data.plot(kind='scatter', x='Perc.idv', y='Perc.ids') # ,c='Centre')\r\n # plt.xticks(range(len(data)),data.index.tolist(),rotation=20)\r\n # plt.axhline(y=100, color='r', linestyle='-', label='Individual target')\r\n # plt.axhline(y=75, color='b', linestyle='-', label='Industry target')\r\n plt.title(centres[c] + ' ' + indic)\r\n plt.savefig('pic/' + c + indic + '.jpg')", "def scatterResults(X, Xcolour,offset,x_angles,x_angle_location,y_angles,y_angle_location):\n #ind=np.argpartition(X, 4, axis=0)[:4]\n ind=np.argmin(X,axis=0)\n xind = []\n yind = []\n for i in range(len(ind)):\n xind.append(x_angle_location[ind[i]])\n yind.append(y_angle_location[ind[i]])\n plt.scatter(xind,yind, alpha=0.01, color=Xcolour)\n plt.xticks(np.array(range(0,len(x_angles))),x_angles)\n pylab.xlim(-1,len(x_angles)+1)\n plt.yticks(np.array(range(0,len(y_angles))),y_angles)\n pylab.ylim(-1,len(y_angles)+1) \n locs, labels = plt.xticks()\n plt.setp(labels, rotation=90)\n plt.xlabel(\"x-angles\")\n plt.ylabel(\"y-angles\")\n plt.grid(b=True, which='major')", "def scatter(x, colors):\n \n # We choose a color palette with seaborn.\n palette = np.array(sns.color_palette(\"hls\", 2))\n\n # We create a scatter plot.\n f = plt.figure(figsize=(10, 8))\n ax = plt.subplot(aspect='equal')\n sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40,\n c=palette[colors.astype(np.int)])\n \n ax.axis('off') # the axis will not be shown\n ax.axis('tight') # makes sure all data is shown\n \n # set title\n plt.title(\"Featurespace Visualization Titanic\", fontsize=25)\n \n # legend with color patches\n survived_patch = mpatches.Patch(color=palette[1], label='Survived')\n died_patch = mpatches.Patch(color=palette[0], label='Died')\n plt.legend(handles=[survived_patch, died_patch], fontsize=20, loc=1)\n\n return f, ax, sc", "def setPointColor(self, color):\n for point in self.points:\n point.color = color", "def plot_scatter(data):\n city_x,city_y = get_city_base()\n fig = plt.figure(figsize = FIGURE_SIZE)\n 
plt.scatter(data['longitude'],data['latitude'], color = CRIME_POINTS_COLOR, s = SCATTER_SIZE_OF_CRIME_POINTS)\n plt.scatter(city_x,city_y, color = CITY_MAP_COLOR, s = SCATTER_SIZE_OF_CHICAGO_CITY, zorder = CITY_MAP_ORDER)", "def _timeseries_scatter_plot_panel(self, data, axes, project, y_values):\n timesteps = np.linspace(0, 1, len(data[0]))\n if project == \"cmip6\":\n cb_colors = plt.cm.Reds(np.linspace(0, 1, len(data[1])))\n if project == \"cmip5\":\n cb_colors = plt.cm.Blues(np.linspace(0, 1, len(data[1])))\n cb_colors[:, -1] = timesteps\n\n axes.scatter(\n data[0],\n data[1],\n facecolors=\"none\",\n linewidths=0.8,\n s=70,\n color=cb_colors,\n label=self.formatter(project.upper()),\n )\n base_colors = {\"cmip5\": \"#2161A6\", \"cmip6\": \"#BB3437\"}\n # plot regression\n axes.plot(data[0], y_values, color=base_colors[project])\n return base_colors[project]", "def scatter_plot(self):\n\n X = self.reduce_dimension(n_components=2)\n\n plt.figure()\n plt.scatter(X[:,0], X[:,1])\n\n return plt", "def afficher_XY(X, Y):\n plt.scatter(X,Y, s = size)\n plt.show()", "def draw_point(axis, pnt3d, color):\n xy = bc2xy(pnt3d)\n axis.scatter(xy[0], xy[1], c=color, marker='o')" ]
[ "0.72418195", "0.704063", "0.66666645", "0.6631968", "0.66311115", "0.66116434", "0.65151936", "0.6427394", "0.6364602", "0.63060814", "0.6267257", "0.62623537", "0.62345916", "0.6197361", "0.61629504", "0.6145779", "0.6065766", "0.604194", "0.6001138", "0.59884024", "0.59615815", "0.59374624", "0.5930797", "0.5920489", "0.591406", "0.5881986", "0.58678204", "0.58648896", "0.58627546", "0.5855094" ]
0.81629497
0
returns a list of n random points whose (x,y) coordinates make up a shape (n,2) ndarray; the n random points must be distinct in order to assign L as input in the algorithm
def scatter_points(n):
    P1 = np.random.randn(int(np.ceil(n/2)), 2) - 4
    P2 = 3 * np.random.rand(int(np.ceil(n/4)), 2) - np.array([10, 0])
    P3 = np.random.randn(int(np.ceil(n/4)), 2) + 3
    """
    P1=np.floor(P1)
    P2=np.floor(P2)
    P3=np.floor(P3)
    """
    L = list(np.concatenate((P1,P2,P3), axis=0))
    return L #return no_dupli(L)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_random_points(N): \n x1 = np.random.uniform(-1,1,N)\n x2 = np.random.uniform(-1,1,N)\n return (x1,x2)", "def create_random_points(n):\n\n\treturn [(random.randint(0,n),random.randint(0,n)) for i in range(n)]", "def random_points(N, condition=None):\n\n def stream():\n \"\"\" An infinite stream of random points. \"\"\"\n while True:\n yield random_point()\n\n if condition is None:\n # approve unconditionally\n indexed_points = enumerate(stream())\n else:\n indexed_points = enumerate(ifilter(condition, stream()))\n\n points = list(takewhile(lambda (i, point): i < N, indexed_points))\n return (numpy.array([theta for _, (theta, _) in points]),\n numpy.array([phi for _, (_, phi) in points]))", "def create_points(N, M):\n arr = numpy.random.randint(1, N+1, size=(M, 2))\n idx = 0\n coords = []\n points = []\n \n for ele in arr:\n if (ele[0], ele[1]) not in coords:\n idx += 1\n coords.append((ele[0], ele[1]))\n \n while idx < M:\n missed = numpy.random.randint(1, N+1, size=(M-idx, 2))\n for ele in missed:\n if (ele[0], ele[1]) not in coords:\n idx += 1\n coords.append((ele[0], ele[1]))\n\n # creates real points in the plane\n idx = 0\n for coord in coords:\n idx += 1\n points.append(Point(id=idx, x=coord[0], y=coord[1]))\n\n return points", "def randomgrid(self, n):\n lam = np.random.random((n, 3))\n return self.normalize(lam)", "def rand_coord(n):\n\n x = random.randint(0, n - 1)\n y = random.randint(0, n - 1)\n return x, y", "def give_rand_points(n_points, xmin, xmax, ymin, ymax, n_dim=2):\n random_points = np.random.rand(n_points, n_dim)\n random_points[:, 0] = random_points[:, 0]*(xmax-xmin)+xmin\n random_points[:, 1] = random_points[:, 1]*(ymax-ymin)+ymin\n\n return random_points", "def random_points(n, shape):\n n = int(n)\n if n < 0:\n raise ValueError('n must be a positive integer')\n else:\n try:\n d = len(shape)\n if d == 0:\n raise ValueError('shape must contain at least one integer value')\n except TypeError:\n raise ValueError('shape must be array-like')\n idx = np.random.choice(np.prod(shape), size=n, replace=False)\n return np.unravel_index(idx, shape)", "def scatter(area, n, z=None, seed=None):\n x1, x2, y1, y2 = area\n numpy.random.seed(seed)\n arrays = [numpy.random.uniform(x1, x2, n), numpy.random.uniform(y1, y2, n)]\n if z is not None:\n arrays.append(z*numpy.ones(n))\n return arrays", "def sample(self, n):\n ret = []\n for i in range(n):\n for j in range(n):\n ret.append(self.a + (((0.5 + i) / n) * self.l1) + (((0.5 + j) / n) * self.l2))\n return ret", "def get_points(self, npoints: int):\n\n R = sorted(np.random.rand(npoints) * 2. 
* np.pi)\n\n xx = self.cx + self.a * np.cos(R) * np.cos(self.angle_rad) - self.b * np.sin(R) * np.sin(\n self.angle_rad)\n\n yy = self.cy + self.a * np.cos(R) * np.sin(self.angle_rad) + self.b * np.sin(R) * np.cos(\n self.angle_rad)\n\n return R, xx, yy", "def initialization(n, D):\n\n samples = []\n while len(samples) < n:\n # X = np.random.randint(0, D.shape[1], 10*n)\n # Y = np.random.randint(0, D.shape[0], 10*n)\n X = np.random.uniform(0, D.shape[1], 10*n)\n Y = np.random.uniform(0, D.shape[0], 10*n)\n P = np.random.uniform(0, 1, 10*n)\n index = 0\n while index < len(X) and len(samples) < n:\n x, y = X[index], Y[index]\n x_, y_ = int(np.floor(x)), int(np.floor(y))\n if P[index] < D[y_, x_]:\n samples.append([x, y])\n index += 1\n return np.array(samples)", "def pick_chosen_points(m, n):\r\n return [i * n // m + n // (2 * m) for i in range(m)]", "def generateClusterPoints(N, k=2, scale=1):\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n rands += [[np.random.uniform(-scale, 0) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def _build_point_grid(n_per_side: int) -> np.ndarray:\n offset = 1 / (2 * n_per_side)\n points_one_side = np.linspace(offset, 1 - offset, n_per_side)\n points_x = np.tile(points_one_side[None, :], (n_per_side, 1))\n points_y = np.tile(points_one_side[:, None], (1, n_per_side))\n points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)\n return points", "def scatter_vertices(self, n=20000):\n\n vs = self.verts[np.random.choice(len(self.verts), n, replace=False)]\n\n return vs", "def generatePoints(N, k=2, scale=1, same_quadrant=False):\n if same_quadrant:\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n else:\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def generate_uniform_random_points_in_domain(self, num_points, random_source=None):\n output_points = numpy.empty((num_points, self.num_repeats, self.dim))\n # Generate num_repeats sets of points from some sampling (e.g., LHC)\n # Then we \"transpose\" the output ordering: the i-th point in RepeatedDomain is constructed\n # from the i-th points of LHC_1 ... LHC_{num_repeats}\n num_points_array = numpy.empty(self.num_repeats, dtype=numpy.int64)\n for i in xrange(self.num_repeats):\n temp = self._domain.generate_uniform_random_points_in_domain(num_points, random_source=random_source)\n # Since generate_uniform_random_points_in_domain() may not always return num_points\n # points, we need to make sure we only use the valid results\n num_points_array[i] = temp.shape[0]\n output_points[:, i, ...] 
= temp\n # We can only use the smallest num_points that came out of our draws\n return output_points[:numpy.amin(num_points_array), ...]", "def scatter(area, n, z=None):\n x1, x2, y1, y2 = area\n log.info(\"Generating irregular grid (scatter):\")\n log.info(\" area = (x1, x2, y1, y2) = %s\" % (str((x1,x2,y1,y2))))\n log.info(\" number of points = n = %s\" % (str(n)))\n xcoords = numpy.random.uniform(x1, x2, n)\n ycoords = numpy.random.uniform(y1, y2, n)\n if z is not None:\n log.info(\" z = %s\" % (str(z)))\n zcoords = z*numpy.ones(n)\n return [xcoords, ycoords, zcoords]\n else:\n return [xcoords, ycoords]", "def get_random_coordinates(self):\n array_shape = np.shape(self.cells) # type: tuple\n points_on_island = []\n for i in range(1, array_shape[0] - 1):\n for j in range(1, array_shape[1] - 1):\n points_on_island.append((i, j))\n random.shuffle(points_on_island)\n return points_on_island", "def _ScatterXUniformly(self, num_points, lattice_sizes, input_dims):\n x = []\n for _ in range(num_points):\n point = [\n np.random.random() * (lattice_sizes - 1.0) for _ in range(input_dims)\n ]\n x.append(np.asarray(point))\n if input_dims == 1:\n x.sort()\n return x", "def generate_point_cloud(n:int, d:int = 2, seed=1234) -> np.ndarray:\n initial_seed = np.random.get_state()\n np.random.seed(seed)\n points = np.random.rand(n, d)\n np.random.set_state(initial_seed)\n return points", "def sample_X(self, m, n):\n return np.random.permutation(m)[:n]", "def get_random_pos(self):\n i = np.random.randint(self.n)\n j = np.random.randint(self.m)\n return [i, j]", "def getRandomList(n):\n lyst = list()\n for count in range (n):\n lyst.append(random.randint(1, n))\n return lyst", "def gen_test_points(n=50, extent=(0,0,100,100), rand_seed=None):\n if rand_seed:\n random.seed(rand_seed)\n return [(random.randint(extent[0], extent[2]), random.randint(extent[1], extent[3]))\n for i in xrange(n)]", "def random_point(self, n_samples=1, bound=1.0):\n samples = self._iterate_over_factors(\n \"random_point\", {\"n_samples\": n_samples, \"bound\": bound}\n )\n return samples", "def create_random_grid(N):\n return np.random.choice(values, N*N, p=[0.2, 0.8]).reshape(N, N)", "def create_points(number): \n\n # generate x and y coordinates:\n x = np.random.permutation(2*number)[:number] - number\n y = np.random.permutation(2*number)[:number] - number\n\n points = [ { 0 : float(x[i]), 1 : float(y[i]), \"index\" : i} for i in range(len(x)) ]\n\n return points\n\n # generate points as coordinate pairs of floats.\n # return zip(map(float,x),map(float,y))", "def uniform_but_one_dataset(n, p):\n elements = []\n for i in range(n):\n elements.append((i, 1))\n elements.append((1, (n**(1.0 / p)) - 1))\n return elements" ]
[ "0.7717317", "0.76276845", "0.6761621", "0.67410785", "0.6725108", "0.6723394", "0.6693359", "0.66715723", "0.6646731", "0.66218066", "0.6573408", "0.6567307", "0.6556614", "0.64901435", "0.64619774", "0.64348775", "0.6433242", "0.6387248", "0.6371732", "0.6369228", "0.63603467", "0.6354784", "0.6346886", "0.63386863", "0.62847394", "0.6284585", "0.6276075", "0.62734634", "0.62700915", "0.62579894" ]
0.77319294
0
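A minimal usage sketch for the scatter_points document above (illustrative only, not dataset content). It assumes numpy is imported as np; note that the stored code relies on np.random.randn making duplicate points vanishingly unlikely (the no_dupli call is commented out), and the list has exactly n entries only when n is divisible by 4.

import numpy as np

L = scatter_points(20)        # list of 20 points, each a (2,) ndarray
P = np.array(L)               # stack back into an (n, 2) array when needed
print(P.shape)                # (20, 2)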
function returning the points from the list L situated on the left side of the line passing through p of direction v; p = point on the line, v = directional vector of the line; p and v are (2,) ndarrays
def side_points(p, v, L): u = np.array([-v[1], v[0]]) # positive normal of v: N = list() # list of points on one side of the line p,v: for k in range(len(L)): if (L[k] - p).dot(u) >= 0: N.append(L[k]) return N
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_verts(v_l, v_r):\n\n\t\tv_l = v_l%chain.length\n\t\tv_r = v_r%chain.length\n\n\t\tpoints = []\n\t\tcoords = list(chain.coords)\n\t\tif v_r > v_l:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l and pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\t\telse:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l:\n\t\t\t\t\tpoints.append(coords[i])\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\n\n\t\treturn points", "def points (p, line: str) -> list:\n direction = line [0]\n steps = list (range (1, 1 + int (F.tail (line))))\n return F.map (point (p, direction)) (steps)", "def point_list(self,res,llc,urc,direction):\n\t\tif direction == 2:\n\t\t\tZdist=urc[2]-llc[2]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([0,0,deltaZ*i]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]\n\t\tif direction == 1:\n\t\t\tZdist=urc[1]-llc[1]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([0,deltaZ*i,0]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]\n\t\tif direction == 0:\n\t\t\tZdist=urc[0]-llc[0]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([deltaZ*i,0,0]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]", "def line_plane(l, p):\n d = dot((p.o - l.o), p.n) / dot(l.d, p.n)\n return l(d)", "def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three points lie in the same x,y plane\n if not isXYPlanar([p,a,b]):\n raise ValueError('non-XY points in linePointXY call')\n return false\n # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n # return False\n\n linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n ## this is the fast case:\n if not inside and distance:\n return linedist\n \n ## find out where the intersection between the original line and a\n ## line defined by the point and an orthogonal direction vector\n ## is. We do this by constructing two direction vectors\n ## orthogonal to the orgiginal line scaled by the line distance,\n ## and adding them to the point in question. 
Assuming that the\n ## line distance is not zero, only one of these constructed points\n ## will fall on the line\n\n ## compute unit direction vector for original line\n dir = sub(b,a)\n dir = scale3(dir,1.0/mag(dir))\n\n ## compute two orthogonal direction vectors of length linedist\n ordir1 = scale3(orthoXY(dir),linedist)\n ordir2 = scale3(ordir1, -1.0)\n \n ## there are two possible intersection points\n pi1 = add(p,ordir1)\n pi2 = add(p,ordir2)\n\n ## compute distances\n d1pa = dist(a,pi1)\n d1pb = dist(pi1,b)\n d1 = d1pa+d1pb # \"triangle\" with pi1\n\n d2pa = dist(a,pi2)\n d2pb = dist(pi2,b)\n d2 = d2pa+d2pb # \"triangle\" with pi2\n\n ## the shortest \"triangle\" distance will signal the point that\n ## is actually on the line, even if that point falls outside\n ## the a,b line interval\n \n if params or not inside: # if we don't care about being inside the\n # line segment\n if d1 <= d2:\n if distance:\n return d1\n elif params:\n return d1pb/abdist\n else:\n return pi1\n else:\n if distance:\n return d2\n elif params:\n return d2pb/abdist\n else:\n return pi2\n \n \n ## if the closest point on the line to point p lies between\n ## the endpoints of the line, then either d1 or d2 will equal\n ## abdist. IF neither do, then we know that the closest point lies\n ## outside the endpoints\n\n if abs(d1-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi1\n\n if abs(d2-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi2\n\n ## closest point is outside the interval. That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b", "def point_of_intersection(l, pz=distance):\r\n # Must fix the error here. 
Right now, any vector can have a point in the plane.\r\n # Must make it so that only vectors pointing in the planes direction has a point there\r\n # Can be done by checking whether d is positive or not.\r\n # This is to prevent vectors that point away from the detector to be counted\r\n # The definitions below assume that the detector is centred in the origin and its length is oriented along the z-axis.\r\n p0 = np.array([0,0,pz]) # Point on the plane\r\n l0 = np.array([0,0,0]) # Point on the line\r\n n = np.array([0,0,1]) # Normal vector of the plane\r\n d = np.dot(p0-l0, n)/np.dot(l, n)\r\n point = [i*d for i in l]\r\n return point", "def light_source_directions():\n L = np.array([[-0.06059872, -0.44839055, 0.8917812],\n [-0.05939919, -0.33739538, 0.93948714],\n [-0.05710194, -0.21230722, 0.97553319],\n [-0.05360061, -0.07800089, 0.99551134],\n [-0.04919816, 0.05869781, 0.99706274],\n [-0.04399823, 0.19019233, 0.98076044],\n [-0.03839991, 0.31049925, 0.9497977],\n [-0.03280081, 0.41611025, 0.90872238],\n [-0.18449839, -0.43989616, 0.87889232],\n [-0.18870114, -0.32950199, 0.92510557],\n [-0.1901994, -0.20549935, 0.95999698],\n [-0.18849605, -0.07269848, 0.97937948],\n [-0.18329657, 0.06229884, 0.98108166],\n [-0.17500445, 0.19220488, 0.96562453],\n [-0.16449474, 0.31129005, 0.93597008],\n [-0.15270716, 0.4160195, 0.89644202],\n [-0.30139786, -0.42509698, 0.85349393],\n [-0.31020115, -0.31660118, 0.89640333],\n [-0.31489186, -0.19549495, 0.92877599],\n [-0.31450962, -0.06640203, 0.94692897],\n [-0.30880699, 0.06470146, 0.94892147],\n [-0.2981084, 0.19100538, 0.93522635],\n [-0.28359251, 0.30729189, 0.90837601],\n [-0.26670649, 0.41020998, 0.87212122],\n [-0.40709586, -0.40559588, 0.81839168],\n [-0.41919869, -0.29999906, 0.85689732],\n [-0.42618633, -0.18329412, 0.88587159],\n [-0.42691512, -0.05950211, 0.90233197],\n [-0.42090385, 0.0659006, 0.90470827],\n [-0.40860354, 0.18720162, 0.89330773],\n [-0.39141794, 0.29941372, 0.87013988],\n [-0.3707838, 0.39958255, 0.83836338],\n [-0.499596, -0.38319693, 0.77689378],\n [-0.51360334, -0.28130183, 0.81060526],\n [-0.52190667, -0.16990217, 0.83591069],\n [-0.52326874, -0.05249686, 0.85054918],\n [-0.51720021, 0.06620003, 0.85330035],\n [-0.50428312, 0.18139393, 0.84427174],\n [-0.48561334, 0.28870793, 0.82512267],\n [-0.46289771, 0.38549809, 0.79819605],\n [-0.57853599, -0.35932235, 0.73224555],\n [-0.59329349, -0.26189713, 0.76119165],\n [-0.60202327, -0.15630604, 0.78303027],\n [-0.6037003, -0.04570002, 0.7959004],\n [-0.59781529, 0.06590169, 0.79892043],\n [-0.58486953, 0.17439091, 0.79215873],\n [-0.56588359, 0.27639198, 0.77677747],\n [-0.54241965, 0.36921337, 0.75462733],\n [0.05220076, -0.43870637, 0.89711304],\n [0.05199786, -0.33138635, 0.9420612],\n [0.05109826, -0.20999284, 0.97636672],\n [0.04919919, -0.07869871, 0.99568366],\n [0.04640163, 0.05630197, 0.99733494],\n [0.04279892, 0.18779527, 0.98127529],\n [0.03870043, 0.30950341, 0.95011048],\n [0.03440055, 0.41730662, 0.90811441],\n [0.17290651, -0.43181626, 0.88523333],\n [0.17839998, -0.32509996, 0.92869988],\n [0.18160174, -0.20480196, 0.96180921],\n [0.18200745, -0.07490306, 0.98044012],\n [0.17919505, 0.05849838, 0.98207285],\n [0.17329685, 0.18839658, 0.96668244],\n [0.1649036, 0.30880674, 0.93672045],\n [0.1549931, 0.41578148, 0.89616009],\n [0.28720483, -0.41910705, 0.8613145],\n [0.29740177, -0.31410186, 0.90160535],\n [0.30420604, -0.1965039, 0.9321185],\n [0.30640529, -0.07010121, 0.94931639],\n [0.30361153, 0.05950226, 0.95093613],\n [0.29588748, 0.18589214, 
0.93696036],\n [0.28409783, 0.30349768, 0.90949304],\n [0.26939905, 0.40849857, 0.87209694],\n [0.39120402, -0.40190413, 0.8279085],\n [0.40481085, -0.29960803, 0.86392315],\n [0.41411685, -0.18590756, 0.89103626],\n [0.41769724, -0.06449957, 0.906294],\n [0.41498764, 0.05959822, 0.90787296],\n [0.40607977, 0.18089099, 0.89575537],\n [0.39179226, 0.29439419, 0.87168279],\n [0.37379609, 0.39649585, 0.83849122],\n [0.48278794, -0.38169046, 0.78818031],\n [0.49848546, -0.28279175, 0.8194761],\n [0.50918069, -0.1740934, 0.84286803],\n [0.51360856, -0.05870098, 0.85601427],\n [0.51097962, 0.05899765, 0.8575658],\n [0.50151639, 0.17420569, 0.84742769],\n [0.48600297, 0.28260173, 0.82700506],\n [0.46600106, 0.38110087, 0.79850181],\n [0.56150442, -0.35990283, 0.74510586],\n [0.57807114, -0.26498677, 0.77176147],\n [0.58933134, -0.1617086, 0.7915421],\n [0.59407609, -0.05289787, 0.80266769],\n [0.59157958, 0.057798, 0.80417224],\n [0.58198189, 0.16649482, 0.79597523],\n [0.56620006, 0.26940003, 0.77900008],\n [0.54551481, 0.36380988, 0.7550205]], dtype=float)\n return L", "def next_in_hull(p, v, L): \r\n N = normalize(p, L)\r\n if N != []:\r\n q = N[0]\r\n index = 0\r\n for k in range(1, len(N)):\r\n if (N[k] - q).dot(v) >= 0: # points on support line included\r\n q = N[k]\r\n index = k\r\n \r\n return index", "def linePointXYDist(l,p,inside=True):\n return linePointXY(l,p,inside,distance=True)", "def project_points_line(points, line):\n return [project_point_line(point, line) for point in points]", "def project_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n\n return add_vectors(a, c)", "def point(L, lam):\n lam = arg.getvector(lam, out='row')\n return L.pp.reshape((3,1)) + L.uw.reshape((3,1)) * lam", "def test_line_to_points(self):\n delta = 1\n # Create simple line\n L = numpy.array([[0, 0], [2, 0]])\n V = points_along_line(L, 1)\n\n expected_V = [[0, 0], [1, 0], [2, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V, expected_V))\n assert numpy.allclose(V, expected_V), msg\n\n # Not starting at zero\n # Create line\n L2 = numpy.array([[168, -2], [170, -2], [170, 0]])\n V2 = points_along_line(L2, delta)\n\n expected_V2 = [[168, -2], [169, -2], [170, -2],\n [170, -1], [170, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V2, expected_V2))\n assert numpy.allclose(V2, expected_V2), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'indonesia_highway_sample.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n C = points_along_line(P, delta)\n\n # Check against reference centroid\n expected_v = [[106.7168975, -6.15530081],\n [106.85224176, -6.15344678],\n [106.93660016, -6.21370279]]\n assert numpy.allclose(C, expected_v, rtol=1.0e-8)\n\n # Store points to file (to e.g. 
check with qgis)\n out_filename = unique_filename(prefix='test_points_along_line',\n suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test points_along_line')\n V.write_to_file(out_filename)", "def split_LR(pts, axis):\n left_pts = []\n right_pts = []\n\n for i, pt in enumerate(pts):\n if sign_line(pt, P1, P2) > 0:\n left_pts.append(pt)\n else:\n right_pts.append(pt)\n\n return left_pts, right_pts", "def endpoints(line_points):\n neighbors = []\n for p in line_points:\n aux = 0\n for q in line_points:\n if np.linalg.norm(p-q) == 1:\n aux += 1\n neighbors.append(aux)\n e_points = np.where(np.array(neighbors)==1)\n return line_points[e_points]", "def vectorsToPoints(vectorList,points):\n \"\"\" It takes in a list of vectors and an empty points list (for recursion) \"\"\"\n\n if len(points) == 0: # first call\n points.append([vectorList[0][0],vectorList[0][1]])\n vectorsToPoints(vectorList[1:],points)\n else:\n points.append([round(points[-1][0]+vectorList[0][0],2),\n round(points[-1][1]+vectorList[0][1],2)])\n if len(vectorList) == 1: # base case\n return\n else:\n vectorsToPoints(vectorList[1:],points) # recursive call\n\n return points", "def project_point_to_line(points, reference_points_of_lines, vectors_along_lines):\n k = check_shape_any(points, (3,), (-1, 3), name=\"points\")\n check_shape_any(\n reference_points_of_lines,\n (3,),\n (-1 if k is None else k, 3),\n name=\"reference_points_of_lines\",\n )\n vg.shape.check(locals(), \"vectors_along_lines\", reference_points_of_lines.shape)\n\n return reference_points_of_lines + vg.project(\n points - reference_points_of_lines, onto=vectors_along_lines\n )", "def get_point_from_lengths_x_first(xA, yA, xB, yB, l2, l3, direction='right'):\n\n A = ((l2**2 - l3**2) + (xB**2 - xA**2) + (yB**2 - yA**2)) / (2 * (yB - yA))\n B = (xA - xB) / (yB - yA)\n\n a = B**2 + 1\n b = 2 * A * B - 2 * xA - 2 * yA * B\n c = A**2 + xA**2 + yA**2 - l2**2 - 2 * yA * A\n\n xC = solve_quadratic_equation(a, b, c, direction)\n yC = A + B * xC\n\n return [xC, yC]", "def vector_line(self):\n assert len(self.xcoords) == 2\n diff_x = self.xcoords[1] - self.xcoords[0]\n diff_z = self.zcoords[1] - self.zcoords[0]\n vec = np.hstack((diff_x, diff_z))\n return vec", "def make_points(self,image,line):\n print(\"This is line inside make_points: \",line)\n try:\n slope, intercept = line\n y1 = int(image.shape[0]) # bottom of the image\n y2 = int(y1*3/5) # slightly lower than the middle\n x1 = int((y1 - intercept)/slope)\n x2 = int((y2 - intercept)/slope)\n return [[x1, y1, x2, y2]]\n except:\n return None", "def segmentline(l,u1,u2):\n p1=sampleline(l,u1)\n p2=sampleline(l,u2)\n return [p1,p2]", "def find_out_difference_perpendiculars(lap, ref_lap):\n\n distances = []\n\n for i in lap.index:\n point = lap.loc[i]\n\n closest_index = find_closest_point(point, ref_lap)\n closest_point = ref_lap.loc[closest_index]\n\n neighbor_i = len(ref_lap) - 1 if closest_index == 0 else closest_index - 1\n neighbor1 = ref_lap.loc[neighbor_i]\n neighbor_i = 0 if len(ref_lap) == closest_index + 1 else closest_index + 1\n neighbor2 = ref_lap.loc[neighbor_i]\n\n v1 = create_vector(closest_point, point)\n v2 = create_vector(closest_point, neighbor1)\n v3 = create_vector(closest_point, neighbor2)\n\n angle1 = find_angle_between_vectors(v1, v2)\n angle2 = find_angle_between_vectors(v1, v3)\n\n degrees90 = math.pi / 2\n min_dist = -1\n if angle1 > degrees90 and angle2 > degrees90:\n min_dist = line_length(point.LAT, point.LON, closest_point.LAT, 
closest_point.LON)\n elif angle1 < degrees90 and angle2 < degrees90:\n dist1 = find_shortest_distance(point, closest_point, neighbor1)\n dist2 = find_shortest_distance(point, closest_point, neighbor2)\n min_dist = dist1 if dist1 <= dist2 else dist2\n elif angle1 <= degrees90:\n min_dist = find_shortest_distance(point, closest_point, neighbor1)\n elif angle2 <= degrees90:\n min_dist = find_shortest_distance(point, closest_point, neighbor2)\n\n if min_dist == -1:\n print('ERROR: Could not find distance')\n print(\"Indices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n elif math.isnan(min_dist):\n print(\"NAN value!!!\\nIndices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n elif min_dist < 0:\n print(\"Negative value!!!\\nIndices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n else:\n min_dist = degrees2kilometers(min_dist) * 100000 # in centimeters\n distances.append(min_dist)\n\n return distances", "def waypts2setpts(P, params):\n\tV = params.drone_vel # [m/s]\n\tfreq = params.ViconRate; dt = 1./freq\n\tdx = V * dt\n\ttraj_global = np.array(P[-1])\n\tfor i in range(len(P)-1, 0, -1):\n\t\tA = P[i]\n\t\tB = P[i-1]\n\n\t\tn = (B-A) / norm(B-A)\n\t\tdelta = n * dx\n\t\tN = int( norm(B-A) / norm(delta) )\n\t\tsp = A\n\t\ttraj_global = np.vstack([traj_global, sp])\n\t\tfor i in range(N):\n\t\t\tsp += delta\n\t\t\ttraj_global = np.vstack([traj_global, sp])\n\t\tsp = B\n\t\ttraj_global = np.vstack([traj_global, sp])\n\n\treturn traj_global", "def _points_on_the_right(self, pt1, pt2, point_list):\n new_list = []\n for pt3 in point_list:\n if oriented_area(pt1, pt2, pt3) < 0:\n new_list.append(pt3)\n return new_list", "def p2p_xyz(start_point, end_point, top_left_cor, cellsize, dem):\n start_cell = (int((start_point[0] - top_left_cor[0]) / cellsize[0]),\n int((start_point[1] - top_left_cor[1]) / cellsize[1]))\n end_cell = (int((end_point[0] - top_left_cor[0]) / cellsize[0]),\n int((end_point[1] - top_left_cor[1]) / cellsize[1]))\n cells = misc.get_line(start_cell, end_cell) \n pnts = []\n elev = []\n \n dem_elv = dem[:,1]\n dem_indx = dem[:,2:4]\n\n for cell in cells:\n x = top_left_cor[0] + cell[0] * cellsize[0] + cellsize[0] / 2\n y = top_left_cor[1] + cell[1] * cellsize[1] + cellsize[1] / 2\n #xy_indx=[str(cell[0]),str(cell[1])]\n z_indx=np.logical_and(np.equal(dem_indx[:,0],cell[0]),np.equal(dem_indx[:,1],cell[1]))\n try:\n z=dem_elv[z_indx][0]\n except (np.sum(z_indx)>1):\n print(\"Oops! 
That was more than one indices in dem matching the query index (in getCellValue)\")\n #z_indx = [i for i,j in enumerate(dem_indx) if j == xy_indx]\n z = float(dem_elv[z_indx])\n pnts.append((x, y))\n elev.append(z)\n return pnts, elev", "def adjacent_linear_directional(up_down=True):\n\n if up_down:\n return [Point(0, 1), Point(0, -1)]\n else:\n return [Point(1, 0), Point(-1, 0)]", "def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm", "def get_polygon_points(starting_point, vector_seq):\n \n res=[[]]\n res[0] = starting_point\n curr_point = starting_point\n\n i=1\n\n while i<len(vector_seq):\n if are_parallel(vector_seq[i],vector_seq[i-1]):\n tmp = vector_seq[i]\n vector_seq[i-1][0]=vector_seq[i-1][0]+ tmp[0]\n vector_seq[i-1][1]=vector_seq[i-1][1]+ tmp[1]\n vector_seq.pop(i)\n else:\n i=i+1\n \n for x in vector_seq:\n x_coord = curr_point[0]+x[0]\n y_coord = curr_point[1]+x[1]\n curr_point=[x_coord, y_coord]\n res.append(curr_point)\n\n return res", "def convex_line_segment(point_list:list, desc_y:bool=False, desc_x:bool=False)->list:\n if len(point_list) < 3:\n return point_list\n line = []\n x_extrema = None\n # Since the list is sorted by x second, the last point is actually the\n # first point of the last block of y values in the list (if more than\n # one coordinate has the minimum y value).\n last_point = point_list[-1]\n test_point = -2\n while point_list[test_point][1] == last_point[1]:\n last_point = point_list[test_point]\n test_point -= 1\n for point in point_list:\n # We end when we get to the last point. Points with the same y-value, but\n # more inside x-value won't be on the polygon.\n if point == last_point: \n break\n # We skip points that are left of the point we have added already.\n if not x_extrema is None:\n if desc_x and x_extrema >= point[0]:\n continue\n elif not desc_x and x_extrema <= point[0]:\n continue\n # If the line is empty, we just add it.\n if not line:\n line.append(point)\n x_extrema = point[0]\n continue\n dir = direction(line[-1], point, last_point)\n if not desc_y == desc_x:\n dir *= -1\n if dir > 0: # if and only if the polygon stays convex by adding this point...\n if len(line) > 1 and collinear(line[-2], line[-1], point):\n # We remove collinear points to match what Graham's scan does.\n del line[-1]\n line.append(point)\n x_extrema = point[0]\n # We end by adding the last point to the list to complete the line.\n line.append(last_point)\n return line", "def decomposing_line_cut_by_splicing(P, v, w):\n\n\n\tv_Point = Point(v)\n\tw_Point = Point(w)\n\n\tchain = LineString(P[0]+[P[0][0]])\n\n\tdistance_to_v = chain.project(v_Point)\n\tdistance_to_w = chain.project(w_Point)\n\n\tif not chain.intersects(v_Point):\n\t\tprint(\"decomposing_cut_as_line: V not on chain\")\n\tif not chain.intersects(w_Point):\n\t\tprint(\"decomposing_cut_as_line: W not on chain\")\n\tif distance_to_w == distance_to_v:\n\t\tprint(\"decomposing_cut_as_line: W and V are the same\")\n\n\n\tif distance_to_w >= chain.length or distance_to_w == 0:\n\n\t\tleft_chain, right_chain = cut_linestring(chain, distance_to_v)\n\n\t\tp_l = left_chain.coords[:]\n\t\tp_r = right_chain.coords[:]\t\t\n\n\t\treturn p_l, p_r\n\n\tif distance_to_v >= chain.length or distance_to_v == 0:\n\n\t\tleft_chain, right_chain = cut_linestring(chain, distance_to_w)\n\n\t\tp_l = right_chain.coords[:]\n\t\tp_r = left_chain.coords[:]\t\t\n\n\t\treturn p_l, 
p_r\n\n\n\tif distance_to_w > distance_to_v:\n\n\t\tleft_v_cut, right_v_cut = cut_linestring(chain, distance_to_v)\n\n\t\tdistance_to_w = right_v_cut.project(w_Point)\n\t\tleft_w_chain, right_w_chain = cut_linestring(right_v_cut, distance_to_w)\n\n\t\tp_l = left_v_cut.coords[:]+right_w_chain.coords[:-1]\n\t\tp_r = left_w_chain.coords[:]\n\n\t\treturn p_l, p_r\n\n\telse:\n\n\t\tleft_w_cut, right_w_cut = cut_linestring(chain, distance_to_w)\n\n\t\tdistance_to_v = right_w_cut.project(v_Point)\n\t\tleft_v_chain, right_v_chain = cut_linestring(right_w_cut, distance_to_v)\n\n\t\tp_l = left_w_cut.coords[:]+right_v_chain.coords[:-1]\n\t\tp_r = left_v_chain.coords[:]\n\n\t\treturn p_l, p_r" ]
[ "0.71643597", "0.7128908", "0.68142533", "0.67079157", "0.65747714", "0.6572509", "0.6561779", "0.6409816", "0.63894665", "0.6302099", "0.62836653", "0.62619257", "0.6258987", "0.6258032", "0.62064075", "0.6179941", "0.61505497", "0.6107346", "0.6048671", "0.6048366", "0.60101086", "0.6003186", "0.5998594", "0.5993805", "0.5993638", "0.5980014", "0.5974279", "0.59617877", "0.59615123", "0.59607244" ]
0.7856908
0
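An illustrative call to the side_points document above (a sketch, not dataset content): with p at the origin and v along the x-axis, the positive normal u = (-v[1], v[0]) = (0, 1), so the function keeps exactly the points whose y-coordinate is non-negative.

import numpy as np

p = np.array([0.0, 0.0])
v = np.array([1.0, 0.0])                  # support line along the x-axis
L = [np.array([1.0, 2.0]),
     np.array([3.0, -1.0]),
     np.array([2.0, 0.0])]
kept = side_points(p, v, L)               # keeps [1., 2.] and [2., 0.] (y >= 0)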
finds the next point of the convex hull; p = current point; v = direction of the support line; (p, v) defines the support line through p of direction v; L = list of points on the left side of the support line, where the new point of the convex hull is searched for
def next_in_hull(p, v, L): N = normalize(p, L) if N != []: q = N[0] index = 0 for k in range(1, len(N)): if (N[k] - q).dot(v) >= 0: # points on support line included q = N[k] index = k return index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convex_hull(L):\r\n CH=list()\r\n if L != []:\r\n P = list(L)\r\n # find the starting point of the algorithm and add it to the convex hull:\r\n ind0 = find_start(P)\r\n CH.append(P.pop(ind0))\r\n # find the next point and add it to the convex hull list CH:\r\n if P != []:\r\n ind1 = next_in_hull(CH[0], np.array([1,0]), P)\r\n CH.append(P.pop(ind1))\r\n # use the hyperplane criterion as function side_points to complete CH:\r\n while P != []:\r\n p = CH[-2]\r\n q = CH[-1]\r\n v = q - p \r\n P = side_points(CH[0], CH[-1] - CH[0], P)\r\n ind = next_in_hull(q, v, P)\r\n if P != []:\r\n CH.append(P.pop(ind))\r\n return CH", "def side_points(p, v, L): \r\n u = np.array([-v[1], v[0]]) # positive normal of v:\r\n N = list() # list of points on one side of the line p,v:\r\n for k in range(len(L)):\r\n if (L[k] - p).dot(u) >= 0:\r\n N.append(L[k])\r\n \r\n return N", "def _FindHull(s: List[sg.Point2], p: sg.Point2, q: sg.Point2, hull_points: List[sg.Point2]):\n if len(s) == 0:\n return\n seg = sg.Segment2(p, q)\n c = max(s, key=lambda point: sg.squared_distance(seg, point))\n hull_points.insert(hull_points.index(p) + 1, c)\n s.remove(c)\n s1, s2 = split_points_triangle(s, (p, q, c))\n _FindHull(s1, p, c, hull_points)\n _FindHull(s2, c, q, hull_points)", "def convex_hull(l):\n\tpass", "def point_of_intersection(l, pz=distance):\r\n # Must fix the error here. Right now, any vector can have a point in the plane.\r\n # Must make it so that only vectors pointing in the planes direction has a point there\r\n # Can be done by checking whether d is positive or not.\r\n # This is to prevent vectors that point away from the detector to be counted\r\n # The definitions below assume that the detector is centred in the origin and its length is oriented along the z-axis.\r\n p0 = np.array([0,0,pz]) # Point on the plane\r\n l0 = np.array([0,0,0]) # Point on the line\r\n n = np.array([0,0,1]) # Normal vector of the plane\r\n d = np.dot(p0-l0, n)/np.dot(l, n)\r\n point = [i*d for i in l]\r\n return point", "def jarvis_convex_hull(points):\n start_index = np.argmax(points[:, 0]) # Point with the highest y-coordinate\n start_point = points[start_index]\n # result = [start_index[:]]\n result = [start_index]\n added_points = {start_index}\n while True:\n for ref_index, ref_point in enumerate(points):\n exit_ = True\n if ref_index == start_index or ref_index in added_points:\n continue\n\n signs = 0\n threshold = len(points) - 2\n for compare_index, compare_point in enumerate(points):\n if compare_index == ref_index or compare_index == start_index:\n continue\n check = compare(start_point, ref_point, compare_point)\n if abs(check) < 1e-2:\n dist_start_ref = distance(start_point, ref_point)\n dist_start_compare = distance(start_point, compare_point)\n if dist_start_compare > dist_start_ref:\n threshold = threshold + 1\n else:\n threshold = threshold - 1\n continue\n signs = signs + 1 if check > 0 else signs - 1\n\n if abs(signs) < threshold:\n continue\n\n exit_ = False\n result.append(ref_index[:])\n added_points.add(ref_index)\n start_index = ref_index\n break\n\n if exit_:\n return result", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. 
z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]", "def convex_line_segment(point_list:list, desc_y:bool=False, desc_x:bool=False)->list:\n if len(point_list) < 3:\n return point_list\n line = []\n x_extrema = None\n # Since the list is sorted by x second, the last point is actually the\n # first point of the last block of y values in the list (if more than\n # one coordinate has the minimum y value).\n last_point = point_list[-1]\n test_point = -2\n while point_list[test_point][1] == last_point[1]:\n last_point = point_list[test_point]\n test_point -= 1\n for point in point_list:\n # We end when we get to the last point. Points with the same y-value, but\n # more inside x-value won't be on the polygon.\n if point == last_point: \n break\n # We skip points that are left of the point we have added already.\n if not x_extrema is None:\n if desc_x and x_extrema >= point[0]:\n continue\n elif not desc_x and x_extrema <= point[0]:\n continue\n # If the line is empty, we just add it.\n if not line:\n line.append(point)\n x_extrema = point[0]\n continue\n dir = direction(line[-1], point, last_point)\n if not desc_y == desc_x:\n dir *= -1\n if dir > 0: # if and only if the polygon stays convex by adding this point...\n if len(line) > 1 and collinear(line[-2], line[-1], point):\n # We remove collinear points to match what Graham's scan does.\n del line[-1]\n line.append(point)\n x_extrema = point[0]\n # We end by adding the last point to the list to complete the line.\n line.append(last_point)\n return line", "def get_intersection(self, l, max_y=None):\n\n # Get the points\n i, j = self.breakpoint\n\n # Initialize the resulting point\n result = Coordinate()\n p: Coordinate = i\n\n # First we replace some stuff to make it easier\n a = i.xd\n b = i.yd\n c = j.xd\n d = j.yd\n u = 2 * (b - l)\n v = 2 * (d - l)\n\n # Handle the case where the two points have the same y-coordinate (breakpoint is in the middle)\n if i.yd == j.yd:\n result.xd = (i.xd + j.xd) / 2\n\n if j.xd < i.xd:\n result.yd = max_y or float('inf')\n return result\n\n # Handle cases where one point's y-coordinate is the same as the sweep line\n elif i.yd == l:\n result.xd = i.xd\n p = j\n elif j.yd == l:\n result.xd = j.xd\n else:\n # We now need to solve for x\n # 1/u * (x**2 - 2*a*x + a**2 + b**2 - l**2) = 1/v * (x**2 - 2*c*x + c**2 + d**2 - l**2)\n # Then we let Wolfram alpha do the heavy work for us, and we put it here in the code :D\n x = -(Decimal.sqrt(\n v * (a ** 2 * u - 2 * a * c * u + b ** 2 * (u - v) + c ** 2 * u) + d ** 2 * u * (v - u) + l ** 2 * (\n u - v) ** 2) + a * v - c * u) / (u - v)\n result.xd = x\n\n # We have to re-evaluate 
this, since the point might have been changed\n a = p.xd\n b = p.yd\n x = result.xd\n u = 2 * (b - l)\n\n # Handle degenerate case where parabolas don't intersect\n if u == 0:\n result.yd = float(\"inf\")\n return result\n\n # And we put everything back in y\n result.yd = 1 / u * (x ** 2 - 2 * a * x + a ** 2 + b ** 2 - l ** 2)\n return result", "def convex_hull_model(self, _start='random', _direction='random'):\n steps = [{'Tour': [], 'Tourlength': 0}]\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n\n if nodes:\n # Step 1: Sketch the connections between adjacent boundary\n # points of the convex hull.\n # hull is a list of ids, not nodes,the hull is always generated CW\n hull = tsputil.convex_hull_helper(nodes)\n # Step 2: Select a starting point and a direction (randomly).\n # start is an id not a node\n startinfo = get_direction_and_start(nodes, _start, _direction)\n start = startinfo[0]\n # if direction is ccw ,reverse hull\n if not startinfo[1] == 1:\n hull.reverse()\n\n steps.append(construct_step(hull, startinfo[2], startinfo[3], nodes, scale))\n\n # Step 3: If the starting point is on the boundary,\n # the starting node is the current node. \"\"\"\n if start in hull:\n # The arc connecting the current node to the adjacent boundary\n # node in the direc- tion of travel is referred to as the\n # current arc.\n cn_index = hull.index(start)\n current_node = hull[cn_index]\n # get adjacent node\n an_index = (cn_index + 1) % (len(hull))\n adjacent_node = hull[an_index]\n # Proceed immediately to Step 4.\"\"\"\n else:\n # If the starting point is not on the boundary, apply the\n # insertion rule to find the closest arc on the boundary. \"\"\"\n closest_arc = find_closest_arc(start, hull, nodes)\n # Connect the starting point to the end node of the closest\n # arc which is in the direction of travel.\n # This node becomes the current node.\"\"\"\n # insert startnode into hull\n hull.insert(hull.index(closest_arc[0]) + 1, start)\n steps.append(construct_step(hull, startinfo[2], startinfo[3], nodes, scale))\n # update current arc nodes\n current_node = start\n adjacent_node = hull[hull.index(closest_arc[1])]\n # Step 4: Apply the insertion criterion to identify which\n # unconnected interior point is closest to the current arc.\n # repeat step 4 and 5 until all nodes are included in the path\n while len(hull) <= len(nodes):\n while True:\n current_arc = (current_node, adjacent_node)\n # find closest node not in the hull\n interior_node = find_closest_interior_node(current_arc, hull, nodes)\n # Apply the insertion criterion to check whether the\n # closest node is closer to any other arc.\n is_closer = is_closer_to_other_arc(interior_node, current_arc, hull, nodes)\n # If not, proceed to Step 5. If it is, move to the end node of\n # the current arc. This becomes the current node. Repeat\n # Step 4.\n if not is_closer:\n break\n else:\n current_node = current_arc[1]\n an_index = (hull.index(current_node) + 1) % (len(hull))\n adjacent_node = hull[an_index]\n # Step 5: Insert the closest node. 
The connection between the\n # current node and the newly inserted node becomes the current arc.\n # Retaining the current node, return to Step 4 and repeat Steps 4 and\n # 5 until a complete tour is obtained\"\"\"\n hull.insert(hull.index(current_node) + 1, interior_node)\n adjacent_node = interior_node\n steps.append(construct_step(hull, startinfo[2], startinfo[3], nodes, scale))\n\n self._datacontroller.commit_change('pathsteps', steps)\n self._datacontroller.commit_change('path', steps[-1])", "def eddy_floyd(points, side=\"\", p_min=[], p_max=[], show=True, save=False, detailed=True):\n# :param points: the points from which to find the convex hull\n# :param side: if \"up\", we care about the points above the line (p_min,p_max), else, below\n# :param p_min: the point on the left of the line (min = min abscissa)\n# :param p_max: the point on the right of the line\n# :param show: if True, the progress in constructing the hull will be plotted on each iteration in a window\n# :param save: if True, the progress in constructing the hull will be saved on each iteration in a .png file\n# :param detailed: if True, even non convex explored polygons are plotted\n if p_min==[] or p_max==[]:\n #Find the point the most on the left (p_min) and the most on the right (p_max)\n p_min,p_max=points[0],points[0]\n for p in points:\n if p[0]<p_min[0]: p_min=p\n if p[0]>p_max[0]: p_max=p\n\n #Divide the points in 2 subproblems (E2=above line, E1=below line)\n #Remark: p_min and p_max are neither in E2 nore in E1 \n E1,E2=[],[]\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: E1+=[p]\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=\"up\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_1=eddy_floyd(E1,side=\"down\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_min]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n return [p_max]+to_be_returned_2+[p_min]+to_be_returned_1\n\n \"\"\"End algorithm ?\"\"\"\n #Find if points remain outside the line (either above if up or below if done)\n end=True\n i=0\n while end and i<len(points):\n p=points[i]\n if side==\"up\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: end=False \n if side==\"down\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: end=False \n i+=1\n\n \"\"\"Intermidiate case, look for the furthest point and divide the pb in 2 pbs\"\"\"\n if not end:\n p_extr,dist=p_min,0\n E1,E2=[],[]\n if side==\"up\":\n #Find the furthest point from the line (above)\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems\n for p in points:\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])>0: E1+=[p]\n\n #Go to next step and plot results, the 
element to return is first divided in 2 parts to plot them seperately\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_max]+to_be_returned+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n print\n return to_be_returned \n\n if side==\"down\":\n #Find the furthest point from the line (below) \n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems \n for p in points:\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])<0: E2+=[p]\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])<0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_min]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n print\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n print\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_min]+to_be_returned+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n print\n return to_be_returned \n \n \"\"\"End case\"\"\"\n if end:\n return []\n\n \"\"\"None of these cases\"\"\"\n print(\"ERREUR\")\n return []", "def select_candidate_point(edge:tuple, points:list, hull:list, min_cosine:float=-1)->tuple:\n min_sqr_distance = None\n selected = None\n for point in points:\n nearest_point = closest_line_point(point, edge)\n if not near_segment(nearest_point, edge):\n # We ignore points that wouldn't be above or below the edge if rotated horizontal\n continue\n sqr_distance = point_sqr_distance(nearest_point, point)\n if not(min_sqr_distance is None) and min_sqr_distance < sqr_distance:\n # We ignore points that aren't a candidate for minimum distance\n continue\n vector_a = vectorize(edge[0], point)\n vector_b = vectorize(edge[1], point)\n cos_angle = vector_cosine_angle(vector_a, vector_b)\n if cos_angle is None or cos_angle > min_cosine:\n # We ignore points that would make an angle smaller (tighter) than the minimum angle.\n continue\n new_segments = [(edge[0], point), (point, edge[1])]\n if segments_intersects_hull(new_segments, hull, edge):\n # We ignore points that would cause the hull to self-intersect\n continue\n selected = point\n min_sqr_distance = sqr_distance\n return selected", "def 
_nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))", "def addPoint(self, p):\n p = np.asarray(p)\n idx = len(self.coords)\n # print(\"coords[\", idx,\"] ->\",p)\n self.coords.append(p)\n\n # Search the triangle(s) whose circumcircle contains p\n bad_triangles = []\n for T in self.triangles:\n # Choose one method: inCircleRobust(T, p) or inCircleFast(T, p)\n if self.inCircleFast(T, p):\n bad_triangles.append(T)\n\n # Find the CCW boundary (star shape) of the bad triangles,\n # expressed as a list of edges (point pairs) and the opposite\n # triangle to each edge.\n boundary = []\n # Choose a \"random\" triangle and edge\n T = bad_triangles[0]\n edge = 0\n # get the opposite triangle of this edge\n while True:\n # Check if edge of triangle T is on the boundary...\n # if opposite triangle of this edge is external to the list\n tri_op = self.triangles[T][edge]\n if tri_op not in bad_triangles:\n # Insert edge and external triangle into boundary list\n boundary.append((T[(edge+1) % 3], T[(edge-1) % 3], tri_op))\n\n # Move to next CCW edge in this triangle\n edge = (edge + 1) % 3\n\n # Check if boundary is a closed loop\n if boundary[0][0] == boundary[-1][1]:\n break\n else:\n # Move to next CCW edge in opposite triangle\n edge = (self.triangles[tri_op].index(T) + 1) % 3\n T = tri_op\n\n # Remove triangles too near of point p of our solution\n for T in bad_triangles:\n del self.triangles[T]\n del self.circles[T]\n\n # Retriangle the hole left by bad_triangles\n new_triangles = []\n for (e0, e1, tri_op) in boundary:\n # Create a new triangle using point p and edge extremes\n T = (idx, e0, e1)\n\n # Store circumcenter and circumradius of the triangle\n self.circles[T] = self.circumcenter(T)\n\n # Set opposite triangle of the edge as neighbour of T\n self.triangles[T] = [tri_op, None, None]\n\n # Try to set T as neighbour of the opposite triangle\n if tri_op:\n # search the neighbour of tri_op that use edge (e1, e0)\n for i, neigh in enumerate(self.triangles[tri_op]):\n if neigh:\n if e1 in neigh and e0 in neigh:\n # change link to use our new triangle\n self.triangles[tri_op][i] = T\n\n # Add triangle to a temporal list\n new_triangles.append(T)\n\n # Link the new triangles each another\n N = len(new_triangles)\n for i, T in enumerate(new_triangles):\n self.triangles[T][1] = new_triangles[(i+1) % N] # next\n self.triangles[T][2] = new_triangles[(i-1) % N] # previous", "def intersect(l: Line, p: Plane) -> Point:\n if math.isclose((l.d * p.normal()), 0):\n # If the line direction is perpendicular to the plane normal,\n # the line and plane must be parallel.\n return None\n else:\n # There exists a parameter t, which makes\n # p.isInPlane(l.point(t)) == 0\n # Let's find it.\n # Initial guess\n t1 = 1\n p1 = l.point(t1)\n d1 = distancePointPlane(p1, p)\n t2 = 2\n p2 = l.point(t2)\n d2 = distancePointPlane(p2, p)\n\n # Calculate line through the two points (t,d)\n a = (d2 - d1) / (t2 - t1)\n b = d1 - a * t1\n\n # Find the t-value where d is zero\n # 0 = at+b <=> t = -b/a\n t = -b / a\n print(\"parameter: {}\".format(t))\n return l.point(t)", "def convex_hull(points):\n pointList = ExtendedTupleList(points)\n complete_ranges = pointList.range_within(0, 1)\n # Filters for four quadrants\n filters = [\n ((0, 
complete_ranges[1][\"max\"][2], \">=\"), (1, complete_ranges[0][\"max\"][2], \">=\")), #Q1\n ((0, complete_ranges[1][\"max\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][2], \">=\")), #Q2\n ((0, complete_ranges[1][\"min\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][1], \"<=\")), #Q3\n ((0, complete_ranges[1][\"min\"][2], \">=\"), (1, complete_ranges[0][\"max\"][1], \"<=\")) #Q4\n ]\n # Sorting reversals (True means Desc sort, False means Asc sort. Y sort given first)\n sorts = [\n (True, True),\n (True, False),\n (False, False),\n (False, True),\n ]\n hull = ExtendedTupleList([])\n # In CW order of quadrants...\n for index in [0, 3, 2, 1]:\n # Find all the relevant points\n quad_points = ExtendedTupleList([point for point in pointList.filter(filters[index])])\n # Sort them properly\n quad_points.double_sort(1, 0, reverse_outside=sorts[index][0], reverse_inside=sorts[index][1])\n # Build a convex line segment\n line_segment = convex_line_segment(quad_points, sorts[index][0], sorts[index][1])\n # Reverse it, if we need to\n if index % 2 == 1:\n line_segment.reverse()\n # Add all the points in, avoiding repeated points.\n hull.extend(line_segment, avoid_repeats=True)\n return hull", "def find_hull_vertices(points: np.ndarray) -> np.ndarray:\n M = 3\n N = points.shape[0]\n for i in range(4, N):\n while ccw(points[M], points[M - 1], points[i]) >= 0:\n M -= 1\n\n M += 1\n swap(points, M, i)\n\n return points[1:M + 1]", "def convex_hull(*args):\n from point import Point\n from line import Segment\n from polygon import Polygon\n\n def uniquify(a):\n # not order preserving\n return list(set(a))\n\n p = args[0]\n if isinstance(p, Point):\n p = uniquify(args)\n\n if len(p) == 1:\n return p[0]\n elif len(p) == 2:\n return Segment(p[0], p[1])\n\n def orientation(p, q, r):\n '''Return positive if p-q-r are clockwise, neg if ccw, zero if\n collinear.'''\n return (q[1] - p[1])*(r[0] - p[0]) - (q[0] - p[0])*(r[1] - p[1])\n\n # scan to find upper and lower convex hulls of a set of 2d points.\n U = []\n L = []\n p.sort()\n for p_i in p:\n while len(U) > 1 and orientation(U[-2], U[-1], p_i) <= 0:\n U.pop()\n while len(L) > 1 and orientation(L[-2], L[-1], p_i) >= 0:\n L.pop()\n U.append(p_i)\n L.append(p_i)\n U.reverse()\n convexHull = tuple(L + U[1:-1])\n\n if len(convexHull) == 2:\n return Segment(convexHull[0], convexHull[1])\n return Polygon(convexHull)", "def closest_point(self, l):\n cos = np.dot(self.direction, l.direction)\n n = 1 - cos ** 2\n if n < sys.float_info.epsilon:\n # Lines are parallel.\n return self.zero\n\n d0 = l.zero - self.zero\n a = np.dot(d0, self.direction)\n b = np.dot(d0, l.direction)\n return self.zero + self.direction * ( a - b * cos) / n", "def concave_hull(hull:list, points:list, max_iterations:int=None, min_length_fraction:float=0, min_angle:float=90)->list:\n tweet.info(\"Creating concave hull; minimum side length {}% of average, minimum_angle {}\".format(min_length_fraction * 100, min_angle))\n test_points = set(points)\n ignore_points = []\n avg_sqr_distance = 0\n for k in range(0, len(hull)-1):\n avg_sqr_distance += point_sqr_distance(hull[k], hull[k+1])\n test_points.remove(hull[k])\n avg_sqr_distance /= len(hull) - 1\n min_sqr_length = avg_sqr_distance * (min_length_fraction ** 2) # since we get sqr_length, we square the fraction\n min_cosine = math.cos(math.radians(min_angle))\n \n while (max_iterations is None or max_iterations > 0) and test_points:\n selection, edge = select_longest_edge(hull, ignore_points, min_sqr_length)\n tweet.info(\"Considering edge {}; {} 
points left\".format(edge, len(test_points)))\n if selection is None:\n break\n selected_point = select_candidate_point(edge, test_points, hull, min_cosine)\n if selected_point is None:\n # This edge has no more candidate points, so we ignore it in the next pass\n ignore_points.append(edge[0])\n tweet.debug(\"No candidate point found.\")\n continue\n tweet.debug(\"Found point {}, inserting new edge.\".format(selected_point))\n if not max_iterations is None:\n max_iterations -= 1\n # We add the point into the concave hull\n hull.insert(selection + 1, selected_point)\n test_points.remove(selected_point)\n return hull", "def free_line(p, eps, s, dps1, dps2, ds):\n px = p[0]\n py = p[1]\n s1x = s[0, 0]\n s1y = s[0, 1]\n s2x = s[1, 0]\n s2y = s[1, 1]\n if s1x == s2x and s1y == s2y:\n if eucl_dist(p, s[0]) > eps:\n lf = [-1, -1]\n else:\n lf = [0, 1]\n else:\n if point_to_seg(p, s[0], s[1], dps1, dps2, ds) > eps:\n # print(\"No Intersection\")\n lf = [-1, -1]\n else:\n segl = eucl_dist(s[0], s[1])\n segl2 = segl * segl\n intersect = circle_line_intersection(px, py, s1x, s1y, s2x, s2y, eps)\n if intersect[0][0] != intersect[1][0] or intersect[0][1] != intersect[1][1]:\n i1x = intersect[0, 0]\n i1y = intersect[0, 1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n\n i2x = intersect[1, 0]\n i2y = intersect[1, 1]\n u2 = (((i2x - s1x) * (s2x - s1x)) + ((i2y - s1y) * (s2y - s1y))) / segl2\n ordered_point = sorted((0, 1, u1, u2))\n lf = ordered_point[1:3]\n else:\n if px == s1x and py == s1y:\n lf = [0, 0]\n elif px == s2x and py == s2y:\n lf = [1, 1]\n else:\n i1x = intersect[0][0]\n i1y = intersect[0][1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n if 0 <= u1 <= 1:\n lf = [u1, u1]\n else:\n lf = [-1, -1]\n return lf", "def get_point(k, refpt):\n i = 0\n while i < k:\n rho, theta = np.random.uniform(r, 2*r), np.random.uniform(0, 2*np.pi)\n pt = refpt[0] + rho*np.cos(theta), refpt[1] + rho*np.sin(theta), 0\n if not (0 <= pt[0] < width and 0 <= pt[1] < height):\n # This point falls outside the domain of the grid, so try again.\n i += 1\n continue\n if point_valid(pt) and is_on_face(pt, v1, v2, v3):\n return pt\n i += 1\n # We failed to find a suitable point in the vicinity of refpt.\n return False", "def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three points lie in the same x,y plane\n if not isXYPlanar([p,a,b]):\n raise ValueError('non-XY points in linePointXY call')\n return false\n # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n # return False\n\n linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n ## this is the fast case:\n if not inside and distance:\n return linedist\n \n ## find out where the intersection between the original line and a\n ## line defined by the point and an orthogonal direction vector\n ## is. We do this by constructing two direction vectors\n ## orthogonal to the orgiginal line scaled by the line distance,\n ## and adding them to the point in question. 
Assuming that the\n ## line distance is not zero, only one of these constructed points\n ## will fall on the line\n\n ## compute unit direction vector for original line\n dir = sub(b,a)\n dir = scale3(dir,1.0/mag(dir))\n\n ## compute two orthogonal direction vectors of length linedist\n ordir1 = scale3(orthoXY(dir),linedist)\n ordir2 = scale3(ordir1, -1.0)\n \n ## there are two possible intersection points\n pi1 = add(p,ordir1)\n pi2 = add(p,ordir2)\n\n ## compute distances\n d1pa = dist(a,pi1)\n d1pb = dist(pi1,b)\n d1 = d1pa+d1pb # \"triangle\" with pi1\n\n d2pa = dist(a,pi2)\n d2pb = dist(pi2,b)\n d2 = d2pa+d2pb # \"triangle\" with pi2\n\n ## the shortest \"triangle\" distance will signal the point that\n ## is actually on the line, even if that point falls outside\n ## the a,b line interval\n \n if params or not inside: # if we don't care about being inside the\n # line segment\n if d1 <= d2:\n if distance:\n return d1\n elif params:\n return d1pb/abdist\n else:\n return pi1\n else:\n if distance:\n return d2\n elif params:\n return d2pb/abdist\n else:\n return pi2\n \n \n ## if the closest point on the line to point p lies between\n ## the endpoints of the line, then either d1 or d2 will equal\n ## abdist. IF neither do, then we know that the closest point lies\n ## outside the endpoints\n\n if abs(d1-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi1\n\n if abs(d2-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi2\n\n ## closest point is outside the interval. That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b", "def _convex_hull_side(image, start, end):\n\n convex_points = [start]\n\n x_start, y_start = start\n x_end, y_end = end\n\n side = (x_start <= x_end, y_start <= y_end)\n\n\n ranges = {\n (True, True): [\n [x_start + 1, x_end + 1],\n [y_start, y_end + 1],\n False\n ],\n (False, True): [\n [y_start + 1, y_end + 1],\n [x_start, x_end - 1, -1],\n True\n ],\n (False, False): [\n [x_start - 1, x_end - 1, -1],\n [y_start, y_end - 1, -1],\n False\n ],\n (True, False): [\n [y_start - 1, y_end - 1, -1],\n [x_start, x_end + 1],\n True\n ]\n }\n\n prev = 0\n\n for outer in range(*ranges[side][0]):\n\n curr_pixel = None\n\n for inner in range(*ranges[side][1]):\n if ranges[side][2] and image[outer, inner] == 0:\n curr_pixel = (inner, outer)\n break\n elif not ranges[side][2] and image[inner, outer] == 0:\n curr_pixel = (outer, inner)\n break\n\n if curr_pixel is None:\n continue\n\n while True:\n # slope infinite for first point\n prev_slope = (\n float(\"-inf\") if prev == 0\n else slope(\n convex_points[prev - 1],\n convex_points[prev],\n ranges[side][2]))\n\n # remove previous point if it yields concave segment\n if prev_slope > slope(\n convex_points[prev],\n curr_pixel,\n ranges[side][2]\n ):\n convex_points.pop(prev)\n prev -= 1\n # add point to hull if it yields convex segment\n else:\n convex_points.append(curr_pixel)\n prev += 1\n break\n\n return convex_points[1:]", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB 
vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. \n return lower[:-1] + upper[:-1]", "def graham_scan(points):\n\n # Find point with smallest y coordinate\n # If two points have equal y coordinates, select the one with the lower x-coordinate\n smallest = points[0]\n for p in points:\n if p[1] < smallest[1]:\n smallest = p\n elif p[1] == smallest[1]:\n if p[0] < smallest[0]:\n smallest = p\n\n # Sort points by angle over smallest to x-axis\n points.sort(key=lambda x: angle(x, smallest))\n\n # Our stack\n hull = [smallest, points[1]]\n i = 2\n while i < len(points):\n # If the last points and the new point form a counter-clockwise triangle,\n # we need the last point. Therefore, push the new point\n if ccw(hull[-2], hull[-1], points[i]) > 0 or len(hull) == 2:\n hull.append(points[i])\n i += 1\n # If the two last points and the new point don't form a counter-clockwise triangle,\n # the we don't need the last point\n else:\n hull.pop()\n return hull", "def convex_hull(self):\n if isinstance(self.crs, GeographicalCRS):\n raise CRSError(\"not implemented for geographical coordinate \"\n \"systems. Project to a projected coordinate system.\")\n\n points = [pt for pt in self]\n\n # Find the lowermost (left?) 
point\n pt0 = points[0]\n idx = 0\n for i, pt in enumerate(points[1:]):\n if (pt.y < pt0.y) or ((pt.y == pt0.y) and (pt.x < pt0.x)):\n pt0 = pt\n idx = i+1\n points.pop(idx)\n\n # Sort CCW relative to pt0, and drop all but farthest of any duplicates\n points.sort(key=lambda pt: pt0.distance(pt))\n points.sort(key=lambda pt: _cvectorgeo.polarangle(pt0.vertex, pt.vertex))\n alpha = -1\n drop = []\n for i,pt in enumerate(points):\n a = _cvectorgeo.polarangle(pt0.vertex, pt.vertex)\n if a == alpha:\n drop.append(i)\n else:\n alpha = a\n\n if len(drop) != 0:\n for i in drop[::-1]:\n points.pop(i)\n\n # initialize convex hull\n if len(points) == 2:\n return Polygon([pt0, points[0], points[1]])\n elif len(points) == 1:\n raise GeometryError(\"convex polygon not defined for two points\")\n else:\n\n S = [pt0, points[0], points[1]]\n for pt in points[2:]:\n while not _cvectorgeo.isleft(S[-2].vertex, S[-1].vertex, pt.vertex):\n S.pop()\n S.append(pt)\n\n return Polygon(S, crs=self.crs)", "def get_hull_points(self, show_progress):\n if self.points and not self.hull_points:\n self.graham_scan(show_progress)\n print(\"Input: {} points\").format(len(self.points))\n print(\"Convex hull: {} points\").format(len(self.hull_points))\n return self.hull_points", "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross\n # product. Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n return lower, upper" ]
[ "0.7018159", "0.64776105", "0.6332543", "0.6327088", "0.6299082", "0.62214756", "0.62113076", "0.61873055", "0.61550856", "0.61229056", "0.60984683", "0.602469", "0.59836024", "0.5978045", "0.5922133", "0.59168583", "0.591337", "0.5874701", "0.58707666", "0.5819191", "0.58015865", "0.5751218", "0.57394654", "0.57354635", "0.57184047", "0.5716312", "0.5692943", "0.5692488", "0.5671612", "0.5610023" ]
0.7877421
0
the main function returning corner points of the convex hull of L
def convex_hull(L): CH=list() if L != []: P = list(L) # find the starting point of the algorithm and add it to the convex hull: ind0 = find_start(P) CH.append(P.pop(ind0)) # find the next point and add it to the convex hull list CH: if P != []: ind1 = next_in_hull(CH[0], np.array([1,0]), P) CH.append(P.pop(ind1)) # use the hyperplane criterion as function side_points to complete CH: while P != []: p = CH[-2] q = CH[-1] v = q - p P = side_points(CH[0], CH[-1] - CH[0], P) ind = next_in_hull(q, v, P) if P != []: CH.append(P.pop(ind)) return CH
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convex_hull(l):\n\tpass", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. \n return lower[:-1] + upper[:-1]", "def convex_hull(points):\n pointList = ExtendedTupleList(points)\n complete_ranges = pointList.range_within(0, 1)\n # Filters for four quadrants\n filters = [\n ((0, complete_ranges[1][\"max\"][2], \">=\"), (1, complete_ranges[0][\"max\"][2], \">=\")), #Q1\n ((0, complete_ranges[1][\"max\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][2], \">=\")), #Q2\n ((0, complete_ranges[1][\"min\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][1], \"<=\")), #Q3\n ((0, complete_ranges[1][\"min\"][2], \">=\"), (1, complete_ranges[0][\"max\"][1], \"<=\")) #Q4\n ]\n # Sorting reversals (True means Desc sort, False means Asc sort. 
Y sort given first)\n sorts = [\n (True, True),\n (True, False),\n (False, False),\n (False, True),\n ]\n hull = ExtendedTupleList([])\n # In CW order of quadrants...\n for index in [0, 3, 2, 1]:\n # Find all the relevant points\n quad_points = ExtendedTupleList([point for point in pointList.filter(filters[index])])\n # Sort them properly\n quad_points.double_sort(1, 0, reverse_outside=sorts[index][0], reverse_inside=sorts[index][1])\n # Build a convex line segment\n line_segment = convex_line_segment(quad_points, sorts[index][0], sorts[index][1])\n # Reverse it, if we need to\n if index % 2 == 1:\n line_segment.reverse()\n # Add all the points in, avoiding repeated points.\n hull.extend(line_segment, avoid_repeats=True)\n return hull", "def _convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list.\n return lower[:-1] + upper[:-1]", "def find_hull_vertices(points: np.ndarray) -> np.ndarray:\n M = 3\n N = points.shape[0]\n for i in range(4, N):\n while ccw(points[M], points[M - 1], points[i]) >= 0:\n M -= 1\n\n M += 1\n swap(points, M, i)\n\n return points[1:M + 1]", "def give_convex_hull(rand_points):\n return ConvexHull(rand_points)", "def make_convex_hull(self):\n hull_points_d = []\n try:\n print \"self.V_bar_list_d******************\", self.V_bar_list_d\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross\n # product. 
Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n return lower, upper", "def convex_hull(*args):\n from point import Point\n from line import Segment\n from polygon import Polygon\n\n def uniquify(a):\n # not order preserving\n return list(set(a))\n\n p = args[0]\n if isinstance(p, Point):\n p = uniquify(args)\n\n if len(p) == 1:\n return p[0]\n elif len(p) == 2:\n return Segment(p[0], p[1])\n\n def orientation(p, q, r):\n '''Return positive if p-q-r are clockwise, neg if ccw, zero if\n collinear.'''\n return (q[1] - p[1])*(r[0] - p[0]) - (q[0] - p[0])*(r[1] - p[1])\n\n # scan to find upper and lower convex hulls of a set of 2d points.\n U = []\n L = []\n p.sort()\n for p_i in p:\n while len(U) > 1 and orientation(U[-2], U[-1], p_i) <= 0:\n U.pop()\n while len(L) > 1 and orientation(L[-2], L[-1], p_i) >= 0:\n L.pop()\n U.append(p_i)\n L.append(p_i)\n U.reverse()\n convexHull = tuple(L + U[1:-1])\n\n if len(convexHull) == 2:\n return Segment(convexHull[0], convexHull[1])\n return Polygon(convexHull)", "def convex(points):\r\n if isinstance(points, np.ndarray):\r\n points = np.unique(points, axis=0)\r\n else:\r\n pts = []\r\n points = [pts.append(i) for i in points if i not in pts] # Remove duplicates\r\n del pts\r\n if len(points) <= 1:\r\n return points\r\n # Build lower hull\r\n lower = []\r\n for p in points:\r\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\r\n lower.pop()\r\n lower.append(p)\r\n # Build upper hull\r\n upper = []\r\n for p in reversed(points):\r\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\r\n upper.pop()\r\n upper.append(p)\r\n #print(\"lower\\n{}\\nupper\\n{}\".format(lower, upper))\r\n return np.array(lower[:-1] + upper) # upper[:-1]) # for open loop\r", "def convex_hull(points):\n points = np.array(points)\n hull = ConvexHull(points)\n return points[hull.vertices, :]", "def concave_hull(hull:list, points:list, max_iterations:int=None, min_length_fraction:float=0, min_angle:float=90)->list:\n tweet.info(\"Creating concave hull; minimum side length {}% of average, minimum_angle {}\".format(min_length_fraction * 100, min_angle))\n test_points = set(points)\n ignore_points = []\n avg_sqr_distance = 0\n for k in range(0, len(hull)-1):\n avg_sqr_distance += point_sqr_distance(hull[k], hull[k+1])\n test_points.remove(hull[k])\n avg_sqr_distance /= len(hull) - 1\n min_sqr_length = avg_sqr_distance * (min_length_fraction ** 2) # since we get sqr_length, we square the fraction\n min_cosine = math.cos(math.radians(min_angle))\n \n while (max_iterations is None or max_iterations > 0) and test_points:\n selection, edge = select_longest_edge(hull, ignore_points, min_sqr_length)\n tweet.info(\"Considering edge {}; {} points left\".format(edge, len(test_points)))\n if selection is None:\n break\n selected_point = select_candidate_point(edge, test_points, hull, min_cosine)\n if selected_point is None:\n # This edge has no more candidate points, so we ignore it in the next pass\n ignore_points.append(edge[0])\n tweet.debug(\"No candidate point found.\")\n 
continue\n tweet.debug(\"Found point {}, inserting new edge.\".format(selected_point))\n if not max_iterations is None:\n max_iterations -= 1\n # We add the point into the concave hull\n hull.insert(selection + 1, selected_point)\n test_points.remove(selected_point)\n return hull", "def convex_hull(self):\n if isinstance(self.crs, GeographicalCRS):\n raise CRSError(\"not implemented for geographical coordinate \"\n \"systems. Project to a projected coordinate system.\")\n\n points = [pt for pt in self]\n\n # Find the lowermost (left?) point\n pt0 = points[0]\n idx = 0\n for i, pt in enumerate(points[1:]):\n if (pt.y < pt0.y) or ((pt.y == pt0.y) and (pt.x < pt0.x)):\n pt0 = pt\n idx = i+1\n points.pop(idx)\n\n # Sort CCW relative to pt0, and drop all but farthest of any duplicates\n points.sort(key=lambda pt: pt0.distance(pt))\n points.sort(key=lambda pt: _cvectorgeo.polarangle(pt0.vertex, pt.vertex))\n alpha = -1\n drop = []\n for i,pt in enumerate(points):\n a = _cvectorgeo.polarangle(pt0.vertex, pt.vertex)\n if a == alpha:\n drop.append(i)\n else:\n alpha = a\n\n if len(drop) != 0:\n for i in drop[::-1]:\n points.pop(i)\n\n # initialize convex hull\n if len(points) == 2:\n return Polygon([pt0, points[0], points[1]])\n elif len(points) == 1:\n raise GeometryError(\"convex polygon not defined for two points\")\n else:\n\n S = [pt0, points[0], points[1]]\n for pt in points[2:]:\n while not _cvectorgeo.isleft(S[-2].vertex, S[-1].vertex, pt.vertex):\n S.pop()\n S.append(pt)\n\n return Polygon(S, crs=self.crs)", "def convex_hull(image):\n\n corners = find_corners(image)\n\n\n vertices = [corners[0]]\n\n for i in range(len(corners)):\n vertices.extend(\n _convex_hull_side(\n image, corners[i], corners[(i + 1) % len(corners)]))\n\n return vertices", "def convex_hull(self):\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n hull = tsputil.convex_hull_helper(nodes)\n if hull:\n result = construct_step(hull, 'Most Top Left Node', 'Clockwise', nodes, scale)\n self._datacontroller.commit_change('path', result)", "def getContourRep(self):\n\t\tvertex1 = [[self.startX, self.startY]]\n\t\tvertex2 = [[self.startX, self.endY]]\n\t\tvertex3 = [[self.endX, self.startY]]\n\t\tvertex4 = [[self.endX, self.endY]]\n\t\tvertices = [vertex1, vertex2, vertex3, vertex4]\n\t\treturn convexHull(np.asarray(vertices, dtype = np.int32))", "def convex_hull(self):\n return _property_geo(arctern.ST_ConvexHull, self)", "def convex_hull(self):\n return self._geomgen(capi.geom_convex_hull)", "def convex_hull(self):\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._convex_hull", "def _quickhull(self, pt1, pt2, point_list):\n if not point_list:\n return []\n pt3 = max(point_list, key=lambda p: oriented_area(pt1, p, pt2))\n # Nie trzeba dzielic przez abs(pt2-pt1).\n list1 = self._points_on_the_right(pt1, pt3, point_list)\n list2 = self._points_on_the_right(pt3, pt2, point_list)\n return (self._quickhull(pt1, pt3, list1) + [pt3]\n + self._quickhull(pt3, pt2, list2))", "def jarvis_convex_hull(points):\n start_index = np.argmax(points[:, 0]) # Point with the highest y-coordinate\n start_point = points[start_index]\n # result = [start_index[:]]\n result = [start_index]\n added_points = {start_index}\n while True:\n for ref_index, ref_point in enumerate(points):\n exit_ = True\n if ref_index == start_index or ref_index in added_points:\n continue\n\n signs = 0\n threshold = len(points) - 2\n for compare_index, compare_point in 
enumerate(points):\n if compare_index == ref_index or compare_index == start_index:\n continue\n check = compare(start_point, ref_point, compare_point)\n if abs(check) < 1e-2:\n dist_start_ref = distance(start_point, ref_point)\n dist_start_compare = distance(start_point, compare_point)\n if dist_start_compare > dist_start_ref:\n threshold = threshold + 1\n else:\n threshold = threshold - 1\n continue\n signs = signs + 1 if check > 0 else signs - 1\n\n if abs(signs) < threshold:\n continue\n\n exit_ = False\n result.append(ref_index[:])\n added_points.add(ref_index)\n start_index = ref_index\n break\n\n if exit_:\n return result", "def get_hull_points(self, show_progress):\n if self.points and not self.hull_points:\n self.graham_scan(show_progress)\n print(\"Input: {} points\").format(len(self.points))\n print(\"Convex hull: {} points\").format(len(self.hull_points))\n return self.hull_points", "def _convex_hull_side(image, start, end):\n\n convex_points = [start]\n\n x_start, y_start = start\n x_end, y_end = end\n\n side = (x_start <= x_end, y_start <= y_end)\n\n\n ranges = {\n (True, True): [\n [x_start + 1, x_end + 1],\n [y_start, y_end + 1],\n False\n ],\n (False, True): [\n [y_start + 1, y_end + 1],\n [x_start, x_end - 1, -1],\n True\n ],\n (False, False): [\n [x_start - 1, x_end - 1, -1],\n [y_start, y_end - 1, -1],\n False\n ],\n (True, False): [\n [y_start - 1, y_end - 1, -1],\n [x_start, x_end + 1],\n True\n ]\n }\n\n prev = 0\n\n for outer in range(*ranges[side][0]):\n\n curr_pixel = None\n\n for inner in range(*ranges[side][1]):\n if ranges[side][2] and image[outer, inner] == 0:\n curr_pixel = (inner, outer)\n break\n elif not ranges[side][2] and image[inner, outer] == 0:\n curr_pixel = (outer, inner)\n break\n\n if curr_pixel is None:\n continue\n\n while True:\n # slope infinite for first point\n prev_slope = (\n float(\"-inf\") if prev == 0\n else slope(\n convex_points[prev - 1],\n convex_points[prev],\n ranges[side][2]))\n\n # remove previous point if it yields concave segment\n if prev_slope > slope(\n convex_points[prev],\n curr_pixel,\n ranges[side][2]\n ):\n convex_points.pop(prev)\n prev -= 1\n # add point to hull if it yields convex segment\n else:\n convex_points.append(curr_pixel)\n prev += 1\n break\n\n return convex_points[1:]", "def convexHull(points):\n points = np.append(points, [[0, 0, 0]], axis=0) # All points plus origin\n hull = ConvexHull(points) # Visible points plus possible origin. 
Use its vertices property.\n\n return hull", "def _FindHull(s: List[sg.Point2], p: sg.Point2, q: sg.Point2, hull_points: List[sg.Point2]):\n if len(s) == 0:\n return\n seg = sg.Segment2(p, q)\n c = max(s, key=lambda point: sg.squared_distance(seg, point))\n hull_points.insert(hull_points.index(p) + 1, c)\n s.remove(c)\n s1, s2 = split_points_triangle(s, (p, q, c))\n _FindHull(s1, p, c, hull_points)\n _FindHull(s2, c, q, hull_points)", "def main():\n points = np.array(\n [[1, 1], [2, 5], [3, 2], [4, 4], [5, 2], [6, 3], [2, 3], [3, 4], [5, 3]]\n )\n hull = graham_scan(points)\n hull = np.concatenate((hull, [hull[0]]))\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(points[:, 0], points[:, 1])\n ax.plot(hull[:, 0], hull[:, 1], 'r')\n ax.set_title('Convex Hull using Graham Scan')\n plt.show()", "def test_conv_full(self):\n\n points = np.array([[1, 4], [2, 1], [3, 2], [3, 3], [3, 5], [4, 2], [5, 1], [5, 3]]) # example of points \n \n cv_hull = convex_hull.convex_hull(points) # convex hull returned by the function \n\n right_conv_hull = np.array([[2, 1], [5, 1], [5, 3], [3, 5], [1, 4], [2, 1] ]) # right convex hull\n self.assertTrue((right_conv_hull == cv_hull).all())", "def eddy_floyd(points, side=\"\", p_min=[], p_max=[], show=True, save=False, detailed=True):\n# :param points: the points from which to find the convex hull\n# :param side: if \"up\", we care about the points above the line (p_min,p_max), else, below\n# :param p_min: the point on the left of the line (min = min abscissa)\n# :param p_max: the point on the right of the line\n# :param show: if True, the progress in constructing the hull will be plotted on each iteration in a window\n# :param save: if True, the progress in constructing the hull will be saved on each iteration in a .png file\n# :param detailed: if True, even non convex explored polygons are plotted\n if p_min==[] or p_max==[]:\n #Find the point the most on the left (p_min) and the most on the right (p_max)\n p_min,p_max=points[0],points[0]\n for p in points:\n if p[0]<p_min[0]: p_min=p\n if p[0]>p_max[0]: p_max=p\n\n #Divide the points in 2 subproblems (E2=above line, E1=below line)\n #Remark: p_min and p_max are neither in E2 nore in E1 \n E1,E2=[],[]\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: E1+=[p]\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=\"up\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_1=eddy_floyd(E1,side=\"down\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_min]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n return [p_max]+to_be_returned_2+[p_min]+to_be_returned_1\n\n \"\"\"End algorithm ?\"\"\"\n #Find if points remain outside the line (either above if up or below if done)\n end=True\n i=0\n while end and i<len(points):\n p=points[i]\n if side==\"up\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: end=False \n if side==\"down\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: end=False \n i+=1\n\n \"\"\"Intermidiate 
case, look for the furthest point and divide the pb in 2 pbs\"\"\"\n if not end:\n p_extr,dist=p_min,0\n E1,E2=[],[]\n if side==\"up\":\n #Find the furthest point from the line (above)\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems\n for p in points:\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])>0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_max]+to_be_returned+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n print\n return to_be_returned \n\n if side==\"down\":\n #Find the furthest point from the line (below) \n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems \n for p in points:\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])<0: E2+=[p]\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])<0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_min]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n print\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n print\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_min]+to_be_returned+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n print\n return to_be_returned \n \n \"\"\"End case\"\"\"\n if end:\n return []\n\n \"\"\"None of these cases\"\"\"\n print(\"ERREUR\")\n return []", "def convexHull(hist, start_p = 0, end_p = 99999999, ignore = None):\n start_p = int(round(start_p))\n end_p = int(round(end_p))\n\n if end_p - start_p < 5 and (start_p !=0 or end_p != 99999999):\n return np.array(hist)\n\n hist = np.array(hist)\n\n if end_p > len(hist) :\n end_p = len(hist)\n\n hist_x = list(range(start_p, end_p))\n hist_y = np.array(hist[hist_x], dtype=np.float32)\n\n if len(hist_x) < 5:\n return 
np.array(hist)\n\n hist_y2 = hist_y.copy()\n if ignore is not None:\n ignore2 = ignore[hist_x]\n hist_y2[ignore2] = int(max(hist_y2)*1.5)\n\n hull_x, hull_y = getHull(hist_x, hist_y2)\n hull = getSubtractedHist(hist_x, hist_y, hull_x, hull_y)\n\n ret = list(np.zeros(start_p))\n ret.extend(hull)\n ret.extend(np.zeros(len(hist)-end_p))\n\n if ignore is not None:\n sub = np.array(ret)\n sub[ignore] = 0\n ret = list(sub)\n\n return ret" ]
[ "0.7802536", "0.7609912", "0.71923506", "0.71395326", "0.7121198", "0.70486236", "0.70343816", "0.70062226", "0.69616634", "0.6932975", "0.68979055", "0.68684614", "0.6862273", "0.6788088", "0.67031807", "0.6696616", "0.6694903", "0.66554326", "0.6629436", "0.66106343", "0.65615124", "0.65484524", "0.6518135", "0.64671934", "0.6447575", "0.6414313", "0.63775957", "0.6369052", "0.6227943", "0.622069" ]
0.778182
1
Screens summary file to identify bins with particular thresholds of completeness, contamination, and contigs.
def parse_summary(infile, completeness, contamination, contigs, binpath, outfile): failed, passed = [], [] with open(infile, 'r') as fhin, open(outfile, 'a') as fhout: next(fhin) for line in fhin: if len(line.split('\t')) > 12: cols = line.split('\t') if float(cols[5]) >= completeness \ and float(cols[6]) <= contamination \ and float(cols[11]) <= contigs: fpath = os.path.join(os.getcwd(), binpath, cols[0]+'.fa') if not os.path.isfile(fpath): raise ValueError("{} is NOT valid!".format(fpath)) else: print("{} is valid!".format(fpath)) fhout.write("{0}\t{1}\n".format(fpath, cols[0])) passed.append("PASSED\t{}\t{}\t{}\t{}\t{}\t{}\n".format(cols[0], cols[5], cols[6], cols[7], cols[8], cols[11])) else: failed.append("FAILED\t{}\t{}\t{}\t{}\t{}\t{}\n".format(cols[0], cols[5], cols[6], cols[7], cols[8], cols[11])) return passed, failed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bins(self):\n\n \n for filename in ['data/population_padang_1.asc', \n 'data/test_grid.asc']: \n \n R = read_coverage(filename)\n \n min, max = R.get_extrema() #use_numeric=True)\n \n for N in [2,3,5,7,10,16]:\n linear_intervals = R.get_bins(N=N, quantiles=False) \n \n assert linear_intervals[0] == min\n assert linear_intervals[-1] == max \n \n d = (max-min)/N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], min + i*d) \n \n \n quantiles = R.get_bins(N=N, quantiles=True)\n\n A = R.get_data(nan=True).flat[:] \n \n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask) \n l2 = len(A)\n \n if filename == 'data/test_grid.asc':\n # Check that NaN's were removed\n \n assert l1 == 35\n assert l2 == 30\n \n \n # Assert that there are no NaN's \n assert not numpy.alltrue(numpy.isnan(A))\n \n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements/N\n \n # Count elements in each bin and check\n\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n \n \n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no more than 1\n assert abs(count - refcount) <= 1 \n assert abs(count - average_elements_per_bin) <= 3\n \n \n else:\n # The last bin is allowed vary by more\n pass\n \n i0 = i1", "def hist_and_thresh(self):\n bins, occ, _ = self.histogram()\n self.thresh = np.mean(bins) # initial guess\n self.peaks_and_thresh() # in case peak calculation fails\n # if np.size(self.peak_indexes) == 2: # est_param will only find one peak if the number of bins is small\n # # set the threshold where the fidelity is max\n # self.search_fidelity(self.peak_centre[0], self.peak_widths[0] ,self.peak_centre[1])\n try: \n thresh = threshold_minimum(np.array(self.stats['Counts']), len(bins))\n int(np.log(thresh)) # if thresh <= 0 this gives ValueError\n self.thresh = thresh\n except (ValueError, RuntimeError, OverflowError): pass\n try:\n # atom is present if the counts are above threshold\n self.stats['Atom detected'] = [x // self.thresh for x in self.stats['Counts']]\n # self.fidelity, self. 
err_fidelity = np.around(self.get_fidelity(), 4) # this is a relatively slow operation\n except (ValueError, OverflowError): pass\n return bins, occ, self.thresh", "def get_statistics(path):\n images, masks = get_dataset(path)\n buildings = 0\n background = 0\n water = 0\n mean = np.zeros(3)\n std = np.zeros(3)\n\n with tqdm(\n total=len(images), desc=\"Getting statistics..\", leave=False, position=0\n ) as pbar:\n for i, m in zip(images, masks):\n image = Image.open(i)\n stat = ImageStat.Stat(image)\n mean = np.add(np.asarray(stat.mean), mean)\n std = np.add(np.asarray(stat.stddev), std)\n\n mask = Image.open(m)\n\n for c in mask.getcolors():\n if c[1] == 0:\n background += c[0]\n\n if c[1] == 127:\n water += c[0]\n\n if c[1] == 255:\n buildings += c[0]\n pbar.update()\n\n mean = np.divide(mean, len(images))\n std = np.divide(std, len(images))\n\n all_pixels = buildings + background + water\n buildings_perc = (buildings / all_pixels) * 100\n water_perc = (water / all_pixels) * 100\n background_perc = (background / all_pixels) * 100\n\n filename = os.path.join(path, \"myfile.txt\")\n\n with open(filename, \"w\") as file:\n file.write(\"Mean: {}\\n\".format(mean))\n file.write(\"Standard deviation: {}\\n\".format(std))\n\n file.write(\"Building pixels: {}\\n\".format(buildings))\n file.write(\"Water pixels: {}\\n\".format(water))\n file.write(\"Background pixels: {}\\n\".format(background))\n file.write(\"Building percentage: {}\\n\".format(buildings_perc))\n file.write(\"Water percentage: {}\\n\".format(water_perc))\n file.write(\"Background percentage: {}\\n\".format(background_perc))\n\n with open(filename, \"r\") as file_r:\n print(file_r.read())", "def summary(args):\n from jcvi.formats.base import DictFile\n from jcvi.utils.cbook import percentage, Registry\n\n p = OptionParser(summary.__doc__)\n p.add_option(\"--extra\", help=\"Cross with extra tsv file\")\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n frfile, statusfile = args\n status = DictFile(statusfile)\n fp = open(frfile)\n registry = Registry() # keeps all the tags for any given gene\n for row in fp:\n seqid, gene, tag = row.split()\n if tag == \".\":\n registry[gene].append(\"outside\")\n else:\n registry[gene].append(\"inside\")\n if tag[0] == \"[\":\n registry[gene].append(\"no_syntenic_model\")\n if tag.startswith(\"[S]\"):\n registry[gene].append(\"[S]\")\n gstatus = status.get(gene, None)\n if gstatus == \"complete\":\n registry[gene].append(\"complete\")\n elif gstatus == \"pseudogene\":\n registry[gene].append(\"pseudogene\")\n elif gstatus == \"partial\":\n registry[gene].append(\"partial\")\n else:\n registry[gene].append(\"gmap_fail\")\n elif tag.startswith(\"[NS]\"):\n registry[gene].append(\"[NS]\")\n if \"random\" in tag or \"Scaffold\" in tag:\n registry[gene].append(\"random\")\n else:\n registry[gene].append(\"real_ns\")\n elif tag.startswith(\"[NF]\"):\n registry[gene].append(\"[NF]\")\n else:\n registry[gene].append(\"syntenic_model\")\n\n inside = registry.count(\"inside\")\n outside = registry.count(\"outside\")\n syntenic = registry.count(\"syntenic_model\")\n non_syntenic = registry.count(\"no_syntenic_model\")\n s = registry.count(\"[S]\")\n ns = registry.count(\"[NS]\")\n nf = registry.count(\"[NF]\")\n complete = registry.count(\"complete\")\n pseudogene = registry.count(\"pseudogene\")\n partial = registry.count(\"partial\")\n gmap_fail = registry.count(\"gmap_fail\")\n random = registry.count(\"random\")\n real_ns = registry.count(\"real_ns\")\n\n 
complete_models = registry.get_tag(\"complete\")\n pseudogenes = registry.get_tag(\"pseudogene\")\n partial_deletions = registry.get_tag(\"partial\")\n\n m = \"{0} inside synteny blocks\\n\".format(inside)\n m += \"{0} outside synteny blocks\\n\".format(outside)\n m += \"{0} has syntenic gene\\n\".format(syntenic)\n m += \"{0} lack syntenic gene\\n\".format(non_syntenic)\n m += \"{0} has sequence match in syntenic location\\n\".format(s)\n m += \"{0} has sequence match in non-syntenic location\\n\".format(ns)\n m += \"{0} has sequence match in un-ordered scaffolds\\n\".format(random)\n m += \"{0} has sequence match in real non-syntenic location\\n\".format(real_ns)\n m += \"{0} has no sequence match\\n\".format(nf)\n m += \"{0} syntenic sequence - complete model\\n\".format(percentage(complete, s))\n m += \"{0} syntenic sequence - partial model\\n\".format(percentage(partial, s))\n m += \"{0} syntenic sequence - pseudogene\\n\".format(percentage(pseudogene, s))\n m += \"{0} syntenic sequence - gmap fail\\n\".format(percentage(gmap_fail, s))\n print(m, file=sys.stderr)\n\n aa = [\"complete_models\", \"partial_deletions\", \"pseudogenes\"]\n bb = [complete_models, partial_deletions, pseudogenes]\n for a, b in zip(aa, bb):\n fw = open(a, \"w\")\n print(\"\\n\".join(b), file=fw)\n fw.close()\n\n extra = opts.extra\n if extra:\n registry.update_from(extra)\n\n fp.seek(0)\n fw = open(\"registry\", \"w\")\n for row in fp:\n seqid, gene, tag = row.split()\n ts = registry[gene]\n print(\"\\t\".join((seqid, gene, tag, \"-\".join(ts))), file=fw)\n fw.close()\n\n logging.debug(\"Registry written.\")", "def plot_completeness(cat_name,output_name,name_plot,mag_lims,binning_mag,plot,second_cat='no'):\n\n cat=ascii.read('%s.txt' % cat_name)\n mag_bins=np.arange(mag_lims[0],mag_lims[1],binning_mag)\n\n mask=cat['detected']==1\n mag_binned_tot=np.digitize(cat['MAG'],mag_bins,right=True)\n mag_binned_det=np.digitize(cat[mask]['MAG'],mag_bins,right=True)\n\n nb_mag=np.array([ len(np.where(mag_binned_tot==i)[0]) for i in range(1,len(mag_bins)) ])\n nb_mag_det = np.array([ len(np.where(mag_binned_det==i)[0]) for i in range(1,len(mag_bins)) ])\n #mag_tot= np.array([stuff_cat['MAG'][mag_binned_tot == i].mean() for i in range(1, len(mag_bins))])\n #mag_det= np.array([stuff_cat[mask]['MAG'][mag_binned_det == i].mean() for i in range(1, len(mag_bins))])\n print (nb_mag)\n print (nb_mag_det)\n\n #Write completeness result in text file\n np.savetxt('%s.txt' % output_name, list(zip(mag_bins,nb_mag,nb_mag_det)),fmt='%.2f %d %d')\n\n\n mag_bin_plot=(mag_bins[:-1]+mag_bins[1:])/2\n\n import matplotlib.pyplot as plt\n\n # the histogram of the input sources\n n, bins, patches = plt.hist(cat['MAG'], mag_bins, normed=0, facecolor='green', alpha=0.75)\n plt.xlabel('Magnitude')\n plt.ylabel('Nb of sources')\n plt.xlim([mag_bins[0],mag_bins[-1]])\n plt.savefig('results/plots/hist_sources.png')\n #plt.show()\n\n plt.clf()\n plt.plot(mag_bin_plot,nb_mag_det/nb_mag)\n plt.xlabel('Magnitude AB')\n plt.ylabel('Efficiency')\n plt.grid(True)\n plt.savefig('%s.png' % output_name)\n if plot: plt.show()\n\n\n if second_cat != 'no':\n cat2=ascii.read('%s.txt' % second_cat)\n mag_bins2=np.arange(mag_lims[0],mag_lims[1],binning_mag)\n\n mask2=cat2['detected']==1\n mag_binned_tot2=np.digitize(cat2['MAG'],mag_bins2,right=True)\n mag_binned_det2=np.digitize(cat2[mask2]['MAG'],mag_bins2,right=True)\n\n nb_mag2=np.array([ len(np.where(mag_binned_tot2==i)[0]) for i in range(1,len(mag_bins2)) ])\n nb_mag_det2 = np.array([ 
len(np.where(mag_binned_det2==i)[0]) for i in range(1,len(mag_bins2)) ])\n\n mag_bin_plot2=(mag_bins2[:-1]+mag_bins2[1:])/2\n #print (mag_bin_plot)\n #plt.plot(mag_bin_plot,nb_mag_det/nb_mag,label='seeing=0.7\"',color='red')\n #plt.plot(mag_bin_plot2,nb_mag_det2/nb_mag2,label='seeing=0.1\"',color='green')\n plt.plot(mag_bin_plot,nb_mag_det/nb_mag,label='5.9',color='red')\n plt.plot(mag_bin_plot2,nb_mag_det2/nb_mag2,label='5',color='green')\n plt.xlabel('Magnitude AB')\n plt.ylabel('Efficiency')\n #plt.yscale('log')\n #plt.xscale('log')\n plt.grid(True)\n plt.legend()\n plt.savefig('results/plots/completeness_comp.png')\n if plot: plt.show()", "def histogram_summary(self, tag, values, step, bins=1000):\n self.writer.add_histogram(tag, values, step, bins='auto')", "def findBins(): \n\n df = pd.read_csv('significantData.csv')\n df = df.sort_values('RecordingTimestamp')\n df.to_csv('significantData.csv', index=False)\n read_in = pd.read_csv('significantData.csv')\n count = 0\n this = []\n return_bins = {}\n word = (read_in['AOI[Neutral_Left]Hit_0']).tolist()\n \n if word[0] == '1':\n return_bins.update({'start_value': 1})\n else: \n return_bins.update({'start_value': 0})\n for v, w in zip(word[:-1], word[1:]):\n if v == w and v != '': \n print v\n count = count + 1\n else: \n total = count\n this.append(count)\n my_list = sorted(list(set(this)))\n return_bins.update({'my_list': my_list})\n return return_bins", "def histogram(self):\n if np.size(self.stats['Counts']): # don't do anything to an empty list\n if np.size(self.bins) and not self.redo:\n return self.bins, self.occs, self.thresh\n elif np.size(self.bin_array) > 0: \n self.occs, self.bins = np.histogram(self.stats['Counts'], self.bin_array) # fixed bins. \n else:\n try:\n lo, hi = min(self.stats['Counts'])*0.97, max(self.stats['Counts'])*1.02\n # scale number of bins with number of files in histogram and with separation of peaks\n num_bins = int(15 + self.ind//100 + (abs(hi - abs(lo))/hi)**2*15) \n self.occs, self.bins = np.histogram(self.stats['Counts'], bins=np.linspace(lo, hi, num_bins+1)) # no bins provided by user\n except: \n self.occs, self.bins = np.histogram(self.stats['Counts'])\n else: self.occs, self.bins = np.zeros(10), np.arange(0,1.1,0.1)\n return self.bins, self.occs, self.thresh", "def efficient_Make_Binned_ROC_histograms(title, data, bins, PU_range='full'):\n diff_ran = (-25,25)\n diff_bins = diff_ran[1]-diff_ran[0]\n ratio_ran = (0,10)\n ratio_bins = 60\n\n Diff_hist_list = []\n Ratio_hist_list = []\n CSV_hist_list = []\n ZeroDiv_list = []\n for bin_ in range(len(bins)-1):\n Diff_hist_list.append(rt.TH1D(\"L4-L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"L4-L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),diff_bins,diff_ran[0],diff_ran[1]))\n Ratio_hist_list.append(rt.TH1D(\"L4_L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"L4_L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),ratio_bins,ratio_ran[0],ratio_ran[1]))\n CSV_hist_list.append(rt.TH1D(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),ratio_bins,0,1))\n ZeroDiv_list.append(0)\n\n for particle in data:\n if PU_range != 'full':\n if particle[-1]<PU_range[0] or particle[-1]>PU_range[1]: continue\n bin_number = FCM.bin_selection(particle,bins)\n if bin_number == -100: continue\n\n Diff_hist_list[bin_number].Fill(particle[8]-particle[5])\n CSV_hist_list[bin_number].Fill(particle[1])\n if particle[17] != 0:\n L4_L1 = particle[20]/particle[17]\n Ratio_hist_list[bin_number].Fill(L4_L1)\n else:\n ZeroDiv_list[bin_number] += 
1\n\n tfile = rt.TFile(\"Thesis_Plots/root_files/{}_histograms.root\".format(title),\"recreate\")\n for hist in Diff_hist_list:\n hist.Write()\n for hist in Ratio_hist_list:\n hist.Write()\n for hist in CSV_hist_list:\n hist.Write()\n print \"saved histograms in Thesis_Plots/root_files/{}_histograms.root\".format(title)\n\n csv_file = open(\"Thesis_Plots/root_files/{}_ZeroDiv.csv\".format(title),\"wb\")\n writer = csv.writer(csv_file)\n writer.writerow(ZeroDiv_list)\n csv_file.close()\n print \"saved zero division occurences in Thesis_Plots/root_files/{}_ZeroDiv.csv\".format(title)", "def cal_ResBeam_Stats(infile, header_bmaj, header_bmin):\n\n beamlog_file = np.loadtxt(infile)\n bmaj = beamlog_file[:,1]\n bmin = beamlog_file[:,2]\n ind_nonzero_bmaj = np.nonzero(bmaj) # finding array indices of nonzero values\n ind_nonzero_bmin = np.nonzero(bmin)\n total_nbmaj = np.count_nonzero(bmaj) # count total number of bmaj non zero occurance\n total_nbmin = np.count_nonzero(bmin)\n bmaj_variance = (np.sum((bmaj[ind_nonzero_bmaj]-header_bmaj)**2.0))/total_nbmaj # using header beam value as mean \n bmin_variance = (np.sum((bmin[ind_nonzero_bmin]-header_bmin)**2.0))/total_nbmin\n bmaj_stdev = np.sqrt(bmaj_variance)\n bmin_stdev = np.sqrt(bmin_variance)\n beam_threshold = round((((header_bmaj + bmaj_stdev) * (header_bmin + bmin_stdev))/ (header_bmaj*header_bmin))-1.0, 4)\n bmaj_max = np.max(bmaj[ind_nonzero_bmaj])\n bmaj_min = np.min(bmaj[ind_nonzero_bmaj])\n bmin_max = np.max(bmin[ind_nonzero_bmin])\n bmin_min = np.min(bmin[ind_nonzero_bmin])\n max_ratio_beam_area = (bmaj_max*bmin_max)/(header_bmaj*header_bmin) # measured beam area / header beam area\n min_ratio_beam_area = (bmaj_min*bmin_min)/(header_bmaj*header_bmin)\n\n return bmaj_stdev, bmin_stdev, beam_threshold, max_ratio_beam_area, min_ratio_beam_area", "def find_numerical_contours(counts):\n\tone_sigma_boundary = sigma_boundary(counts, 68)\n\tone_sigma = counts > one_sigma_boundary\n\ttwo_sigma_boundary = sigma_boundary(counts, 95)\n\ttwo_sigma = (counts > two_sigma_boundary) & (counts < one_sigma_boundary)\n\tthree_sigma_boundary = sigma_boundary(counts, 99)\n\tthree_sigma = (counts > three_sigma_boundary) & (counts < two_sigma_boundary)\n\n\t# Check method: Output actual percentages in each region\n\tprint('total no. 
samples:')\n\tprint(np.sum(counts))\n\tprint('included in 1st sigma region:')\n\tprint(np.sum(one_sigma * counts) / np.sum(counts))\n\tprint('included in 2 sigma region:')\n\tprint((np.sum(one_sigma * counts) + np.sum(two_sigma * counts)) / np.sum(counts))\n\tprint('included in 3 sigma region:')\n\tprint((np.sum(one_sigma * counts) + np.sum(two_sigma * counts) + np.sum(three_sigma * counts)) / np.sum(counts))\n\n\tfilled_numerical_contours = one_sigma * 1 + two_sigma * 2 + three_sigma * 3\n\n\treturn filled_numerical_contours", "def test_bins(self):\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n rmin, rmax = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == rmin\n assert linear_intervals[-1] == rmax\n\n d = (rmax - rmin) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], rmin + i * d)\n\n quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1", "def test_get_histogram_stat_by_breakout(self):\n pass", "def at_binPhaseFold(self,ph,bwmin):\n\t # Default dtype\n\t dtype = [('count', '<f8'), \n\t ('mean', '<f8'), \n\t ('std', '<f8'), \n\t ('med', '<f8'), \n\t ('tb', '<f8')] \n\n\t key = 'lcPF%i' % ph\n\t d = dict(ph=ph,bwmin=bwmin,key=key)\n\t desc = 'Binned %(key)s light curve ph=%(ph)i, binsize=%(bwmin)i' % d\n\t name = 'blc%(bwmin)iPF%(ph)i' % d\n\n\t assert hasattr(self,key),'Must run at_phaseFold first' \n\t lcPF = getattr(self,key)\n\t lcPF = pd.DataFrame(lcPF['tPF f'.split()])\n \n\t if len(lcPF) < 2:\n\t print(\"Phase-folded photometry has less than 2 valid values\")\n\t print(\"Adding in place holder array and terminating\") \n\t blcPF = np.zeros(2,dtype)\n\t self.add_dset(name,blcPF,description=desc) \n\t return None\n\n\t # Add a tiny bit to xma to get the last element\n\t bw = bwmin / 60./24. 
# converting minutes to days\n\t xmi,xma = lcPF.tPF.min(),lcPF.tPF.max() \n\t nbins = int( np.round( (xma-xmi)/bw ) )\n\t bins = np.linspace(xmi-0.001,xma,nbins+1)\n\t tb = 0.5*(bins[1:]+bins[:-1])\n\n\t # Compute info along columns\n\t g = lcPF.groupby(pd.cut(lcPF.tPF,bins))\n\t blcPF = g['f'].agg([np.size, np.mean, np.std, np.median])\n\t blcPF['tb'] = tb\n\t blcPF = blcPF.rename(columns={'size':'count','median':'med'})\n\t blcPF = blcPF.dropna()\n\t blcPF = blcPF.to_records(index=False)\n\t self.add_dset(name,blcPF,description=desc)", "def at_binPhaseFold(self,ph,bwmin):\n\t # Default dtype\n\t dtype = [('count', '<f8'), \n\t ('mean', '<f8'), \n\t ('std', '<f8'), \n\t ('med', '<f8'), \n\t ('tb', '<f8')] \n\n\t key = 'lcPF%i' % ph\n\t d = dict(ph=ph,bwmin=bwmin,key=key)\n\t desc = 'Binned %(key)s light curve ph=%(ph)i, binsize=%(bwmin)i' % d\n\t name = 'blc%(bwmin)iPF%(ph)i' % d\n\n\t assert hasattr(self,key),'Must run at_phaseFold first' \n\t lcPF = getattr(self,key)\n\t lcPF = pd.DataFrame(lcPF['tPF f'.split()])\n \n\t if len(lcPF) < 2:\n\t print(\"Phase-folded photometry has less than 2 valid values\")\n\t print(\"Adding in place holder array and terminating\") \n\t blcPF = np.zeros(2,dtype)\n\t self.add_dset(name,blcPF,description=desc) \n\t return None\n\n\t # Add a tiny bit to xma to get the last element\n\t bw = bwmin / 60./24. # converting minutes to days\n\t xmi,xma = lcPF.tPF.min(),lcPF.tPF.max() \n\t nbins = int( np.round( (xma-xmi)/bw ) )\n\t bins = np.linspace(xmi-0.001,xma,nbins+1)\n\t tb = 0.5*(bins[1:]+bins[:-1])\n\n\t # Compute info along columns\n\t g = lcPF.groupby(pd.cut(lcPF.tPF,bins))\n\t blcPF = g['f'].agg([np.size, np.mean, np.std, np.median])\n\t blcPF['tb'] = tb\n\t blcPF = blcPF.rename(columns={'size':'count','median':'med'})\n\t blcPF = blcPF.dropna()\n\t blcPF = blcPF.to_records(index=False)\n\t self.add_dset(name,blcPF,description=desc)", "def histogram(histo,nbr_launch,file):\n with open(\"Results/Histogram_{}_{}.txt\".format(nbr_launch,file.strip(\".yaml\")),'w') as f:\n f.write(\"mgm results :\"+\"\\n\")\n for val,occur in histo[\"mgm\"].items():\n f.write(\"value \"+str(val)+\" : \"+str(occur[0])+\" \"+\"Initial costs : \"+str(occur[1]).strip(\"[\"+\"]\")+\"\\n\")\n f.write(\"\\n\")\n f.write(\"mcs_mgm results :\" + \"\\n\")\n for val, occur in histo[\"mcs_mgm\"].items():\n f.write(\"value \" + str(val) + \" : \" + str(occur[0])+\" \"+\"Initial costs : \"+str(occur[1]).strip(\"[\"+\"]\")+\"\\n\")\n f.write(\"\\n\")\n f.write(\"gca_mgm results :\" + \"\\n\")\n for val, occur in histo[\"gca_mgm\"].items():\n f.write(\"value \" + str(val) + \" : \" + str(occur[0])+\" \"+\"Initial costs : \"+str(occur[1]).strip(\"[\"+\"]\")+\"\\n\")", "def ANN_Make_Binned_ROC_histograms(title,model, x_data, pT, CSV, bins, PU_range='full',addFeature=False):\n nbins = 60\n\n ANN_hist_list = []\n CSV_hist_list = []\n for bin_ in range(len(bins)-1):\n ANN_hist_list.append(rt.TH1D(\"ANN_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"ANN_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),nbins,0,1))\n CSV_hist_list.append(rt.TH1D(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),nbins,0,1))\n\n\tif addFeature == False:\n\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\telif addFeature == \"pT\":\n\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\telif addFeature == \"PV\":\n\t\tassert x_data.shape[1] == 21, \"wrong x_data shape: PV cannot be found\"\n\t\tpred_y = 
model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\telse:\n\t\tprint \"invalid feature selection\"\n\t\treturn None\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\n for n,particle in enumerate(x_data):\n if PU_range != 'full':\n if particle[-1]<PU_range[0] or particle[-1]>PU_range[1]: continue\n if bin_numbers[n] == -100: continue\n ANN_hist_list[int(bin_numbers[n])].Fill(pred_y[n])\n CSV_hist_list[int(bin_numbers[n])].Fill(CSV[n])\n\n tfile = rt.TFile(\"Thesis_Plots/root_files/{}_histograms.root\".format(title),\"recreate\")\n for hist in ANN_hist_list:\n hist.Write()\n for hist in CSV_hist_list:\n hist.Write()\n print \"saved histograms in Thesis_Plots/root_files/{}_histograms.root\".format(title)", "def gc_bin_bedfile(\n bedfile, genome, number, length=200, bins=None, random_state=None, min_bin_size=100\n):\n if bins is None:\n bins = [(0.0, 0.2), (0.8, 1.0)]\n for b in np.arange(0.2, 0.799, 0.05):\n bins.append((round(b, 2), round(b + 0.05, 2)))\n bins = sorted(bins)\n\n if number < len(bins):\n raise ValueError(\"Number of sequences requested < number of bins\")\n\n fname = os.path.join(\n CACHE_DIR, f\"{os.path.basename(genome)}.gcfreq.{min_bin_size}.feather\"\n )\n try:\n df = pd.read_feather(fname)\n except FileNotFoundError:\n if not os.path.exists(CACHE_DIR):\n os.makedirs(CACHE_DIR)\n create_gc_bin_index(genome, fname, min_bin_size=min_bin_size)\n df = pd.read_feather(fname)\n\n if length >= min_bin_size:\n col = f\"w{((length + min_bin_size // 2) // min_bin_size) * min_bin_size}\"\n else:\n logger.warning(\n f\"For regions smaller than {min_bin_size} nt, GC% will not be exact\"\n )\n col = f\"w{min_bin_size}\"\n\n if col not in df.columns:\n df[col] = (\n df.iloc[:, 3]\n .rolling(length // min_bin_size, min_periods=length // min_bin_size)\n .mean()\n )\n df[col.replace(\"w\", \"n\")] = (\n df.iloc[:, 3]\n .rolling(length // min_bin_size, min_periods=length // min_bin_size)\n .sum()\n )\n\n df = df[df[col.replace(\"w\", \"n\")] < 0.1 * length]\n n = number // len(bins)\n\n with open(bedfile, \"w\") as f:\n pass\n\n with open(bedfile, \"a\") as f:\n for b_start, b_end in bins:\n df_bin = df[(df[col] > b_start) & (df[col] <= b_end)].copy()\n df_bin[\"start\"] = df_bin[\"end\"] - length\n df_bin = df_bin[df_bin[\"start\"] > 0]\n if df_bin.shape[0] > 0:\n df_bin = df_bin.sample(n, replace=True, random_state=random_state)\n df_bin[\"bin\"] = f\"{b_start:.2f}-{b_end:.2f}\"\n df_bin[[\"chrom\", \"start\", \"end\", \"bin\"]].to_csv(\n f, sep=\"\\t\", header=False, index=False\n )", "def make_RH_bins(path,start_date,end_date,bin_opt):\r\n \r\n # Load full output to read relative humidities (RH)\r\n time=[]\r\n with open(path, \"r\") as f:\r\n reader = csv.reader(f,delimiter=',')\r\n ct=1\r\n for row in reader:\r\n if ct==2:\r\n header = row\r\n elif ct>3:\r\n curtime = datetime.strptime('{} {}'.format(row[1],row[2]),\r\n '%Y-%m-%d %H:%M')\r\n time.append(curtime)\r\n ct+=1\r\n \r\n # Remove text columns from data and corresponding headers\r\n header = header[3:]\r\n data = np.genfromtxt(path,delimiter=',',skip_header=3) \r\n data = data[:,3:]\r\n \r\n sdatetime = datetime.strptime(start_date,'%Y-%m-%d')\r\n edatetime = datetime.strptime(end_date,'%Y-%m-%d')\r\n edatetime = edatetime+timedelta(1) # Add a day to include whole end_date\r\n # Cut to selected dates\r\n timei = [(t>sdatetime)&(t<=edatetime) for t in time]\r\n data = data[timei,:]\r\n \r\n RHi = [h=='RH' for h in header]\r\n RHi = np.where(RHi)[0][0]\r\n RH = np.ndarray.flatten(data[:,RHi])\r\n nni = 
~np.isnan(RH)\r\n # Get the indices for each selected option\r\n bindex = np.zeros((len(RH),len(bin_opt)))\r\n for boi in range(len(bin_opt)):\r\n # Apply RH bin options to get bin limits\r\n if bin_opt[boi].lower()=='quantile':\r\n RH_bins = np.hstack((np.nanmin(RH)-0.01,\r\n np.quantile(RH[nni],[0.25,0.5,0.75]),\r\n np.nanmax(RH)+0.01))\r\n elif bin_opt[boi].lower()=='none':\r\n RH_bins = np.array([0,100])\r\n \r\n # Label each point with a bin (e.g. 0=exclude, 1=first bin, etc.)\r\n for bi in range(1,len(RH_bins)):\r\n thisbin = (RH>=RH_bins[bi-1]) & (RH<RH_bins[bi])\r\n bindex[thisbin,boi] = bi\r\n \r\n return bindex, time, data, header", "def tail_cts_per_shot(datapath, lower, TPQI_starts, bin_size = 0.256, normalize = False, correct_for_bg = True, save = 1, pulses_in_sequence = 300):\n\n print 'analyzing tail counts per shot...' \n current_dir = os.getcwd()\n plt.close('all')\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n\n ch1_counts = data['hist_ch1']\n ch0_counts = data['hist_ch0']\n\n time = bin_size*arange(len(ch1_counts))\n \n if correct_for_bg:\n bg_level_ch1 = ch1_counts[int(0.75*len(ch1_counts)):int(0.90*len(ch1_counts))].mean()\n ch1_counts = ch1_counts - bg_level_ch1*ones(len(ch1_counts))\n bg_level_ch0 = ch0_counts[int(0.75*len(ch0_counts)):int(0.90*len(ch0_counts))].mean()\n ch0_counts = ch0_counts - bg_level_ch0*ones(len(ch0_counts))\n\n #print 'measured background level for [ch0,ch1] = ['+num2str(bg_level_ch0,1)+','+num2str(bg_level_ch1,1)+']'\n\n if normalize:\n ch1_counts_normalized = ch1_counts/ch1_counts.max()\n ch0_counts_normalized = ch0_counts/ch0_counts.max()\n \n upper = lower + 40.0\n\n tail_area_time = time[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch1 = ch1_counts[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch0 = ch0_counts[int(lower/bin_size):int(upper/bin_size)]\n\n tail_counts_per_shot = (tail_area_ch1.sum()+tail_area_ch0.sum())/float(TPQI_starts*pulses_in_sequence)\n\n figure1 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(211)\n if not normalize:\n plt.semilogy(time, ch1_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch1_counts_normalized, '-r')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch1')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n\n plt.subplot(212)\n if not normalize:\n plt.semilogy(time, ch0_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch0_counts_normalized, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch0')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n if save:\n figure1.savefig('tail_cts_per_shot.pdf')\n\n try:\n data.close()\n except:\n pass\n\n print 'tail counts 
per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4'\n\n return tail_counts_per_shot", "def _print_summary(case, summary):\n for dof, data in summary.items():\n b4b = data[\"Bit for Bit\"]\n conf = data[\"Configurations\"]\n stdout = data[\"Std. Out Files\"]\n print(\" \" + case + \" \" + str(dof))\n print(\" --------------------\")\n print(\" Bit for bit matches : \" + str(b4b[0]) + \" of \" + str(b4b[1]))\n print(\" Configuration matches : \" + str(conf[0]) + \" of \" + str(conf[1]))\n print(\" Std. Out files parsed : \" + str(stdout))\n print(\"\")", "def analyze_thresholds(datapath, threshold_lt1, threshold_lt2, normalize = True, save = 1):\n print 'analyzing thresholds...' \n current_dir = os.getcwd()\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n \n CR_cts_after_seq_lt1 = data['cr_hist_LT1_first']\n CR_cts_after_seq_lt2 = data['cr_hist_LT2_first']\n\n nr_of_counts = arange(len(CR_cts_after_seq_lt1))\n\n CR_cts_total_lt1 = data['cr_hist_LT1_total']\n CR_cts_total_lt2 = data['cr_hist_LT2_total']\n \n if normalize:\n CR_cts_after_seq_lt2 = CR_cts_after_seq_lt2/float(sum(CR_cts_after_seq_lt2))\n CR_cts_total_lt2 = CR_cts_total_lt2/float(sum(CR_cts_total_lt2))\n times_passed_after_seq_lt2 = CR_cts_after_seq_lt2[nr_of_counts>=threshold_lt2].sum()*100\n times_passed_overall_lt2 = CR_cts_total_lt2[nr_of_counts>=threshold_lt2].sum()*100\n \n CR_cts_after_seq_lt1 = CR_cts_after_seq_lt1/float(sum(CR_cts_after_seq_lt1))\n CR_cts_total_lt1 = CR_cts_total_lt1/float(sum(CR_cts_total_lt1))\n times_passed_after_seq_lt1 = CR_cts_after_seq_lt1[nr_of_counts>=threshold_lt1].sum()*100\n times_passed_overall_lt1 = CR_cts_total_lt1[nr_of_counts>=threshold_lt1].sum()*100\n else:\n times_passed_after_seq_lt2 = CR_cts_after_seq_lt2[nr_of_counts>=threshold_lt2].sum()/float(CR_cts_after_seq_lt2.sum())*100\n times_passed_overall_lt2 = CR_cts_total_lt2[nr_of_counts>=threshold_lt2].sum()/float(CR_cts_total_lt2.sum())*100\n times_passed_after_seq_lt1 = CR_cts_after_seq_lt1[nr_of_counts>=threshold_lt1].sum()*100/float(CR_cts_after_seq_lt1.sum())\n times_passed_overall_lt1 = CR_cts_total_lt1[nr_of_counts>=threshold_lt1].sum()*100/float(CR_cts_total_lt1.sum())\n\n\n #print 'After sequence: LT2 percentage passed = ',num2str(sum(times_passed_after_seq_lt2),1),'%'\n #print 'and LT1 percentage passed = ',num2str(sum(times_passed_after_seq_lt1),1),'%'\n\n Log = False\n\n figure6 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(223)\n plt.bar(nr_of_counts,CR_cts_after_seq_lt2,log=Log, color = 'm')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT2: CR counts after sequence, passed threshold: '+num2str(times_passed_after_seq_lt2,1)+'%')\n else:\n plt.title('CR counts after sequence')\n plt.xlim(0,25)\n \n plt.subplot(224)\n plt.bar(nr_of_counts,CR_cts_total_lt2,log=Log, color = 'm')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT2: all CR checks, passed threshold: '+num2str(times_passed_overall_lt2,1)+'%')\n else:\n plt.title('CR counts for all CR checks')\n plt.xlim(0,25)\n\n plt.subplot(221)\n plt.bar(nr_of_counts,CR_cts_after_seq_lt1,log=Log, color = 'b')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT1: CR counts after sequence, passed threshold: '+num2str(times_passed_after_seq_lt1,1)+'%')\n else:\n plt.title('CR 
counts after sequence')\n plt.xlim(0,50)\n \n plt.subplot(222)\n plt.bar(nr_of_counts,CR_cts_total_lt1,log=Log, color = 'b')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT1: all CR checks, passed threshold: '+num2str(times_passed_overall_lt1,1)+'%')\n else:\n plt.title('CR counts for all CR checks')\n plt.xlim(0,50)\n \n if save:\n if normalize:\n figure6.savefig('CR_information_LT1_and_LT2_normalized.pdf')\n else:\n figure6.savefig('CR_information_LT1_and_LT2.pdf')\n\n\n return times_passed_overall_lt1, times_passed_after_seq_lt1, times_passed_overall_lt2, times_passed_after_seq_lt2", "def coverage_cells(adata, bins=50, key_added=None, log=False, binary=None, xlabel=None,\n ylabel=None, title=None, color=None, edgecolor=None, save=None):\n if key_added == None:\n key_added='nb_features'\n \n # calculate the number of features per cell\n if binary==None:\n warnings.warn(\"\"\"The argument binary was not specified. To reduce computing time, you can specify if the matrix is already binary\"\"\")\n if binary:\n sum_peaks = np.sum(adata.X, axis=1).tolist()\n else:\n tmp_array = binarize(adata, copy=True)\n sum_peaks = np.sum(tmp_array.X, axis=1).tolist()\n \n \n if issparse(adata.X):\n sum_peaks = [element[0] for element in sum_peaks] \n\n adata.obs[key_added] = sum_peaks\n \n\n \n # plotting parameters\n if xlabel ==None:\n plt.xlabel('number of features')\n else:\n plt.xlabel(xlabel)\n \n if ylabel ==None:\n plt.ylabel('number of cells')\n else:\n plt.ylabel(ylabel)\n \n if title !=None:\n plt.title(title)\n \n if color == None: \n color='c'\n if edgecolor == None:\n edgecolor='k'\n\n if log:\n if 0 in sum_peaks:\n warnings.warn(\"\"\"Some cells do not contain any open feature. Use epi.pp.filter_cells(adata, min_features=1) to remove these cells.\"\"\")\n \n plt.xlabel('number of features (log scale)')\n fig = plt.hist(np.log(sum_peaks), bins, color=color, edgecolor=edgecolor)\n else:\n fig = plt.hist(sum_peaks, bins, color=color, edgecolor=edgecolor)\n \n #fig = plot.get_figure()\n if save!= None:\n plt.savefig(save)", "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n 
plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)", "def histogram(data, bins=50, nmb_data_to_use=None, ignored_row=0,\n col_to_read=1, output_file=None, verbose=0):\n # prepare arguments\n args = \"-x{} -c{} -b{} -V{}\" \\\n .format(ignored_row, col_to_read, bins, verbose)\n if nmb_data_to_use is not None:\n args += \"-l{}\".format(nmb_data_to_use)\n args = args.split(\" \")\n # run command\n res, msg = tisean('histogram', args, input_data=data,\n output_file=output_file)\n # return\n if msg != \"\":\n print(msg)\n return res", "def ccl_summary_stats(params,\n fname_template='../stats/lhs_mpk_err_lin_%05d_z%d.dat',\n thresholds=[5e-5, 1e-4, 5e-4, 1e-3, 5e-3, 1e-2],\n scale_ranges = [(1e-4, 1e-2), (1e-2, 1e-1), (1e-1, 1e0)],\n z_vals = ['1', '2', '3', '4', '5', '6'],\n cache_name=None):\n # Get dimensions of stats array that will be constructed\n N_samp = params['id'].size\n N_thres = len(thresholds)\n N_z = len(z_vals)\n N_kbins = len(scale_ranges)\n\n # Check if data were cached\n if cache_name is not None:\n try:\n stats = np.load(\"%s.npy\" % cache_name)\n print(\" Loaded '%s' from cache.\" % cache_name)\n assert stats.shape == (N_samp, N_thres, N_z, N_kbins)\n return stats, params\n except:\n raise\n\n # Create array to hold summary statistics, with shape:\n # (N_samp, N_thres, N_z, N_kbins)\n stats = np.zeros((N_samp, N_thres, N_z, N_kbins))\n\n # Loop over sample points in parameter space and calculate summary stats\n for i in range(N_samp):\n trial = params['id'][i]\n print(\" Loading CCL power spectra for parameter set %05d\" % i)\n\n # Loop over redshift values\n for j in range(N_z):\n\n # Load cached CCL power spectrum data\n fname = fname_template % (i, z_vals[j])\n pk_ccl_dat = np.loadtxt(fname, skiprows=1)\n ccl_k = pk_ccl_dat[:,0]\n ccl_pk = pk_ccl_dat[:,1]\n\n # Calculate summary stats in each k bin\n for m in range(N_kbins):\n kmin, kmax = scale_ranges[m]\n idxs = np.logical_and(ccl_k >= kmin, ccl_k < kmax)\n\n # Calculate deviation statistic, Delta, for a range of\n # threshold values (only values above the threshold are counted)\n # FIXME: ccl_pk is actually the deviation, which was\n # precomputed somewhere!\n for n, thres in enumerate(thresholds):\n # Calculate deviation statistic\n dev = np.log10(np.abs(ccl_pk[idxs]) / thres)\n dev[np.where(dev < 0.)] = 0.\n\n # Store result in stats array (N_samp, N_thres, N_z, N_kbins)\n stats[i, n, j, m] = np.sum(dev)\n\n # Save to cache file\n if 
cache_name is not None:\n np.save(cache_name, stats)\n\n return stats, params", "def make_kin_summary(loess=False, contours=\"vband\", format=\"png\",\r\n sn_lims=None, sn_loess=None, sn_sig=True):\r\n ##########################################################################\r\n # Set the limits for the S/N in case it is not defined by the user\r\n # sn_cut is defined in the setup_n3311 file\r\n if sn_lims == None:\r\n sn_lims = [sn_cut] * 4\r\n ##########################################################################\r\n # In case of LOESS smoothing, set the smoothing region (S/N < sn_loess)\r\n if sn_loess == None:\r\n sn_loess = [25, 25, 25, 25]\r\n ##########################################################################\r\n # Read data values for Lick indices\r\n data = np.loadtxt(outtable, usecols=(5, 7, 9, 11)).T\r\n # Read spectra name\r\n s = np.genfromtxt(outtable, usecols=(0,), dtype=None).tolist()\r\n ########################################################\r\n # Read coords and S/N\r\n xall, yall, sn = np.loadtxt(outtable, usecols=(1, 2, 14)).T\r\n ##########################################################\r\n # If using S/N / sigma instead of S/N\r\n if sn_sig == True:\r\n sn /= data[1] / 100.\r\n ########################################################\r\n # Read values of other authors\r\n tab1a, tab1b = get_richtler()\r\n tab2 = get_ventimiglia()\r\n ###############################################\r\n # Details of the maps\r\n # Name to be displayed during processing\r\n titles = [r\"velocity\", r\"sigma\", r\"h3\", r\"h4\"]\r\n # Tex strings to be used in labels\r\n cb_label = [r\"V$_{\\rm LOS}$ [km/s]\", r\"$\\sigma_{\\rm LOS}$ [km/s]\",\r\n r\"$h_3$\", r\"$h_4$\"]\r\n # Ranges of the plots\r\n # lims = [[3750, 4000], [200, 500], [None, None], [None, None]]\r\n lims = [[3700, 4100], [180, 500], [-0.08, 0.08], [-0.05, 0.11]]\r\n # Position of the colorbars\r\n xcb = [0.075, 0.555]\r\n xcb = xcb + xcb\r\n yc1 = 0.56\r\n yc2 = 0.085\r\n ycb = [yc1, yc1, yc2, yc2]\r\n # Colormap\r\n cmap = \"Spectral_r\"\r\n ylabels = [1, 0, 1, 0]\r\n xlabels = [0, 0, 1, 1]\r\n cb_fmts = [\"%d\", \"%d\", \"%.2f\", \"%.2f\"]\r\n ###############################################\r\n # Initialize figure and subplots\r\n fig = plt.figure(figsize=(12.5, 12))\r\n gs = gridspec.GridSpec(2, 2)\r\n gs.update(left=0.045, right=0.988, bottom=0.05, top=0.99, hspace=0.03,\r\n wspace=0.03)\r\n # Loop for figures\r\n for i, vector in enumerate(data):\r\n print \"Producing figure for {0}...\".format(titles[i])\r\n good = np.where(((~np.isnan(vector)) & (sn > sn_lims[i])))[0]\r\n if loess:\r\n sn_high = np.where(((~np.isnan(vector)) & (sn >= sn_loess[i])))[0]\r\n sn_low = np.delete(good, sn_high)\r\n vector_low = ll.loess_2d(xall[sn_low], yall[sn_low],\r\n vector[sn_low], frac=frac_loess)\r\n vector_high = vector[sn_high]\r\n good = np.hstack((sn_high, sn_low))\r\n v = np.hstack((vector_high, vector_low))\r\n else:\r\n v = vector[good]\r\n mad = 1.4826 * np.median(np.abs(v - np.median(v)))\r\n ######################################################################\r\n # Set limits according to median deviation if not defined in lims\r\n vmin = np.median(v) - 1.5 * mad if lims[i][0] == None else lims[i][0]\r\n vmax = np.median(v) + 1.5 * mad if lims[i][0] == None else lims[i][1]\r\n norm = Normalize(vmin=vmin, vmax=vmax)\r\n ######################################################################\r\n ax = plt.subplot(gs[i])\r\n coll = PolyCollection(polygons_bins[good], array=v, cmap=cmap,\r\n edgecolors='w', 
norm=norm)\r\n draw_map(fig, ax, coll)\r\n draw_contours(contours, fig, ax)\r\n plt.gca().add_patch(Rectangle((18, -36), 20, 10, alpha=1, zorder=1000,\r\n color=\"w\"))\r\n draw_colorbar(fig, ax, coll, cblabel=cb_label[i],\r\n cbar_pos=[xcb[i], ycb[i], 0.09, 0.02],\r\n ticks=np.linspace(vmin, vmax, 4), cb_fmt=cb_fmts[i],\r\n labelsize=12)\r\n xylabels(ax, y=ylabels[i], x=xlabels[i])\r\n if i not in [0, 2]:\r\n ax.set_yticklabels([])\r\n if i < 2:\r\n ax.set_xticklabels([])\r\n #####################################################\r\n # Draw long slits of other papers\r\n #####################################################\r\n if i > 1:\r\n continue\r\n bc = [\"g\", \"g\", \"b\", \"b\"]\r\n for k, tab in enumerate([tab1a, tab1b, tab2[4:], tab2[:4]]):\r\n norm = Normalize(vmin=vmin, vmax=vmax)\r\n idx = np.argsort(tab[:, 0])\r\n points = np.array([tab[:, 0][idx], tab[:, 1][idx]]).T.reshape(-1,\r\n 1, 2)\r\n segments = np.concatenate([points[:-1], points[1:]],\r\n axis=1)\r\n lc = LineCollection(segments, array=tab[:, i + 2],\r\n cmap=\"cubelaw\", norm=norm, lw=5)\r\n ax.add_collection(lc)\r\n add_borders(ax, points, c=bc[k])\r\n nloess = \"_loess\" if loess else \"\"\r\n plt.savefig(\"figs/kinmaps{0}_{1}.{2}\".format(nloess, contours, format),\r\n dpi=100)\r\n return", "def histogram_indication(self, hist: dict=None) -> dict:\n file_list = [os.path.join(self.sourceDirectory, file_name) for file_name in os.listdir(self.sourceDirectory) if\n file_name.startswith(\"NCT\")]\n result = {} if hist is None else hist\n for file_name in file_list:\n log.info('process ' + file_name)\n trial = xmltodict.parse(open(file_name, \"rb\"))['clinical_study']\n if 'condition' not in trial:\n continue\n if isinstance(trial['condition'], list):\n for c in trial['condition']:\n result[c] = result.get(c, 0) + 1\n else:\n result[trial['condition']] = result.get(trial['condition'], 0) + 1\n return result", "def plot_bacteria_hist(folder, depth=6, mid_quantile=False):\n\n # Get the stool dataset and discretize it\n ds = parser.get_dataset()\n ds = compute_relative_values(ds)\n t = Tree(ds)\n ds = t.dataset_at_depth(depth)\n\n # Get header names to priint on the plots\n headers = ds[0][2:]\n\n for index, header in enumerate(headers):\n\n node = t.node_for_clade_name(header)\n abundances = t.abundance_column_in_subtree(node)\n abundances = [round(x,3) for x in abundances]\n\n if mid_quantile:\n abundances.sort()\n abundances = abundances[int(len(abundances)*0.25): -int(len(abundances)*0.25)]\n\n xlabel('Relative abundance')\n ylabel('Bin size')\n\n title_text = header.replace('/','-').replace('|', '-')\n title(title_text)\n binwidth = 0.001\n bins, bin_sizes, patches = hist(abundances, bins=np.arange(min(abundances), max(abundances) + binwidth, binwidth), color='#0066FF')\n\n # Write discretized values\n threshold, discretized_abundances = discretize_row(abundances, maxent_discretization_splitter)\n _0 = '0: ' + str(len([x for x in discretized_abundances if x == 0]))\n _1 = '1: ' + str(len([x for x in discretized_abundances if x == 1]))\n\n text_x = 0.7\n\n smaples_text = 'Samples: %d' % len(abundances)\n figtext(text_x, 0.85, smaples_text, fontsize=10)\n\n threshold_text = 'Splitter: %f' % threshold\n figtext(text_x, 0.82, threshold_text, fontsize=10)\n figtext(text_x, 0.79, _0, fontsize=10)\n figtext(text_x, 0.76, _1, fontsize=10)\n\n # Draw threshold line\n max_bin = len(abundances)\n if len(bins) != 0:\n max_bin = max(bins)\n\n a, b = [threshold, threshold], [0, max_bin]\n plot(a, b, c='r')\n\n grid(True)\n\n # 
Write max and avg\n # max_abundance = 'max: %f' % max(abundances)\n # avg_abundance = 'avg: %f' % (sum(abundances) / float(len(abundances)))\n # figtext(text_x, 0.76, max_abundance, fontsize=10)\n # figtext(text_x, 0.73, avg_abundance, fontsize=10)\n\n # write variance\n # variance = 'var: %f' % tvar(abundances)\n # figtext(text_x, 0.70, variance, fontsize=10)\n\n # Save fig to folder\n if not (os.path.exists(folder)):\n os.makedirs(folder)\n file_name = os.path.join(folder, title_text)\n print 'Hist: ', file_name\n savefig(file_name)\n\n close()", "def GFFthreshold(infn,outbed):\n converterd = {'probe':nodate,'a':nodate,'b':nodate}\n logging.debug('reading GFF into record array')\n a = csv2rec(infn, \n delimiter='\\t', \n names=('chr','prog','id','start','stop','ratio','a','b','probe'),\n converterd=converterd)\n logging.debug('sorting record array')\n a.sort(order=('chr','start'))\n fout = open(outbed,'w')\n m = a.ratio.mean()\n std = a.ratio.std()\n thresh = m + 2.5 * std\n allregions = []\n region = []\n lastchr = a.chr[0]\n lastpos = None\n count = 0\n\n for data in a:\n if data.ratio < thresh:\n continue\n\n if lastpos is None:\n dist = 0\n else:\n dist = data.start - lastpos\n \n logging.debug('region is currently')\n for i in region:\n logging.debug('\\t%s' % i)\n logging.debug('this data: %s' % data)\n logging.debug('dist from last: %s' % dist)\n \n if dist > 500 or data.chr != lastchr:\n \n logging.debug('\\ndist > 500; checking region len')\n logging.debug('regionlen: %s' % len(region))\n for i in region:\n logging.debug('\\t%s' % i )\n if len(region) < 4:\n logging.debug('region not long enough, erasing')\n else:\n logging.debug('region is long enough!!!!')\n logging.debug('region to be exported is')\n for i in region:\n logging.debug('\\t%s' % i)\n chr = region[0].chr\n start = region[0].start\n stop = region[-1].stop\n fout.write('%s\\t%s\\t%s\\n' % (chr,start,stop))\n count += 1\n region = []\n\n lastpos = data.stop\n lastchr = data.chr\n logging.debug('adding %s to region' % data)\n region.append(data)\n\n if len(region) >= 4:\n logging.debug('last region will be exported')\n logging.debug('region to be exported is')\n for i in region:\n logging.debug('\\t%s' % i)\n \n chr = region[0].chr\n start = region[0].start\n stop = region[-1].stop\n fout.write('%s\\t%s\\t%s\\n' % (chr,start,stop))\n count += 1\n\n else:\n logging.debug('last region not long enough')\n\n fout.close()\n logging.debug('Number of enriched regions: %s' % count)\n logging.debug('using threshold: %s' % thresh)" ]
[ "0.59393805", "0.5846943", "0.5824251", "0.57082117", "0.56558704", "0.55968004", "0.5595293", "0.5591168", "0.5585121", "0.5570465", "0.5558383", "0.55573976", "0.55197114", "0.5517921", "0.5517921", "0.5517653", "0.54925233", "0.5462388", "0.541623", "0.5410834", "0.5406046", "0.5401173", "0.5388622", "0.5344496", "0.5333668", "0.5328971", "0.5327175", "0.53031707", "0.5300421", "0.5283291" ]
0.64799595
0
Test PBAlignFiles.__init__() with a reference repository.
def test_init(self): # Without region table p = PBAlignFiles(self.inputFileName, self.referencePath, self.outputFileName) self.assertTrue(filecmp.cmp(p.inputFileName, self.inputFileName)) self.assertTrue(p.referencePath, path.abspath(path.expanduser(self.referencePath))) self.assertTrue(filecmp.cmp(p.targetFileName, self.targetFileName)) self.assertTrue(filecmp.cmp(p.outputFileName, self.outputFileName)) self.assertIsNone(p.regionTable)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init_region_table(self):\n # With an artifical region table\n regionTable = path.join(self.rootDir, \"data/lambda.rgn.h5\")\n p = PBAlignFiles(self.inputFileName,\n self.referenceFile,\n self.outputFileName,\n regionTable)\n self.assertTrue(filecmp.cmp(p.regionTable, regionTable))", "def __init__(self, seqrepo_access: SeqRepoAccess) -> None:\n self.seqrepo_access = seqrepo_access", "def test_01_init(self):\n\n global primary_instance\n\n # Set up a client directory first.\n uptane.common.create_directory_structure_for_client(\n TEMP_CLIENT_DIR,\n create_primary_pinning_file(),\n {'imagerepo': TEST_IMAGE_REPO_ROOT_FNAME,\n 'director': TEST_DIRECTOR_ROOT_FNAME})\n\n for repository in [\"director\", \"imagerepo\"]:\n \tshutil.copytree(\n \t\tos.path.join(SOURCE_FOR_LOCAL_METADATA,repository), \n \t\tos.path.join(TEMP_CLIENT_DIR,repository))\n\n shutil.copytree(\n \tSOURCE_FOR_LOCAL_TARGETS, \n \tos.path.join(TEMP_CLIENT_DIR,'director','targets'))\n\n\n\n\n\n # TODO: Test with invalid pinning file\n # TODO: Test with pinning file lacking a Director repo.\n\n # Now try creating a Primary with a series of bad arguments, expecting\n # errors.\n\n # TODO: Add test for my_secondaries argument.\n\n # Invalid VIN:\n with self.assertRaises(tuf.FormatError):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=5, # INVALID\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key,\n time=clock,\n timeserver_public_key=key_timeserver_pub,\n my_secondaries=[])\n\n # Invalid ECU Serial\n with self.assertRaises(tuf.FormatError):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=vin,\n ecu_serial=500, # INVALID\n primary_key=primary_ecu_key,\n time=clock,\n timeserver_public_key=key_timeserver_pub,\n my_secondaries=[])\n\n # Invalid ECU Key\n with self.assertRaises(tuf.FormatError):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key={''}, # INVALID\n time=clock,\n timeserver_public_key=key_timeserver_pub,\n my_secondaries=[])\n\n # Invalid time:\n with self.assertRaises(tuf.FormatError):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key,\n time='potato', # INVALID\n timeserver_public_key=key_timeserver_pub,\n my_secondaries=[])\n\n # Invalid format for Director Repository name\n with self.assertRaises(uptane.Error):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=5, #INVALID\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key, time=clock,\n timeserver_public_key = key_timeserver_pub,\n my_secondaries=[])\n\n # Invalid name for Director repository\n with self.assertRaises(uptane.Error):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name= \"invalid\", #INVALID\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key, time=clock,\n timeserver_public_key = key_timeserver_pub,\n my_secondaries=[])\n\n\n # Invalid timeserver key\n with self.assertRaises(tuf.FormatError):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key,\n time=clock,\n timeserver_public_key=clock, # INVALID\n my_secondaries=[])\n\n \n\n 
print(TEMP_CLIENT_DIR)\n\n # Try creating a Primary, expecting it to work.\n # Initializes a Primary ECU, making a client directory and copying the root\n # file from the repositories.\n # Save the result for future tests, to save time and code.\n primary_instance = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key,\n time=clock,\n timeserver_public_key=key_timeserver_pub)\n\n\n # Check the fields initialized in the instance to make sure they're correct.\n\n self.assertEqual([], primary_instance.nonces_to_send)\n self.assertEqual([], primary_instance.nonces_sent)\n self.assertEqual(vin, primary_instance.vin)\n self.assertEqual(primary_ecu_serial, primary_instance.ecu_serial)\n self.assertEqual(primary_ecu_key, primary_instance.primary_key)\n self.assertEqual(dict(), primary_instance.ecu_manifests)\n self.assertEqual(\n primary_instance.full_client_dir, TEMP_CLIENT_DIR)\n self.assertIsInstance(primary_instance.updater, tuf.client.updater.Updater)\n tuf.formats.ANYKEY_SCHEMA.check_match(primary_instance.timeserver_public_key)\n self.assertEqual([], primary_instance.my_secondaries)", "def setUpClass(self):\n self.repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.repo.get_repo()\n self.loader = self.repo.load_pltp(\"/PLTP/test.pltp\")", "def __init__(self, *args):\n self.client = None\n FileRepository.__init__(self, *args)", "def setUpClass(self):\n self.repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.repo.get_repo()", "def setUp(self):\n self.instance = Commit('9b423f8c38516ed33acfa907ae56ad3868741803')", "def __init__(self, repository):\n self.__repo = repository", "def test_init(bib_minimal, bib_bibdesk):\n bib = Bibliography(bib_minimal)\n\n # Assert the file and the data is loaded\n assert bib.file\n assert bib.data\n assert bib.appdx is None\n\n # Invalid bibfile should raise an error\n with pytest.raises(FileNotFoundError, match=\"No such bibliography file\"):\n Bibliography(\"foo/bar/invalid.bib\")\n \n # Assert failure for unsupported creators\n with pytest.raises(ValueError, match=\"Unsupported creator 'invalid'\"):\n Bibliography(bib_minimal, creator='invalid')", "def setUp(self):\n self.pipeline = copy.deepcopy(self.PIPELINE)\n self.pipeline[\"pairwise_fasta\"] = io.StringIO(self.FASTA)", "def setUp(self):\n\n commits = read_file('data/test_commits_data.json')\n self.file_gitignore = commits[0]['data']['files'][0]['file']\n self.file__init__ = commits[8]['data']['files'][0]['file']\n self.file_bin = commits[2]['data']['files'][0]['file']\n self.file_py = commits[4]['data']['files'][0]['file']\n self.file_authors = commits[0]['data']['files'][1]['file']", "def setUp(self):\n\n self.parsedFile = os.path.join(os.path.dirname(__file__),\"blast-parsed.csv\")\n self.bm = BlastMapper()", "def setUp(self):\n\n commits = read_file('data/test_commits_data.json')\n self.file_gitignore = commits[0]['data']['files'][0]['file']\n self.file_tests = commits[1]['data']['files'][0]['file']\n self.file_bin = commits[2]['data']['files'][0]['file']\n self.file_perceval = commits[7]['data']['files'][0]['file']\n self.file_authors = commits[0]['data']['files'][1]['file']", "def __init__(self, mapper=None, relative_to=None):\n\n if mapper and relative_to:\n raise ValueError(\"Must specify exactly one of 'mapper' or 'relative_to'\")\n\n if relative_to:\n base = os.path.abspath(relative_to)\n if not os.path.isdir(base):\n raise 
ValueError('Could not find a directory to bundle relative to at %s' % base)\n self.mapper = RelativeToMapper(base)\n else:\n self.mapper = mapper or RelativeToMapper(os.getcwd())\n\n self.filemap = {}", "def test_repo_double_init(tmp_path: str) -> None:\n _ = Repo.init(tmp_path)\n os.mkdir(os.path.join(tmp_path, ZENML_DIR_NAME))\n\n with pytest.raises(Exception):\n _ = Repository(str(tmp_path)).init_repo(\n repo_path=tmp_path, analytics_opt_in=False\n )", "def setUp(self):\r\n self.mod = ImageAnnotationModule(\r\n Mock(),\r\n get_test_system(),\r\n DictFieldData({'data': self.sample_xml}),\r\n ScopeIds(None, None, None, None)\r\n )", "def setUp(self):\n\n commits = read_file('data/test_commits_data.json')\n self.file_gitignore = commits[0]['data']['files'][0]['file']\n self.file_tests = commits[1]['data']['files'][0]['file']\n self.file_bin = commits[2]['data']['files'][0]['file']\n self.file_py = commits[4]['data']['files'][0]['file']\n self.file_authors = commits[0]['data']['files'][1]['file']", "def setUp(self):\n models.Connector.objects.create(\n identifier=\"openlibrary.org\",\n name=\"OpenLibrary\",\n connector_file=\"openlibrary\",\n base_url=\"https://openlibrary.org\",\n books_url=\"https://openlibrary.org\",\n covers_url=\"https://covers.openlibrary.org\",\n search_url=\"https://openlibrary.org/search?q=\",\n isbn_search_url=\"https://openlibrary.org/isbn\",\n )\n self.connector = Connector(\"openlibrary.org\")\n\n work_file = pathlib.Path(__file__).parent.joinpath(\"../data/ol_work.json\")\n edition_file = pathlib.Path(__file__).parent.joinpath(\"../data/ol_edition.json\")\n edition_md_file = pathlib.Path(__file__).parent.joinpath(\n \"../data/ol_edition_markdown.json\"\n )\n edition_list_file = pathlib.Path(__file__).parent.joinpath(\n \"../data/ol_edition_list.json\"\n )\n self.work_data = json.loads(work_file.read_bytes())\n self.edition_data = json.loads(edition_file.read_bytes())\n self.edition_md_data = json.loads(edition_md_file.read_bytes())\n self.edition_list_data = json.loads(edition_list_file.read_bytes())", "def setUp(self):\r\n super(TestGitExport, self).setUp()\r\n\r\n if not os.path.isdir(git_export_utils.GIT_REPO_EXPORT_DIR):\r\n os.mkdir(git_export_utils.GIT_REPO_EXPORT_DIR)\r\n self.addCleanup(shutil.rmtree, git_export_utils.GIT_REPO_EXPORT_DIR)\r\n\r\n self.bare_repo_dir = '{0}/data/test_bare.git'.format(\r\n os.path.abspath(settings.TEST_ROOT))\r\n if not os.path.isdir(self.bare_repo_dir):\r\n os.mkdir(self.bare_repo_dir)\r\n self.addCleanup(shutil.rmtree, self.bare_repo_dir)\r\n subprocess.check_output(['git', '--bare', 'init'],\r\n cwd=self.bare_repo_dir)", "def setUp(self):\n self.root = ROOT\n self.bamFile1 = self.root + \"testA.bam\"\n self.bamFile2 = self.root + \"testB.bam\"\n self.bamFile_PE = self.root + \"test_paired2.bam\"\n self.chrom = '3R'\n step_size = 50\n bin_length = 25\n\n self.c = cr.CountReadsPerBin([self.bamFile1, self.bamFile2],\n binLength=bin_length,\n stepSize=step_size)", "def setUp(self):\n\n # try:\n os.mkdir(self.pipeline_folder)\n # except FileExistsError:\n # pass\n\n with open(self.pipeline_spec_file, 'w+') as stream:\n json.dump(self.pipeline_spec, stream)\n\n with open(self.pipeline_source_file, 'w+') as stream:\n json.dump(self.source_description, stream)\n\n self.source = Source(self.pipeline_id)", "def setUpClass(cls):\n Git.clone('test-clone/', 'https://github.com/ChielBruin/Gitcovery.git')\n cls.root = Git.checkout('ede9c381daf318a87a58ed9607549132e150f145')", "def setUp(self):\n\n self.params = 
master_phil.extract()\n\n self.params.input.xtal_name = \"FALZA-x0085\"\n\n self.params.input.in_path = os.path.join(\n os.path.realpath(\"./test/resources\"), self.params.input.xtal_name\n )\n\n self.params.validate.input.base_mtz = os.path.join(\n self.params.input.in_path, \"FALZA-x0085.free.mtz\"\n )\n\n self.params.input.mtz = os.path.join(\n self.params.input.in_path, \"FALZA-x0085.free.mtz\"\n )\n\n self.params.input.pdb = os.path.join(self.params.input.in_path, \"refine.pdb\")", "def setUp(self):\n super(BDEFileWithKeyChainTest, self).setUp()\n self._resolver_context = context.Context()\n test_path = self._GetTestFilePath(['bdetogo.raw'])\n self._SkipIfPathNotExists(test_path)\n\n self._os_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_OS, location=test_path)\n self._bde_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_BDE, parent=self._os_path_spec)\n resolver.Resolver.key_chain.SetCredential(\n self._bde_path_spec, 'password', self._BDE_PASSWORD)", "def setUp(self):\n\n try:\n os.mkdir(self.pipeline_folder)\n except FileExistsError:\n pass\n\n with open(self.pipeline_spec_file, 'w+') as stream:\n json.dump(self.pipeline_spec, stream)\n\n with open(self.pipeline_source_file, 'w+') as stream:\n json.dump(self.source_description, stream)\n\n self.source = Source(folder=self.pipeline_folder)", "def test_init():\n\n test_project = tempfile.mkdtemp(dir=self._tempdir)\n\n inventory_fname = os.path.join(test_project, \".inventory.toml\")\n config_fname = os.path.join(test_project, \".config.toml\")\n\n assert 0 == subprocess.call([\n sys.executable, \"-u\", \"-m\", \"avalon.inventory\", \"--init\"\n ], cwd=test_project)\n\n assert os.path.isfile(inventory_fname), \".inventory.toml not found\"\n assert os.path.isfile(config_fname), \".config.toml not found\"\n\n with open(inventory_fname) as f:\n inventory_dict = toml.load(f)\n assert_equals(inventory_dict, inventory.DEFAULTS[\"inventory\"])\n\n with open(config_fname) as f:\n config_dict = toml.load(f)\n assert_equals(config_dict, inventory.DEFAULTS[\"config\"])", "def setUp(self): \n \n self.obo = MinimalObo(obo_file)\n self.emapa = get_emapa_map(emapa_file, self.obo)", "def __init__(self, repo_config: Repository, s3_client: Client, s3_bucket: str):\n self.repo_config = repo_config\n self.s3_client = s3_client\n self.s3_bucket = s3_bucket", "def setUp(self):\n self.source_id = '12345'\n self.checksum = 'asdfqwert1=='\n self.stream = io.BytesIO(b'fakecontent')", "def setUp(self):\n self.source_id = '12345'\n self.checksum = 'asdfqwert1=='\n self.stream = io.BytesIO(b'fakecontent')" ]
[ "0.62803483", "0.60776395", "0.6074035", "0.60606027", "0.6036678", "0.6020233", "0.59328747", "0.5792134", "0.5783295", "0.57514167", "0.5745627", "0.5695448", "0.5677711", "0.5663948", "0.5652757", "0.5641693", "0.5638307", "0.56356263", "0.560679", "0.55909425", "0.5560872", "0.5559826", "0.5552773", "0.55344397", "0.5531945", "0.55250764", "0.5522855", "0.5499599", "0.5494527", "0.5494527" ]
0.7788326
0
Test PBAlignFiles.__init__() with a region table.
def test_init_region_table(self): # With an artifical region table regionTable = path.join(self.rootDir, "data/lambda.rgn.h5") p = PBAlignFiles(self.inputFileName, self.referenceFile, self.outputFileName, regionTable) self.assertTrue(filecmp.cmp(p.regionTable, regionTable))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init(self):\n # Without region table\n p = PBAlignFiles(self.inputFileName,\n self.referencePath,\n self.outputFileName)\n self.assertTrue(filecmp.cmp(p.inputFileName, self.inputFileName))\n self.assertTrue(p.referencePath, path.abspath(path.expanduser(self.referencePath)))\n self.assertTrue(filecmp.cmp(p.targetFileName, self.targetFileName))\n self.assertTrue(filecmp.cmp(p.outputFileName, self.outputFileName))\n self.assertIsNone(p.regionTable)", "def __init__(self, region):\r\n self.region = region", "def setUp(self):\r\n\r\n self.otu_table_values = array([[0, 0, 9, 5, 3, 1],\r\n [1, 5, 4, 0, 3, 2],\r\n [2, 3, 1, 1, 2, 5]])\r\n {(0, 2): 9.0, (0, 3): 5.0, (0, 4): 3.0, (0, 5): 1.0,\r\n (1, 0): 1.0, (1, 1): 5.0, (1, 2): 4.0, (1, 4): 3.0, (1, 5): 2.0,\r\n (2, 0): 2.0, (2, 1): 3.0, (2, 2): 1.0, (2, 3): 1.0, (2, 4): 2.0, (2, 5): 5.0}\r\n self.otu_table = table_factory(self.otu_table_values,\r\n ['Sample1', 'Sample2', 'Sample3',\r\n 'Sample4', 'Sample5', 'Sample6'],\r\n ['OTU1', 'OTU2', 'OTU3'],\r\n [None, None, None, None, None, None],\r\n [{\"taxonomy\": ['Bacteria']},\r\n {\"taxonomy\": ['Archaea']},\r\n {\"taxonomy\": ['Streptococcus']}])\r\n self.otu_table_f = table_factory(self.otu_table_values,\r\n ['Sample1', 'Sample2', 'Sample3',\r\n 'Sample4', 'Sample5', 'Sample6'],\r\n ['OTU1', 'OTU2', 'OTU3'],\r\n [None, None, None, None, None, None],\r\n [{\"taxonomy\": ['1A', '1B', '1C', 'Bacteria']},\r\n {\"taxonomy\":\r\n ['2A', '2B', '2C', 'Archaea']},\r\n {\"taxonomy\": ['3A', '3B', '3C', 'Streptococcus']}])\r\n\r\n self.full_lineages = [['1A', '1B', '1C', 'Bacteria'],\r\n ['2A', '2B', '2C', 'Archaea'],\r\n ['3A', '3B', '3C', 'Streptococcus']]\r\n self.metadata = [[['Sample1', 'NA', 'A'],\r\n ['Sample2', 'NA', 'B'],\r\n ['Sample3', 'NA', 'A'],\r\n ['Sample4', 'NA', 'B'],\r\n ['Sample5', 'NA', 'A'],\r\n ['Sample6', 'NA', 'B']],\r\n ['SampleID', 'CAT1', 'CAT2'], []]\r\n self.tree_text = [\"('OTU3',('OTU1','OTU2'))\"]\r\n fh, self.tmp_heatmap_fpath = mkstemp(prefix='test_heatmap_',\r\n suffix='.pdf')\r\n close(fh)", "def region_setup(self, slices, ipa_regions):\n self.ipa_regions = ipa_regions\n self.slices = slices", "def __init__(self, *args):\n _snap.TTable_swiginit(self, _snap.new_TTable(*args))", "def setUp(self):\n self.data = StringIO.StringIO(\"\"\"\nchromosome,start,end,header_a,header_b\nchr1,1,10000,A line 1, B line 1\nchr1,20000,30000,A line 2, B line 2\nchr2,100,110,A line 3, B line 3\n\"\"\")", "def test_writer_init(self):\n\n fields = [\n ('Prop2', 'prop2'),\n ]\n\n writer = BaseTSVWriter(fields, field_prefix='prop1')\n\n assert list(writer.columns) == ['Prop2'] # writer.columns is of odict_keys type so cast to list\n assert len(writer.fields) == len(fields)\n assert len(writer.field_mappers) == len(fields)\n assert callable(writer.prefix_mapper)\n\n assert BaseTSVWriter([]).prefix_mapper is None", "def __init__(self, zip_file, stream_name):\n super(_SerializedDataOffsetTable, self).__init__()\n self._offsets = []\n self._stream_name = stream_name\n self._zip_file = zip_file", "def __init__(self):\n _snap.TTableRow_swiginit(self, _snap.new_TTableRow())", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, 
**kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def setUpClass(cls):\n cls.use_temp_region()\n cls.runModule(\"g.region\", raster=\"elev_state_500m\")", "def setUp(self):\r\n self.l19_data = array([\r\n [7, 1, 0, 0, 0, 0, 0, 0, 0],\r\n [4, 2, 0, 0, 0, 1, 0, 0, 0],\r\n [2, 4, 0, 0, 0, 1, 0, 0, 0],\r\n [1, 7, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 8, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 7, 1, 0, 0, 0, 0, 0, 0],\r\n [0, 4, 2, 0, 0, 0, 2, 0, 0],\r\n [0, 2, 4, 0, 0, 0, 1, 0, 0],\r\n [0, 1, 7, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 8, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 7, 1, 0, 0, 0, 0, 0],\r\n [0, 0, 4, 2, 0, 0, 0, 3, 0],\r\n [0, 0, 2, 4, 0, 0, 0, 1, 0],\r\n [0, 0, 1, 7, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 8, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 7, 1, 0, 0, 0, 0],\r\n [0, 0, 0, 4, 2, 0, 0, 0, 4],\r\n [0, 0, 0, 2, 4, 0, 0, 0, 1],\r\n [0, 0, 0, 1, 7, 0, 0, 0, 0]\r\n ])\r\n self.l19_sample_names = [\r\n 'sam1', 'sam2', 'sam3', 'sam4', 'sam5', 'sam6',\r\n 'sam7', 'sam8', 'sam9', 'sam_middle', 'sam11', 'sam12', 'sam13',\r\n 'sam14', 'sam15', 'sam16', 'sam17', 'sam18', 'sam19']\r\n self.l19_taxon_names = ['tax1', 'tax2', 'tax3', 'tax4', 'endbigtaxon',\r\n 'tax6', 'tax7', 'tax8', 'tax9']\r\n\r\n self.legacy_otu_table1 = legacy_otu_table1\r\n self.otu_table1 = otu_table1\r\n self.otu_table_without_leading_comment = \\\r\n otu_table_without_leading_comment\r\n self.expected_lineages1 = expected_lineages1\r\n self.taxa_summary1 = taxa_summary1\r\n self.taxa_summary1_expected = taxa_summary1_expected\r\n self.otu_table1_floats = otu_table1_floats\r\n self.files_to_remove = []\r\n self.denoiser_mapping1 = denoiser_mapping1.split('\\n')\r\n self.sam_data1 = sam_data1.split(\"\\n\")\r\n self.sam1_expected = sam1_expected", "def setUp(self):\n self.compound = PyFBA.metabolism.Compound(\"t1\", \"test compound\")\n self.compound.abbreviation = \"Cool\"\n self.compound.add_attribute('What', \"Everything\")\n self.compound_with_loc = PyFBA.metabolism.CompoundWithLocation.from_compound(self.compound, \"extracellular\")", "def setUp(self):\r\n self.output_dir = '/tmp/'\r\n\r\n otu_table_vals = array([[0, 0], [1, 5]])\r\n\r\n self.otu_table = table_factory(otu_table_vals,\r\n ['Sample1', 'Sample2'],\r\n ['OTU1', 'OTU2'],\r\n [None, None],\r\n [{\"taxonomy\": [\"Bacteria\"]},\r\n {\"taxonomy\": [\"Archaea\"]}])\r\n\r\n filt_otu_table_vals = array([[1, 5]])\r\n\r\n self.filt_otu_table = table_factory(filt_otu_table_vals,\r\n ['Sample1', 'Sample2'],\r\n ['OTU2'],\r\n [None, None],\r\n [{\"taxonomy\": [\"Archaea\"]}])\r\n\r\n self.num_otu_hits = 5\r\n self._folders_to_cleanup = []", "def setUp(self): \n \n self.obo = MinimalObo(obo_file)\n self.emapa = get_emapa_map(emapa_file, self.obo)", "def __init__(self,dir,factory=FileInfo):\n self.dir = dir\n self.factory=factory\n self.data = {}\n self.table = Table(os.path.join(self.dir,'Mash','Table.pkl'))\n self.corrupted = {} #--errorMessage = corrupted[fileName]", "def testInitialization(self):\n test_path = self._GetTestFilePath(['pinfo_test.plaso'])\n test_reader = reader.SQLiteStorageFileReader(test_path)\n self.assertIsNotNone(test_reader)", "def __init__(self, *args):\n _table.Table_swiginit(self, _table.new_Table(*args))", "def __init__(self, *args, **kwargs):\n 
_gdi_.Region_swiginit(self,_gdi_.new_Region(*args, **kwargs))", "def setUp(self):\n self.vencode_obj = iext.GetVencodesFantom(files_path=self.files_path,\n cell_type=self.celltype_analyse,\n algorithm=\"sampling\",\n n_regulatory_elements=self.k,\n number_vencodes=4,\n parsed=self.parsed,\n thresholds=self.thresholds, n_samples=10000,\n data_type=self.data_type, sample_type=self.sample_type)\n self.vencodes = self.vencode_obj.coordinates" ]
[ "0.79167587", "0.6060171", "0.5885254", "0.58226097", "0.57932025", "0.5701988", "0.5682602", "0.56778866", "0.56713814", "0.56369597", "0.56369597", "0.56369597", "0.56369597", "0.56369597", "0.56369597", "0.56369597", "0.56369597", "0.56369597", "0.56369597", "0.56369597", "0.55827403", "0.55608726", "0.5514674", "0.5479603", "0.54788846", "0.5475509", "0.5468731", "0.5452167", "0.5436022", "0.5435324" ]
0.87365645
0
Method to initialize apscheduler
def initialize_scheduler(self): scheduler = BackgroundScheduler() scheduler.add_job(self.do, 'interval', minutes=1) scheduler.start() self.do()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_scheduler(self):\n self._sched = BackgroundScheduler()\n self._sched.add_job(self._check_rain, trigger='cron', minute='*/5')\n self._sched.start()", "def __init__(self):\n try:\n self.scheduler = ap_scheduler\n except Exception as e:\n logger_obj.print_log('(Scheduler.py(__init__) - Something went wrong ' + str(e), 'exception')\n raise Exception(e)", "def __init__(self, *args, **kwargs):\n BaseScheduler.__init__(self, *args, **kwargs)", "def schedule_start(self):\n self.initialize_scheduler()", "def initialize_scheduler():\n\n with SCHED_LOCK:\n\n # Check if scheduler should be started\n start_jobs = not len(SCHED.get_jobs())\n\n # Update check\n github_minutes = CONFIG.CHECK_GITHUB_INTERVAL if CONFIG.CHECK_GITHUB_INTERVAL and CONFIG.CHECK_GITHUB else 0\n\n schedule_job(versioncheck.checkGithub, 'Check GitHub for updates',\n hours=0, minutes=github_minutes, seconds=0)\n\n # Our interval should never be less than 30 seconds\n monitor_seconds = CONFIG.MONITORING_INTERVAL if CONFIG.MONITORING_INTERVAL >= 30 else 30\n\n if CONFIG.PMS_IP and CONFIG.PMS_TOKEN:\n schedule_job(plextv.get_real_pms_url, 'Refresh Plex server URLs',\n hours=12, minutes=0, seconds=0)\n schedule_job(pmsconnect.get_server_friendly_name, 'Refresh Plex server name',\n hours=12, minutes=0, seconds=0)\n\n schedule_job(activity_pinger.check_recently_added, 'Check for recently added items',\n hours=0, minutes=0, seconds=monitor_seconds * bool(CONFIG.NOTIFY_RECENTLY_ADDED))\n schedule_job(activity_pinger.check_server_response, 'Check for Plex remote access',\n hours=0, minutes=0, seconds=monitor_seconds * bool(CONFIG.MONITOR_REMOTE_ACCESS))\n schedule_job(activity_pinger.check_server_updates, 'Check for Plex updates',\n hours=12 * bool(CONFIG.MONITOR_PMS_UPDATES), minutes=0, seconds=0)\n\n # If we're not using websockets then fall back to polling\n if not CONFIG.MONITORING_USE_WEBSOCKET or POLLING_FAILOVER:\n schedule_job(activity_pinger.check_active_sessions, 'Check for active sessions',\n hours=0, minutes=0, seconds=monitor_seconds)\n\n # Refresh the users list and libraries list\n user_hours = CONFIG.REFRESH_USERS_INTERVAL if 1 <= CONFIG.REFRESH_USERS_INTERVAL <= 24 else 12\n library_hours = CONFIG.REFRESH_LIBRARIES_INTERVAL if 1 <= CONFIG.REFRESH_LIBRARIES_INTERVAL <= 24 else 12\n\n if CONFIG.PMS_TOKEN:\n schedule_job(plextv.refresh_users, 'Refresh users list',\n hours=user_hours, minutes=0, seconds=0)\n\n if CONFIG.PMS_IP and CONFIG.PMS_TOKEN:\n schedule_job(pmsconnect.refresh_libraries, 'Refresh libraries list',\n hours=library_hours, minutes=0, seconds=0)\n\n backup_hours = CONFIG.BACKUP_INTERVAL if 1 <= CONFIG.BACKUP_INTERVAL <= 24 else 6\n\n schedule_job(database.make_backup, 'Backup PlexPy database',\n hours=backup_hours, minutes=0, seconds=0, args=(True, True))\n schedule_job(config.make_backup, 'Backup PlexPy config',\n hours=backup_hours, minutes=0, seconds=0, args=(True, True))\n\n # Start scheduler\n if start_jobs and len(SCHED.get_jobs()):\n try:\n SCHED.start()\n except Exception as e:\n logger.info(e)\n\n # Debug\n #SCHED.print_jobs()", "def __init__(self, *args, time_frame=3, **kargs):\n super(Scheduler, self).__init__(*args, **kargs)\n self.time_frame = time_frame\n self.running_jobs = queue.Queue()\n self.scheduler_manager = []\n self.task_manager = None", "def __init__(self):\n\n super(VirtualTimeScheduler, self).__init__()\n self.event_queue = Queue.PriorityQueue()", "def start(self):\n\n self.loadConf()\n self.loadDrivers()\n self.loadFeeds()\n self.runScheduler()\n 
self.scheduler.print_jobs()\n self.scheduler.start()\n self.printConf(\"test\")\n print(\"scheduler started\")", "def init_app(self, app):\n\n self.app = app\n self.app.apscheduler = self\n\n self._load_config()\n if self.api_enabled:\n self._load_api()", "def start_scheduler():\n from security_monkey import scheduler\n scheduler.setup_scheduler()\n scheduler.scheduler.start()", "def __init__(self, scheduler_name, task, interval, delay=0):\n\n self.scheduler_name = scheduler_name\n self.task = task\n self.interval = interval\n self.delay = delay\n self.scheduler = sched.scheduler(time.time, time.sleep)\n self.__running = False\n super(Scheduler, self).__init__(name=self.scheduler_name)\n self.setDaemon(True)", "def __init__(self) -> None:\n\n self._local = CurrentThreadScheduler._Local()", "def schedule_start(self):\n print(\"Scheduler for monitoring request is running\")\n self.initialize_scheduler()", "def __init__(self, scheduler):\n self._scheduler = scheduler\n self._result = None\n self._timeouts = None", "def _initJobs(self):\n pass", "def createScheduler_(self):\n klass_name = 'Scheduler' + string.capitalize(self.scheduler_name)\n file_name = klass_name\n try:\n klass = importName(file_name, klass_name)\n except KeyError:\n msg = 'No `class '+klass_name+'` found in file `'+file_name+'.py`'\n raise SkimException(msg)\n except ImportError, e:\n msg = 'Cannot create scheduler '+self.scheduler_name\n msg += ' (file: '+file_name+', class '+klass_name+'):\\n'\n msg += str(e)\n raise SkimException(msg)\n\n common.scheduler = klass()\n common.scheduler.configure(self.cfg_params)\n return", "def init(args):\n # Setup AWS connection\n aws_eu = connect_from_conf('aws_eu')\n aws_us = connect_from_conf('aws_us')\n ec2_conn['eu-west-1'] = aws_eu['ec2']\n elb_conn['eu-west-1'] = aws_eu['elb']\n ec2_conn['us-west-1'] = aws_us['ec2']\n elb_conn['us-west-1'] = aws_us['elb']\n global schedules\n schedules = get_schedules()", "def __init__(self):\n self._update_scheduled = False", "def __init__(self):\n\n super(Scheduler, self).__init__()\n self.num_steps = 0\n self.current_time = 0.0\n self.components = []", "def _configure_scheduler(self, scheduler: Scheduler, callback: Callable[[], None]) -> None:\n if self.is_cron:\n # Scheduler always executes at the exact minute to check for cron triggering\n scheduler.every().minute.at(\":00\").do(callback)\n else:\n # Only activate when an interval is specified\n # If not the only way is to trigger the poll by the api `trigger` endpoint\n if self._poll_interval:\n # Scheduler executes every interval seconds to execute the poll\n scheduler.every(self._poll_interval).seconds.do(callback)", "def __run_schedules():\n while True:\n __scheduler.run()", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler" ]
[ "0.7957172", "0.7594884", "0.7511692", "0.7477816", "0.74715924", "0.74572617", "0.71291786", "0.7065361", "0.7037644", "0.6962178", "0.6906121", "0.68748313", "0.6815364", "0.67771685", "0.6687212", "0.66796535", "0.65879136", "0.654168", "0.6524913", "0.6480765", "0.64684284", "0.6414065", "0.6414065", "0.6414065", "0.6414065", "0.6414065", "0.6414065", "0.6414065", "0.6414065", "0.6414065" ]
0.8041467
0
Gets the (sub)sequence where the last element is less, greater or equal to target according to the sequence's order. If extend = True and the sequence's next element would contain more elements before the target, the sequence is extended first.
def get_subsequence(self, target, extend=True): # get the index i of hypothetical position in sorted sequence i = bisect.bisect_left(self.sequence, target) if i: # i is max index but here might be more elements <= target if i == len(self.sequence) and extend: if self.ascending: while self.next_element() < target: self.extend_sequence() else: while self.next_element() > target: self.extend_sequence() return self.sequence # i is not max index else: return self.sequence[:i] raise ValueError('%s' % i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expand(sequence1: \"Sequence\", sequence2: \"Sequence\") -> \"Sequence\":\n if sequence1.start <= sequence2.stop:\n return Sequence(sequence1.start, sequence2.stop)\n else:\n return Sequence(sequence2.stop, sequence1.start)", "def find_period_below(self, start, end, target, length):\n\n if start > end:\n raise ValueError(\"End needs to be after start!\")\n if length < 0:\n raise ValueError(\"Period length must be larger than zero!\")\n\n period_start = (start if self.get(start) <= target else None)\n\n start_ix = self._trace.bisect_right(start)\n end_ix = self._trace.bisect_left(end)\n for time, lvl in self._trace.items()[start_ix:end_ix]:\n # Period long enough?\n if period_start is not None:\n if time >= period_start + length:\n return period_start\n # Not enough space until end?\n elif time + length > end:\n return None\n # Above target? Reset period\n if lvl > target:\n period_start = None\n else:\n if period_start is None:\n period_start = time\n\n # Possible at end?\n if period_start is not None and period_start+length <= end:\n return period_start\n\n # Nothing found\n return None", "def next_down(v, seq):\n rseq = list(seq[:])\n rseq.reverse()\n\n for s in rseq:\n if s < v:\n return s\n return v", "def next_up(v, seq):\n for s in seq:\n if s > v:\n return s\n return v", "def __gt__(self, seq):\n return not self.__le__(seq)", "def gt(self, other):\n self._raise_if_null(other)\n if hasattr(other, 'end'):\n return self.begin >= other.end\n else:\n return self.begin > other", "def greater(self):\n return [x for x in TransitiveIdeal(attrcall('succ'), [self])]", "def __ge__(self, seq):\n return not self.__lt__(seq)", "def run(self, seq: list, minimum: bool = True):\n if minimum:\n # less-than operator (looking for the minimum)\n compare = operator.lt\n else:\n # greater-than operator (looking for the maximum)\n compare = operator.gt\n\n if len(seq) < 3: # Mimimum length required for the sequence\n return None\n\n # Initial conditions.\n # x0 and x3: indexes for lower and upper bounds of the interval\n # x1 and x2: median points (having x1 < x2)\n x_0 = 0\n x_3 = len(seq) - 1\n dist = self.phi*(x_3 - x_0)\n x_1 = round(x_3 - dist)\n x_2 = round(x_0 + dist)\n\n # Iterative search\n while x_2 - x_1 > 0:\n # Comparison operator, can be operator.lt or operator.gt\n if compare(seq[x_1], seq[x_2]):\n x_3, x_2 = x_2, x_1\n x_1 = round(x_3 - self.phi*(x_3-x_0))\n else:\n x_0, x_1 = x_1, x_2\n x_2 = round(x_0 + self.phi*(x_3-x_0))\n\n # Finally we process the last few elements of the reduced sequence\n while x_3 - x_0 > 0:\n # Comparison operator, can be operator.lt or operator.gt\n if compare(seq[x_0], seq[x_3]):\n x_3 = x_3 - 1\n else:\n x_0 = x_0 + 1\n\n return seq[x_0]", "def le(self, other):\n self._raise_if_null(other)\n return self.end <= getattr(other, 'end', other)", "def __lt__(self, other):\n return self.sequence < other.sequence", "def minOperations(self, target: list[int], arr: list[int]) -> int:\n orders = {x: i for i, x in enumerate(target)}\n stack = []\n for x in arr:\n if x not in orders:\n continue\n\n i = bisect_left(stack, orders[x])\n if i == len(stack):\n stack.append(0)\n\n stack[i] = orders[x]\n\n return len(target) - len(stack)", "def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):\n\t\tinp = [start_id] + sequence[:]\n\t\ttarget = sequence[:]\n\t\tif len(inp) > max_len: # truncate\n\t\t\tinp = inp[:max_len]\n\t\t\ttarget = target[:max_len] # no end_token\n\t\telse: # no truncation\n\t\t\ttarget.append(stop_id) # end token\n\t\tassert len(inp) == 
len(target)\n\t\treturn inp, target", "def next_larger(self):\n if self.right is not None:\n return self.right.find_min()\n current = self\n while current.parent is not None and current is current.parent.right:\n current = current.parent\n return current.parent", "def __sub__(self, other: Seq) -> int:\n return sum(i != j for i, j in zip_longest(self.sequence, other.sequence))", "def __le__(self, seq):\n if any(self._arr[i] > seq[i] for i in range(min(self._length, len(seq)))):\n return False\n return self._length <= len(seq)", "def gaps_of_end(self, extremity):\n return getattr(self, \"gaps_{}\".format(extremity))", "def currentBelow(requestContext, seriesList, n):\n return [ series for series in seriesList if safeLast(series) <= n ]", "def extend_seq(mrnaseq, mrna_frag, total_length=50):\n #\n # Prepare sequences with no gaps\n #\n mrnaseq_nogap = mrnaseq.replace(\"-\", \"\")\n mrna_frag_nogap = mrna_frag.replace(\"-\", \"\")\n #\n # check if the sequence is shorter\n #\n if len(mrna_frag_nogap) > total_length:\n syserr(\"mrnaseq_nogap: \", mrnaseq_nogap)\n syserr(\"mrna_frag_nogap: \", mrna_frag_nogap)\n syserr(\"mrnaseq: \", mrnaseq)\n syserr(\"mrna_frag: \", mrna_frag)\n raise Exception(\n \"Check your sequences maybe you should shrink, not extend them\")\n span = re.search(mrna_frag_nogap, mrnaseq_nogap).span()\n\n # Decide which type of extension to do\n gap_pos_mean = mean([i for i, x in enumerate(mrna_frag) if x == \"-\"])\n list_median = median([i for i in range(len(mrna_frag))])\n\n # this ratio gives us relative position of the gaps\n ratio = gap_pos_mean / list_median\n\n # Based on the ratio do the extension of the sequence\n if ratio > 0.5 and ratio < 1.5: # extend both sides\n li = span[0]\n ui = span[1]\n length = ui - li\n if length > total_length:\n return -1\n elif length == total_length:\n return mrnaseq_nogap[li:ui]\n else:\n dif = total_length - length\n quot = dif // 2 # this is explicit integer division\n l_ext = li - quot # TODO check if they are not lower than 0\n u_ext = ui + (dif - quot)\n if (l_ext < 0) or (u_ext > len(mrnaseq_nogap) - 1):\n return \"NA\"\n else:\n return mrnaseq_nogap[l_ext:u_ext]\n elif ratio <= 0.5: # extend left - it means upstream (5'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = total_length - len(mrna_frag_nogap)\n if (li - dif < 0):\n return mrnaseq_nogap[:ui + abs(li - dif)]\n else:\n return mrnaseq_nogap[li - dif:ui]\n elif ratio >= 1.5: # extend right - it means downstream (3'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = total_length - len(mrna_frag_nogap)\n # if there is noting to extend to the right\n if ui + dif > len(mrnaseq_nogap):\n return mrnaseq_nogap[li - ((ui + dif) - len(mrnaseq_nogap)):]\n else:\n return mrnaseq_nogap[li:ui + dif]", "def test_kth_from_end_item_when_target_is_head(small_list):\n expected = 4\n actual = small_list.kth_from_end(3)\n assert expected == actual", "def binary_search(seq, f, target):\n if not seq or f(seq[0]) > target:\n return 0\n elif f(seq[-1]) < target:\n return len(seq)\n upper = len(seq)\n lower = 0\n while (upper - lower) > 1:\n current = (upper + lower) // 2\n next_val = f(seq[current])\n if next_val > target:\n upper = current\n elif next_val <= target:\n lower = current\n return upper", "def get_segment_after(self, target_segment):\n index = self.segments.index(target_segment)\n last_index = len(self.segments) - 1\n if index == last_index:\n if self.loop:\n return self.segments[0] # reset to first segment\n else:\n # TODO this might be better off with an 
Exception\n return target_segment #return same thing\n return self.segments[index+1]", "def __le__(self, other):\n return self.element() <= other.element()", "def __gt__(self, other):\n return self.element() > other.element()", "def max(self, other):\n return other if self.less(other) else self", "def until_last(self, value: Any) -> List:\n matches = self._slice_helper(value, multiple_matches_forbidden=False)\n return type(self.parent)() if not matches else type(self.parent)(self.parent[:matches[-1]+1])", "def longincseq(v):\n n=len(v)\n if n==0: return -1\n l = 0\n u = n-1\n max2here=1\n maxsofar=1\n for i in xrange(l+1, u+1):\n if v[i]>v[i-1]: \n max2here+=1\n else:\n max2here=1\n maxsofar = max(maxsofar, max2here)\n return maxsofar", "def adapt_target(self, target):\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - self.buggy_offset\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs", "def selection_sort(seq):\n length = len(seq)\n\n for i in range(length-1, 0, -1):\n\n max_pos = 0 # Position of max value\n\n for j in range(1, i+1):\n if seq[j] > seq[max_pos]:\n max_pos = j\n\n tmp = seq[i]\n seq[i] = seq[max_pos]\n seq[max_pos] = tmp\n\n return seq", "def next_larger(self, k):\n node = self.find(k)\n return node and node.successor()" ]
[ "0.5086548", "0.50799274", "0.50425124", "0.50191414", "0.49865168", "0.4884031", "0.48734006", "0.48432878", "0.48029026", "0.4758451", "0.47282988", "0.4662645", "0.46440113", "0.46426633", "0.46387488", "0.46369892", "0.45993978", "0.4573143", "0.45452526", "0.45346263", "0.45031494", "0.45001596", "0.4498038", "0.4496841", "0.44877666", "0.44802067", "0.4480014", "0.4478808", "0.44770816", "0.4468745" ]
0.79104257
0
Compute the log likelihood under Poisson distribution log poisson(k, r) = log(r^k e^(-r) / k!) = k log(r) - r - log k! log poisson(k, r=exp(l)) = k l - exp(l) - lgamma(k + 1)
def poisson_log_likelihood(x, log_rate): return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_poisson(k, l):\n return k*np.log(l) -l - gammaln(k+1)", "def LLR_poisson(k, mu0, b=0, EPS=1e-15):\n muhat = k - b # maximum likelihood non-negative truncated estimate\n muhat[muhat < EPS] = 2*EPS\n\n # Log-likelihood (density) ratios\n # -2[ Log(l(mu_0)) - Log(l(muhat))]\n LLR = 2*(mu0 - muhat + k*np.log((b+muhat) / (b+mu0)))\n return LLR", "def kl_poisson(x, y):\n x = max(x, eps)\n y = max(y, eps)\n return y-x+x*log(x/y)", "def _compute_log_likelihood(self, X, S):\n log_likelihood = 0\n for n in range(self.n_col):\n likelihood = 1\n for k in range(self.n_components):\n likelihood *= self.weights[k] \\\n * multivariate_normal(self.means[k], self.covs[k]).pdf(X[n]) \\\n * poisson(self.rates[k]).pmf(S[n])\n log_likelihood += np.log(likelihood)\n\n return log_likelihood", "def poisson(k, lamb):\n return (lamb**k/factorial(k)) * np.exp(-lamb)", "def poisson(k, lamb):\n return (lamb**k/factorial(k)) * np.exp(-lamb)", "def poisson(k, lamb):\n return (lamb**k/factorial(k)) * np.exp(-lamb)", "def PoissonGaussianLoss(mu_kl, log_var_kl) :\n def pgl(x, lambdas) :\n N = K.int_shape(lambdas)[1]\n recon = -1.*K.sum(x*lambdas - K.exp(lambdas), axis=1)\n dkl = -0.5 * K.sum(-K.exp(log_var_kl) - K.square(mu_kl) + 1. + log_var_kl, axis=-1)\n return recon + dkl\n return pgl", "def log_likelihood(self) -> tf.Tensor:\n # K⁻¹ + GᡀΣ⁻¹G = LLα΅€.\n l_post = self._k_inv_post.cholesky\n num_data = self.observations_index.shape[0]\n\n # HΞΌ [..., num_transitions + 1, output_dim]\n marginal = self.emission.project_state_to_f(self.prior_ssm.marginal_means)\n marginal = self._drop_batch_shape(marginal)\n\n # y = obs - HΞΌ [..., num_transitions + 1, output_dim]\n disp = self.observations - marginal\n disp_data = self.sparse_observations - self.dense_to_sparse(marginal)\n\n # cst is the constant term for a gaussian log likelihood\n cst = (\n -0.5 * np.log(2 * np.pi) * tf.cast(self.emission.output_dim * num_data, default_float())\n )\n\n term1 = -0.5 * tf.reduce_sum(\n input_tensor=tf.einsum(\"...op,...p,...o->...o\", self._r_inv_data, disp_data, disp_data), axis=[-1, -2]\n )\n\n # term 2 is: Β½|L⁻¹(GᡀΣ⁻¹)y|Β²\n # (GᡀΣ⁻¹)y [..., num_transitions + 1, state_dim]\n obs_proj = self._back_project_y_to_state(disp)\n\n # Β½|L⁻¹(GᡀΣ⁻¹)y|Β² [...]\n term2 = 0.5 * tf.reduce_sum(\n input_tensor=tf.square(l_post.solve(obs_proj, transpose_left=False)), axis=[-1, -2]\n )\n\n ## term 3 is: Β½log |K⁻¹| - log |L| + Β½ log |Σ⁻¹|\n # where log |Σ⁻¹| = num_data * log|R⁻¹|\n term3 = (\n 0.5 * self.prior_ssm.log_det_precision()\n - l_post.abs_log_det()\n + 0.5 * self._log_det_observation_precision\n )\n\n return tf.reduce_sum(cst + term1 + term2 + term3)", "def Poisson(n, k):\n\tp = math.exp(-k) * math.pow(k, n) / float(Factorial(n))\n\tassert 0.0 <= p <= 1.0, \"Error, value of p is invalid probability: \" + str(p)\n\treturn p", "def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)", "def poisson_log_posterior(rate, rate_multiplier, num_events, log_rate):\n\n if log_rate:\n log_prior = 0.5*rate\n else:\n log_prior = -0.5*np.log(rate)\n\n rate_total = rate_multiplier * (np.exp(rate) if log_rate else rate)\n\n log_likelihood = -rate_total + num_events * np.log(rate_total) \\\n - np.log(np.math.factorial(num_events))\n\n log_post = log_likelihood + log_prior\n\n if np.isscalar(log_post):\n log_post = log_post if np.isfinite(log_post) else -np.inf\n else:\n log_post[~np.isfinite(log_post)] = -np.inf\n\n 
return log_post", "def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)", "def log_likelihood(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n log_likelihood = numpy.sum(norm.logpdf(X,parameters['mu'],sigma))\n\n return log_likelihood", "def loglikelihood(R, R_n, variance, M, K):\n if 0 <= variance <= EPS:\n res = 0\n else:\n res = R_n * (np.log(R_n) - np.log(R) - 0.5 * (np.log(2 * np.pi) + M * np.log(variance) + 1)) + 0.5 * K\n if res == np.inf:\n res = 0\n return res", "def log_likelihood(self,samples,times):\n prior_mu = np.ones(2*len(self.A)+1) \n prior_var = np.eye(2*len(self.A)+1)*0.7\n prior_p = np.log(self.prior_pdf())\n #prior_p = np.log(self.normal_prior(prior_mu,prior_var))\n xform = [self.sum_exp(t) for t in times]\n lp = scipy.stats.norm(xform,np.sqrt(self.var)).pdf(samples)\n sample_p =np.sum(np.log(lp))\n ll = prior_p + sample_p\n\n if np.isnan(ll):\n return -np.infty\n return ll", "def _log_likelihood_poisson(self, df, dfo, n_bins=10):\n cond = df[\"selected_jig\"].values == 1\n range = parameter_ranges['uae'], parameter_ranges['rec']\n\n uae_obs = dfo[\"mueff_av\"].values\n rec_obs = dfo[\"rec_arcsec\"].values\n obs, xedges, yedges = np.histogram2d(uae_obs, rec_obs, range=range, bins=n_bins)\n\n uae_mod = df[\"uae_obs_jig\"].values[cond]\n rec_mod = df[\"rec_obs_jig\"].values[cond]\n model, _, _ = np.histogram2d(uae_mod, rec_mod, range=range, bins=n_bins, density=True)\n\n # Rescale model by number of observations\n model = model.astype(\"float\") * dfo.shape[0]\n\n # Calculate Poisson probability for each bin\n obs = obs.reshape(-1).astype(\"float\")\n model = model.reshape(-1)\n probs = stats.poisson(mu=model).pmf(obs)\n\n # Return overall log likelihood\n return np.log(probs).sum()", "def log_poisson_obs_p(nobs:int, nexp:float, nexperr:float) -> Tuple[float,float]:\n if nobs == 0:\n # p=1, 1-p=0 --> logp=0,log(1-p)=-inf\n return (0, -np.inf)\n\n if nexperr > 0:\n nexpalt = nexp if nexp>0 else nexperr\n tau = nexpalt/(nexperr*nexperr)\n b = nexpalt*tau+1\n x = 1/(1+tau)\n tlogp = log_incompbeta(nobs, b, x)\n else: # assume error==0. 
nobs>0 at this stage\n logp = stats.poisson.logsf(nobs-1, nexp)\n p = stats.poisson.sf(nobs-1, nexp)\n tlogp = (logp, np.log(1-p))\n\n return tlogp", "def poisson_pdf(x, u, log=False):\n #return np.exp(-u)*(u**x)/factorial(x)\n #return np.exp(-u)*(u**x)/gamma(x+1)\n if log:\n return poisson.logpmf(x, u)\n return poisson.pmf(x, u)", "def log_likelihood(self):\n\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood", "def log_likelihood(self, log_occr_array=None):\n\n if log_occr_array is not None:\n # Catch invalid occurrence rates for zero likelihood\n try:\n self.log_occr = log_occr_array\n except InvalidOccurrenceRate:\n return -np.inf\n\n # N_exp\n N_exp = self.calc_integral() * self._N_stars\n\n # Product terms\n # TODO:Check that the array broadcasting works here\n # Shape of s_terms should be [N_planets, NR, NP]\n s_terms = self.H_array * self.F_array * self.occr\n\n if tf.is_tensor(self.occr):\n ps_terms = tf.reduce_sum(s_terms, axis=(-1, -2))\n product_term = tf.reduce_sum(tf.math.log(ps_terms))\n log_ll_value = product_term - N_exp\n else:\n product_term = np.log(s_terms.sum(axis=(-1, -2))).sum()\n log_ll_value = product_term - N_exp\n\n # BUG TODO\n if np.isnan(log_ll_value):\n warnings.warn(\".likelihood value is nan.\")\n import pdb; pdb.set_trace()\n\n # A nan value is possible when some of the occr are too high\n log_ll_value = -np.inf if np.isnan(log_ll_value) else log_ll_value\n\n return log_ll_value", "def log_likelihood(parameters):\n if len(copula.bounds_param) == 1:\n params = [parameters]\n else:\n param1, param2 = parameters\n params = [param1, param2]\n logl = -np.sum(np.log(copula.get_pdf(psd_obs[0], psd_obs[1], params)))\n return logl", "def log_likelihood(self, log_occr_array=None):\n\n if log_occr_array is not None:\n # Catch invalid occurrence rates for zero likelihood\n try:\n self.log_occr = log_occr_array\n except InvalidOccurrenceRate:\n return -np.inf\n\n # N_exp\n N_exp = self.calc_integral() * self._N_stars\n\n # Product terms\n # TODO:Check that the array broadcasting works here\n # Shape of s_terms should be [N_planets, NR, NP]\n s_terms = self.H_array * self.F_array * self.occr_grid\n\n if tf.is_tensor(self.occr):\n ps_terms = tf.reduce_sum(s_terms, axis=(-1, -2))\n product_term = tf.reduce_sum(tf.math.log(ps_terms))\n log_ll_value = product_term - N_exp\n else:\n product_term = np.log(s_terms.sum(axis=(-1, -2))).sum()\n log_ll_value = product_term - N_exp\n\n # BUG TODO\n if np.isnan(log_ll_value):\n warnings.warn(\".likelihood value is nan.\")\n import pdb; pdb.set_trace()\n\n # A nan value is possible when some of the occr are too high\n log_ll_value = -np.inf if np.isnan(log_ll_value) else log_ll_value\n\n return log_ll_value", "def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z", "def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood", "def log_likelihood(self, points):\n\t\tpoint_set = list(points)\n\t\tlog_probabilities = [np.log(self.density(point)) for point in point_set]\n\t\treturn sum(log_probabilities)", "def log_prob_from_logits(x):\n axis = len(x.shape) - 1\n m = x.max(dim=axis, keepdim=True)[0]\n return x - m - torch.log(torch.exp(x - m).sum(dim=axis, keepdim=True))", 
"def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z", "def log_gaussian_density(x, mu, L):\n\n D = x.shape[-1]\n # print(\"x shape:\", x.shape)\n # print(\"mu shape:\", mu.shape)\n # print(\"L shape:\", L.shape)\n\n a = np.linalg.solve(L, x - mu) # (..., K)-array\n\n logp = - 0.5 * D * np.log(2.0 * np.pi) - np.sum(np.log(np.diagonal(L))) \\\n - 0.5 * np.sum(a**2.0, axis=-1) # (...)-array; sums only the dimension of the Gaussian vector\n\n return logp", "def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)" ]
[ "0.85983634", "0.71267134", "0.70490605", "0.70171106", "0.70030177", "0.70030177", "0.70030177", "0.69289047", "0.6853471", "0.68229437", "0.6710962", "0.66251004", "0.6608753", "0.66048336", "0.66042256", "0.6581323", "0.655466", "0.6473782", "0.6455808", "0.64403427", "0.63981444", "0.6397833", "0.6391459", "0.6378609", "0.635937", "0.6355459", "0.6353233", "0.6350623", "0.63240045", "0.63116306" ]
0.8131659
1
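A quick sanity check of the poisson_log_likelihood record above is to compare it against scipy.stats.poisson.logpmf. The sketch below assumes np is jax.numpy and lax is jax.lax, as the snippet's use of lax.lgamma implies; the test values (k=3, rate=2.5) are arbitrary.

import jax.numpy as np
from jax import lax
from scipy import stats

def poisson_log_likelihood(x, log_rate):
    # k*log(r) - r - log(k!) with r = exp(log_rate)
    return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)

x, rate = 3.0, 2.5
print(float(poisson_log_likelihood(x, np.log(rate))))  # about -1.543
print(stats.poisson.logpmf(3, rate))                   # about -1.543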
Loglikelihood under a multidimensional Gaussian distribution with diagonal covariance. Returns the loglikelihood for the multidim distribution.
def diag_multidim_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin): return np.sum(diag_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin), axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood", "def log_gaussian_density(x, mu, L):\n\n D = x.shape[-1]\n # print(\"x shape:\", x.shape)\n # print(\"mu shape:\", mu.shape)\n # print(\"L shape:\", L.shape)\n\n a = np.linalg.solve(L, x - mu) # (..., K)-array\n\n logp = - 0.5 * D * np.log(2.0 * np.pi) - np.sum(np.log(np.diagonal(L))) \\\n - 0.5 * np.sum(a**2.0, axis=-1) # (...)-array; sums only the dimension of the Gaussian vector\n\n return logp", "def diag_gaussian_log_likelihood(z, mean=0.0, logvar=0.0, varmin=1e-16):\n logvar_wm = np.log(np.exp(logvar) + varmin)\n return (-0.5 * (logvar + np.log(2*np.pi) +\n np.square((z-mean)/( np.exp(0.5*(logvar_wm))))))", "def log_multivariate_normal_density_diag(X, means, covars):\n n_samples, n_dim = X.shape\n lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)\n + np.sum((means ** 2) / covars, 1)\n - 2 * np.dot(X, (means / covars).T)\n + np.dot(X ** 2, (1.0 / covars).T))\n return lpr", "def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)", "def log_likelihood(X, mu, sigma, phi):\n ll = None\n\n #######################################################################\n # TODO: #\n # Compute the log-likelihood of the data under the current model. #\n # This is used to check for convergnence of the algorithm. 
#\n #######################################################################\n\n ll = np.zeros((X.shape[0], 1))\n k = mu.shape[0]\n\n for i in range(k):\n ll += multivariate_normal(mu[i, :], sigma[i]).pdf(X)[:, np.newaxis]*phi[i]\n\n ll = sum(np.log(ll))\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n return ll", "def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)", "def log_likelihood(mu, sigma, y, T):\n ll = 0.\n for yi, Ti in zip(y, T):\n d = yi.size\n log_det_cov = np.linalg.slogdet(sigma[Ti])[1]\n y_minus_mean = yi - mu[Ti]\n term3 = np.dot(y_minus_mean.T.ravel(),\n np.linalg.solve(sigma[Ti], y_minus_mean.T).ravel())\n ll += (-0.5 * d * np.log(2 * np.pi) - 0.5 * log_det_cov - 0.5 * term3)\n return ll", "def MVN_log_likelihood(X, model):\n D, M = X.shape\n X_normalized = normalize_log_likelihoods(X.copy())\n mvn = multivariate_normal(mean=model.mean, cov=model.cov)\n return mvn.logpdf(X_normalized.T).sum()\n # log_2pi = D * np.log(2 * np.pi)\n # log_det = np.log(np.linalg.det(model.cov))\n # residuals = calc_residuals(X_normalized, model.mean, \"minus\")\n # mahalanobis_distance = np.dot(np.dot(residuals.T, np.linalg.inv(model.cov)), residuals)\n # return -0.5 * (log_2pi + log_det + mahalanobis_distance).sum()", "def log_likelihood(X, k, means, cov):\n ll = np.zeros((len(X), k))\n for i in range(len(X)):\n for j in range(k):\n # TODO: scipy implement myself ?\n likel = scipy.stats.norm.pdf(X[i], means[j], np.sqrt(cov[j]))\n ll[i, j] = np.log(likel)\n\n return ll", "def compute_log_marginal_likelihood(\n K_i: torch.Tensor,\n logDetK: torch.Tensor,\n y: torch.Tensor,\n normalize: bool = True,\n log_prior_dist=None,\n):\n lml = (\n -0.5 * y.t() @ K_i @ y\n + 0.5 * logDetK\n - y.shape[0]\n / 2.0\n * torch.log(\n 2\n * torch.tensor(\n np.pi,\n )\n )\n )\n if log_prior_dist is not None:\n lml -= log_prior_dist\n return lml / y.shape[0] if normalize else lml", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z", "def log_likelihood(self) -> tf.Tensor:\n # K⁻¹ + GᡀΣ⁻¹G = LLα΅€.\n l_post = self._k_inv_post.cholesky\n num_data = self.observations_index.shape[0]\n\n # HΞΌ [..., num_transitions + 1, output_dim]\n marginal = self.emission.project_state_to_f(self.prior_ssm.marginal_means)\n marginal = self._drop_batch_shape(marginal)\n\n # y = obs - HΞΌ [..., num_transitions + 1, output_dim]\n disp = self.observations - marginal\n disp_data = self.sparse_observations - self.dense_to_sparse(marginal)\n\n # cst is the constant term for a gaussian log likelihood\n cst = (\n -0.5 * np.log(2 * np.pi) * tf.cast(self.emission.output_dim * num_data, default_float())\n )\n\n term1 = -0.5 * tf.reduce_sum(\n input_tensor=tf.einsum(\"...op,...p,...o->...o\", self._r_inv_data, disp_data, disp_data), axis=[-1, -2]\n )\n\n # term 2 is: Β½|L⁻¹(GᡀΣ⁻¹)y|Β²\n # (GᡀΣ⁻¹)y [..., num_transitions + 1, state_dim]\n obs_proj = self._back_project_y_to_state(disp)\n\n # Β½|L⁻¹(GᡀΣ⁻¹)y|Β² [...]\n term2 = 0.5 * 
tf.reduce_sum(\n input_tensor=tf.square(l_post.solve(obs_proj, transpose_left=False)), axis=[-1, -2]\n )\n\n ## term 3 is: Β½log |K⁻¹| - log |L| + Β½ log |Σ⁻¹|\n # where log |Σ⁻¹| = num_data * log|R⁻¹|\n term3 = (\n 0.5 * self.prior_ssm.log_det_precision()\n - l_post.abs_log_det()\n + 0.5 * self._log_det_observation_precision\n )\n\n return tf.reduce_sum(cst + term1 + term2 + term3)", "def log_marginal_likelihood(self) -> tf.Tensor:\n L = tf.linalg.cholesky(self.likelihood.add_to(self.KXX))\n return tf.reduce_sum(multivariate_normal(self._Y, self._mean, L))", "def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z", "def log_marginal_likelihood(self, eval_gradient=False):\n A, B, mu_tilda, gamma_tilda, r_grid, n_grid = self.calc_tau(return_all=True)\n\n gamma = self.GAMMA\n gamma_y = self.GAMMA_Y\n\n A1 = np.copy(self.baseTau0)\n A1[np.diag_indices_from(A1)] = 2 * self.ndim\n\n # A1 = (A - B) / gamma_y\n A2 = np.diag(n_grid)\n\n B_inv = np.linalg.inv(B)\n A_inv = np.linalg.inv(A)\n\n M_lambda = B - B.dot(np.linalg.inv(A).dot(B))\n\n # log_likelihood = mu_tilda.dot(M_lambda).dot(\n # mu_tilda[:, np.newaxis]) - np.nan_to_num(np.log(np.linalg.det(M_lambda)))\n log_likelihood = mu_tilda.dot(M_lambda).dot(mu_tilda[:, np.newaxis])\n\n # log_likelihood = -log_likelihood\n print('log_likelihood: %f' % log_likelihood)\n\n Lambda = B - B.dot(A_inv.dot(B))\n Lambda_inv = np.linalg.inv(Lambda)\n\n M_gamma = A2 - A2.dot(2 * gamma * A_inv - (gamma ** 2) * A_inv.dot(A2).dot(A_inv)).dot(A2)\n\n mean_r_grid = np.nan_to_num(np.array([r.sum() for r in r_grid]) / n_grid)\n\n log_likelihood_grad_gamma = mean_r_grid.dot(M_gamma.dot(mean_r_grid[:, np.newaxis]))[0]\n\n tmpMat = A2.dot(A_inv).dot(A1).dot(A_inv).dot(A2)\n\n log_likelihood_grad_gamma_y = (gamma ** 2) * (\n np.trace(Lambda_inv.dot(tmpMat) + mean_r_grid.dot(tmpMat.dot(mean_r_grid[:, np.newaxis]))[0]))\n\n log_likelihood_gradient = np.array([log_likelihood_grad_gamma, log_likelihood_grad_gamma_y])\n print('log_likelihood_grad: %s' % log_likelihood_gradient)\n\n if eval_gradient:\n return log_likelihood, log_likelihood_gradient\n else:\n return log_likelihood", "def kl_gaussian_gaussian_analytic(mu_q, logvar_q, mu_p, logvar_p):\n # init\n batch_size = mu_q.size(0)\n input_size = mu_q.size(1)\n mu_q = mu_q.view(batch_size, -1)\n logvar_q = logvar_q.view(batch_size, -1)\n mu_p = mu_p.view(batch_size, -1)\n logvar_p = logvar_p.view(batch_size, -1) \n\n # kld\n cov_q = torch.exp(logvar_q)\n cov_p = torch.exp(logvar_p)\n cov_p_inverse = 1 / cov_p\n mu_diff = mu_p - mu_q\n log_det_cov_p = torch.sum(logvar_p, dim=1)\n log_det_cov_q = torch.sum(logvar_q, dim=1)\n trace_det = torch.sum(cov_p_inverse * cov_q, dim=1)\n fourth_term = torch.sum(mu_diff * cov_p_inverse * mu_diff, dim=1)\n kl_div = 0.5 * (log_det_cov_p - log_det_cov_q - input_size + trace_det + fourth_term)\n return kl_div", "def log_likelihood(X, Z, variable_types):\n\tk = Z['pi_unconstrained'].shape[1]+1 # the number of mixture components\n\t## We gather the log probabilities of each indiv in batch for each mixture component into\n\t## a matrix of size (B x k), where B is the batch size.\n\tlogps = torch.zeros([len(X), k])\n\t## First insert the mixture weight contribution to the array\n\tlogps += logsoftmax(Z['pi_unconstrained'], dim=-1)\n\t## Next loop over the features and sum the contributions to logps\n\tfor i, (key, z) in enumerate(Z.items()):\n\t\tif key not in ['pi_unconstrained']:\n\t\t\tdata = 
torch.Tensor(X[key].values).unsqueeze(-1)\n\t\t\tdist = variable_types[key]\n\t\t\tif dist == 'Categorical':\n\t\t\t\talpha = softmax(z, dim=-1, additional=-50.)\n\t\t\t\tlogps += Categorical(probs = alpha).log_prob(data)\n\t\t\telif dist == 'Bernoulli':\n\t\t\t\ttheta = z\n\t\t\t\tlogps += Bernoulli(logits = theta).log_prob(data)\n\t\t\telif dist == 'Beta':\n\t\t\t\talpha, beta = torch.exp(z).transpose(0,1)\n\t\t\t\tlogps += Beta(alpha, beta).log_prob(data)\n\t## Compute logsumexp over the mixture components and return the sum over data elements.\n\tlogp = torch.logsumexp(logps, dim=-1)\n\treturn logp.sum()", "def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)", "def log_gaussian_likelihood(x, mu, log_std):\n log_gaussian_prob = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 - log_std - 0.5 * np.log(2 * np.pi))\n return tf.reduce_sum(log_gaussian_prob, axis=1)", "def batched_gaussian_ll(self, mean, sigma, x):\n if 0 in sigma:\n print('Zero occurs in diagonal sigma matrix. (batched gaussian ll)')\n if 0 in sigma ** 2:\n print('Zero occurs after squaring sigma matrix. (batched gaussian ll)')\n\n inv_diag_cov = diagonalise(1 / (sigma ** 2), batch=True,\n device=self.device) # a 2d batched matrix----> 3d batched diagonal tensor\n\n exp = ((x - mean).unsqueeze(-2)) @ inv_diag_cov @ ((x - mean).unsqueeze(-1)) #\n exp = exp.squeeze() # [batch]\n # print(exp)\n\n if 0 in torch.prod(sigma ** 2, dim=-1):\n print('Zero occurs when calculating determinant of diagonal covariance. (batched gaussian ll)')\n\n logdet = torch.sum(2 * torch.log(sigma), dim=-1)\n # logdet = torch.log(torch.prod(sigma**2, dim = -1)) #product of all diagonal variance for each batch, shape [batch]\n # print('logdet=', logdet)\n n = mean.size()[-1]\n\n return -(n / 2) * np.log(2 * np.pi) - 0.5 * logdet - 0.5 * exp # need double checking", "def correlated_gaussian_loglikelihood(xs, means, cov):\n lu,piv=sl.lu_factor(cov)\n\n lambdas=np.diag(lu)\n\n ndim=xs.shape[0]\n \n ds=(xs-means)*sl.lu_solve((lu,piv), xs-means)/2.0\n\n return -np.log(2.0*np.pi)*(ndim/2.0)-0.5*np.sum(np.log(lambdas))-np.sum(ds)", "def log_likelihood(self):\r\n assert not self.likelihood.is_heteroscedastic\r\n A = -0.5*self.batchsize*self.output_dim*(np.log(2.*np.pi) - np.log(self.likelihood.precision))\r\n B = -0.5*self.likelihood.precision*self.output_dim*self.trace_K\r\n Kmm_logdet = 2.*np.sum(np.log(np.diag(self.Lm)))\r\n C = -0.5*self.output_dim*self.data_prop*(Kmm_logdet-self.q_u_logdet - self.num_inducing)\r\n C += -0.5*np.sum(self.LQL * self.B)\r\n D = -0.5*self.likelihood.precision*self.likelihood.trYYT\r\n E = np.sum(self.V*self.projected_mean)\r\n return (A+B+C+D+E)/self.data_prop", "def gmmloglik(log_emlik, weights):\n N,_ = log_emlik.shape;\n ll = 0;\n for i in range(N):\n ll += logsumexp(log_emlik[i, :] + np.log(weights));\n return ll", "def batched_gaussian_ll(self, mean, sigma, x):\n # mean = mean.to(self.device)\n # sigma = sigma.to(self.device)\n if 0 in sigma:\n # sigma = sigma + 1e-10\n print('Zero occurs in diagonal sigma matrix. (batched gaussian ll)')\n if 0 in sigma ** 2:\n print('Zero occurs after squaring sigma matrix. 
(batched gaussian ll)')\n\n inv_diag_cov = self.diagonalise(1 / (sigma ** 2),\n batch=True) # a 2d batched matrix----> 3d batched diagonal tensor\n\n exp = ((x - mean).unsqueeze(-2)) @ inv_diag_cov @ ((x - mean).unsqueeze(-1)) #\n exp = exp.squeeze() # [batch]\n # print(exp)\n\n if 0 in torch.prod(sigma ** 2, dim=-1):\n print('Zero occurs when calculating determinant of diagonal covariance. (batched gaussian ll)')\n\n logdet = torch.sum(2 * torch.log(sigma), dim=-1)\n # logdet = torch.log(torch.prod(sigma**2, dim = -1)) #product of all diagonal variance for each batch, shape [batch]\n # print('logdet=', logdet)\n n = mean.size()[-1]\n\n return -(n / 2) * np.log(2 * np.pi) - 0.5 * logdet - 0.5 * exp # need double checking", "def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood", "def kl_gaussian_gaussian_mc(mu_q, logvar_q, mu_p, logvar_p, num_samples=1):\n # init\n batch_size = mu_q.size(0)\n input_size = np.prod(mu_q.size()[1:])\n mu_q = mu_q.view(batch_size, -1).unsqueeze(1).expand(batch_size, num_samples, input_size)\n logvar_q = logvar_q.view(batch_size, -1).unsqueeze(1).expand(batch_size, num_samples, input_size)\n mu_p = mu_p.view(batch_size, -1).unsqueeze(1).expand(batch_size, num_samples, input_size)\n logvar_p = logvar_p.view(batch_size, -1).unsqueeze(1).expand(batch_size, num_samples, input_size)\n\n # kld\n sigma_q = torch.sqrt(torch.exp(logvar_q))\n sigma_p = torch.sqrt(torch.exp(logvar_p))\n q_dist = torch.distributions.normal.Normal(mu_q, sigma_q)\n p_dist = torch.distributions.normal.Normal(mu_p, sigma_p)\n z = q_dist.rsample()\n q_z = q_dist.log_prob(z)\n p_z = p_dist.log_prob(z)\n kld = torch.mean(q_z - p_z, dim=(1, 2))\n return kld", "def log_likelihood(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n log_likelihood = numpy.sum(norm.logpdf(X,parameters['mu'],sigma))\n\n return log_likelihood", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = 
mu2_loc[:, :-1, :].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, :].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik" ]
[ "0.67399085", "0.6726065", "0.6718359", "0.6690975", "0.6636074", "0.6582468", "0.6578395", "0.64080966", "0.63837916", "0.6369224", "0.63582486", "0.63048327", "0.6293092", "0.6288842", "0.6270701", "0.6242738", "0.6236148", "0.61677235", "0.6166594", "0.6087776", "0.6073809", "0.60450035", "0.60420734", "0.60407466", "0.60276836", "0.60249776", "0.6000477", "0.59964347", "0.59776217", "0.5935663" ]
0.7078497
0
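The diagonal-covariance record above simply sums a per-dimension Gaussian log-density over the vector dimension. A minimal check against scipy.stats.multivariate_normal, assuming np is jax.numpy and reusing the diag_gaussian_log_likelihood definition that appears in the negatives list (varmin is a small numerical floor on the variance):

import jax.numpy as np
import numpy as onp
from scipy import stats

def diag_gaussian_log_likelihood(z, mean=0.0, logvar=0.0, varmin=1e-16):
    logvar_wm = np.log(np.exp(logvar) + varmin)
    return -0.5 * (logvar + np.log(2 * np.pi)
                   + np.square((z - mean) / np.exp(0.5 * logvar_wm)))

def diag_multidim_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin):
    return np.sum(diag_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin), axis=0)

z = np.array([0.3, -1.2])
mean = np.array([0.0, -1.0])
logvar = np.array([0.1, -0.5])
ours = diag_multidim_gaussian_log_likelihood(z, mean, logvar, 1e-16)
ref = stats.multivariate_normal.logpdf(
    onp.asarray(z), onp.asarray(mean), onp.diag(onp.exp(onp.asarray(logvar))))
print(float(ours), float(ref))  # the two agree up to the tiny varmin floor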
Sample KL between gaussian and gaussian mixture many times and average. See comments for kl_sample_gmm for full explanation.
def kl_samples_gmm(keys_sx2, q_mean_u, q_logvar_u, gmm_resps_c, gmm_p_mean_cxu, gmm_p_logvar_cxu, varmin): sample_kl = batch_samples_kl_sample_gmm kl_samples = sample_kl(keys_sx2, q_mean_u, q_logvar_u, gmm_resps_c, gmm_p_mean_cxu, gmm_p_logvar_cxu, varmin) kl = np.mean(kl_samples) return kl
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kl_sample_gmm(key, q_mean_u, q_logvar_u,\n gmm_resps_c, gmm_p_mean_cxu, gmm_p_logvar_cxu, varmin):\n\n # Handle case of one gaussian in the mixture with closed form equations.\n if gmm_resps_c.shape[0] == 1:\n return np.sum(kl_gauss_gauss(q_mean_u, q_logvar_u,\n gmm_p_mean_cxu[0,:], gmm_p_logvar_cxu[0,:],\n varmin))\n\n # Otherwise sample the KL\n ll = diag_gaussian_log_likelihood\n gmm_ll = gmm_diag_gaussian_log_likelihood\n sample = diag_gaussian_sample\n keys = random.split(key, 2)\n\n z_u = sample(keys[0], q_mean_u, q_logvar_u, varmin)\n logq_u = ll(z_u, q_mean_u, q_logvar_u, varmin) # over multigauss dim\n\n assert varmin <= 1e-15, \"Very small or you need to know what you are doing.\"\n llp_each_gaussian_cxu = gmm_ll(z_u, gmm_p_mean_cxu, gmm_p_logvar_cxu, varmin)\n log_normed_resps_cx1 = np.expand_dims(log_softmax(gmm_resps_c), axis=1)\n logp_u = logsumexp(llp_each_gaussian_cxu + log_normed_resps_cx1, axis=0)\n\n kl_estimate = np.sum(logq_u - logp_u, axis=0)\n return kl_estimate", "def kl_gaussian_gaussian_mc(mu_q, logvar_q, mu_p, logvar_p, num_samples=1):\n # init\n batch_size = mu_q.size(0)\n input_size = np.prod(mu_q.size()[1:])\n mu_q = mu_q.view(batch_size, -1).unsqueeze(1).expand(batch_size, num_samples, input_size)\n logvar_q = logvar_q.view(batch_size, -1).unsqueeze(1).expand(batch_size, num_samples, input_size)\n mu_p = mu_p.view(batch_size, -1).unsqueeze(1).expand(batch_size, num_samples, input_size)\n logvar_p = logvar_p.view(batch_size, -1).unsqueeze(1).expand(batch_size, num_samples, input_size)\n\n # kld\n sigma_q = torch.sqrt(torch.exp(logvar_q))\n sigma_p = torch.sqrt(torch.exp(logvar_p))\n q_dist = torch.distributions.normal.Normal(mu_q, sigma_q)\n p_dist = torch.distributions.normal.Normal(mu_p, sigma_p)\n z = q_dist.rsample()\n q_z = q_dist.log_prob(z)\n p_z = p_dist.log_prob(z)\n kld = torch.mean(q_z - p_z, dim=(1, 2))\n return kld", "def kl_gaussian_gaussian_analytic(mu_q, logvar_q, mu_p, logvar_p):\n # init\n batch_size = mu_q.size(0)\n input_size = mu_q.size(1)\n mu_q = mu_q.view(batch_size, -1)\n logvar_q = logvar_q.view(batch_size, -1)\n mu_p = mu_p.view(batch_size, -1)\n logvar_p = logvar_p.view(batch_size, -1) \n\n # kld\n cov_q = torch.exp(logvar_q)\n cov_p = torch.exp(logvar_p)\n cov_p_inverse = 1 / cov_p\n mu_diff = mu_p - mu_q\n log_det_cov_p = torch.sum(logvar_p, dim=1)\n log_det_cov_q = torch.sum(logvar_q, dim=1)\n trace_det = torch.sum(cov_p_inverse * cov_q, dim=1)\n fourth_term = torch.sum(mu_diff * cov_p_inverse * mu_diff, dim=1)\n kl_div = 0.5 * (log_det_cov_p - log_det_cov_q - input_size + trace_det + fourth_term)\n return kl_div", "def gaussian_kl_np(mu0, log_std0, mu1, log_std1):\n var0, var1 = np.exp(2 * log_std0), np.exp(2 * log_std1)\n pre_sum = 0.5*(((mu1- mu0)**2 + var0)/(var1+EPS) - 1) + log_std1 - log_std0\n all_kls = pre_sum\n #all_kls = np.mean(all_kls)\n all_kls = np.clip(all_kls, 0, 1/EPS) ### for stability\n return all_kls", "def gaussian_kl_div(mean_0, cov_0, mean_1, cov_1, dim):\n mean_diff = mean_1 - mean_0\n cov_1_inv = tf.reciprocal(cov_1)\n log_cov_1_det = tf.reduce_sum(tf.log(cov_1), axis=[1])\n log_cov_0_det = tf.reduce_sum(tf.log(cov_0), axis=[1])\n log_term = log_cov_1_det - log_cov_0_det\n trace_term = tf.reduce_sum(cov_1_inv * cov_0, axis=[1])\n square_term = tf.reduce_sum(mean_diff * cov_1_inv * mean_diff, axis=[1])\n kl_div = 0.5 * (trace_term + square_term - dim + log_term)\n return kl_div", "def batch_kl_sample_gmm_pmap_pre(keys_8xbd8xsx2, z_mean_8xbd8xu,\n z_logvar_8xbd8xu, resps_c, gmm_z_mean_cxu,\n 
gmm_z_logvar_cxu, varmin):\n\n\n # This fun gets around jax complaining about vmap not having these parameters.\n def batch_kl_sample_gmm2(keys, z_mean, z_logvar, resps,\n gmm_z_mean, gmm_z_logvar, varmin):\n return batch_kl_sample_gmm(keys, z_mean, z_logvar, resps,\n gmm_z_mean, gmm_z_logvar, varmin)\n\n kwargs = {'resps' : resps_c,\n 'gmm_z_mean' : gmm_z_mean_cxu,\n 'gmm_z_logvar' : gmm_z_logvar_cxu, 'varmin' : varmin}\n batch_samples_kl_sample_pre_pmap = partial(batch_kl_sample_gmm2, **kwargs)\n\n pmap_samples = pmap(batch_samples_kl_sample_pre_pmap)\n\n kl_samples_8xbd8 = pmap_samples(keys_8xbd8xsx2, z_mean_8xbd8xu, z_logvar_8xbd8xu)\n kl = np.mean(kl_samples_8xbd8)\n return kl", "def factorised_kl_gaussian(dist1_mean,\n dist1_covariance_or_scale,\n dist2_mean,\n dist2_covariance_or_scale,\n both_diagonal=False):\n if both_diagonal:\n dist1_mean_rank = dist1_mean.get_shape().ndims\n dist1_covariance_or_scale.get_shape().assert_has_rank(dist1_mean_rank)\n dist2_mean_rank = dist2_mean.get_shape().ndims\n dist2_covariance_or_scale.get_shape().assert_has_rank(dist2_mean_rank)\n\n dist_type = tfp.distributions.MultivariateNormalDiag\n else:\n dist_type = tfp.distributions.MultivariateNormalFullCovariance\n\n # Recreate the distributions but with stop gradients on the mean and cov.\n dist1_stop_grad_mean = dist_type(\n tf.stop_gradient(dist1_mean), dist1_covariance_or_scale)\n dist2 = dist_type(dist2_mean, dist2_covariance_or_scale)\n\n # Now create a third distribution with the mean of dist1 and the variance of\n # dist2 and appropriate stop_gradients.\n dist3 = dist_type(dist1_mean, dist2_covariance_or_scale)\n dist3_stop_grad_mean = dist_type(\n tf.stop_gradient(dist1_mean), dist2_covariance_or_scale)\n\n # Finally get the two components of the KL between dist1 and dist2\n # using dist3\n kl_mean = tfp.distributions.kl_divergence(dist3, dist2)\n kl_cov = tfp.distributions.kl_divergence(dist1_stop_grad_mean,\n dist3_stop_grad_mean)\n return kl_mean, kl_cov", "def KL_divergence(model_1, model_2, samples):\n posterior_1 = create_posterior_object(model_1, samples)\n posterior_2 = create_posterior_object(model_2, samples)\n return posterior_1.KL(posterior_2)", "def sample_k(items, L, k, max_nb_iterations=None, rng=np.random):\n #import pdb; pdb.set_trace()\n # if L is infinite (some dims of the space are continuous)\n sample_continuous = type(L) == type({})\n\n print_debug = False\n\n if max_nb_iterations is None:\n import math\n max_nb_iterations = 5*int(len(L)*math.log(len(L)))\n \n if not sample_continuous: \n X = sample_discrete_L(L,k,rng,items)\n else:\n initial = sample_continuous_L(L,k)\n\n\n # if Y has very close to zero determinant, resample it\n num_Y_resampled = 0\n tolerance = 10**-100\n while det_X(X, L) < tolerance:\n initial = rng.choice(range(len(items)), size=k, replace=False)\n X = [False] * len(items)\n for i in initial:\n X[i] = True\n X = np.array(X)\n num_Y_resampled += 1\n if num_Y_resampled > (1.0/2)*len(L):\n print(\"We've tried to sample Y such that L_Y is invertible (has det(L_Y) > 0)\" + \n \" but after {} samples we didn't find any with det(L_Y) > {}.\".format(\n (1.0/2)*len(L),tolerance))\n raise ZeroDivisionError(\"The matrix L is likely low rank => det(L_Y) = 0.\")\n\n if print_debug:\n numerator_counter = 0\n denom_counter = 0\n num_neg_counter = 0\n denom_neg_counter = 0\n p_neg_counter = 0\n both_neg_counter = 0\n \n steps_taken = 0\n num_Y_not_invert = 0\n for i in range(max_nb_iterations):\n \n u = rng.choice(np.arange(len(items))[X])\n v = 
rng.choice(np.arange(len(items))[~X])\n Y = X.copy()\n Y[u] = False\n L_Y = L[Y, :]\n L_Y = L_Y[:, Y]\n\n # to check determinants\n if print_debug:\n Y_cur = X.copy()\n L_Y_cur = L[Y_cur,:]\n L_Y_cur = L_Y_cur[:,Y_cur]\n \n Y_next = X.copy()\n Y_next[u] = False\n Y_next[v] = True\n L_Y_next = L[Y_next,:]\n L_Y_next = L_Y_next[:,Y_next]\n\n \n try:\n L_Y_inv = np.linalg.inv(L_Y)\n except:\n num_Y_not_invert += 1\n continue\n #import pdb; pdb.set_trace()\n\n\n c_v = L[v:v+1, :]\n c_v = c_v[:, v:v+1]\n b_v = L[Y, :]\n b_v = b_v[:, v:v+1]\n c_u = L[u:u+1, :]\n c_u = c_u[:, u:u+1]\n b_u = L[Y, :]\n b_u = b_u[:, u:u+1]\n\n\n numerator = c_v - np.dot(np.dot(b_v.T, L_Y_inv.T), b_v)\n denom = c_u - np.dot(np.dot(b_u.T, L_Y_inv.T), b_u)\n\n if print_debug:\n if numerator < 0 and denom > 0:\n num_neg_counter += 1\n if numerator < 10**-9:\n numerator_counter += 1\n if denom < 0 and numerator > 0:\n denom_neg_counter += 1\n if denom < 10**-9:\n denom_counter += 1\n \n if numerator < 0 and denom < 0:\n both_neg_counter += 1\n\n\n p = min(1, numerator/denom)\n \n # to print if we have some problems with small or zero determinants / eigenvalues\n if print_debug:\n if numerator < 0 or denom < 0 or p < 0:\n print i, p, numerator, denom#u, v, [j for j, b_var in enumerate(Y) if b_var]\n print(\"{}\\t->\\t{}\".format(np.linalg.det(L_Y_cur), np.linalg.det(L_Y_next)))\n print(\"steps taken so far: {}, {}%\".format(steps_taken, round(100.0*steps_taken/i,3)))\n #import pdb; pdb.set_trace()\n\n if rng.uniform() <= p:\n steps_taken += 1\n X = Y[:]\n X[v] = True\n \n if print_debug:\n \n print(\"{}\\t->\\t{}\".format(np.linalg.det(L_Y_cur),np.linalg.det(L_Y_next)))\n\n if print_debug:\n print(\"num numerators that would be rounded to zero: {}\".format(numerator_counter))\n print(\"num denoms that would be rounded to zero: {}\".format(denom_counter))\n print(\"num_neg_counter: {}\".format(num_neg_counter))\n print(\"denom_neg_counter: {}\".format(denom_neg_counter))\n print(\"both_neg_counter: {}\".format(both_neg_counter))\n print(\"steps taken: {}\".format(steps_taken))\n \n\n if num_Y_not_invert > .5 * max_nb_iterations:\n print(\"We've tried to sample Y such that L_Y is invertible (has det(L_Y) > 0)\" + \n \" but after {} potential mcmc steps, we found L_Y not invertible {} times.\".format(\n .5 * max_nb_iterations, num_Y_not_invert))\n raise ZeroDivisionError(\"The matrix L is likely low rank => det(L_Y) = 0.\")\n\n if steps_taken == 0:\n print(\"We ran the MCMC algorithm for {} steps, but it never accepted a metropolis-hastings \" + \n \"proposal, so this is just a uniform sample.\".format(steps_taken))\n raise ZeroDivisionError(\"It's likely the matrix L is bad. The MCMC algorithm failed.\")\n\n print(\"{} steps taken by mcmc algorithm, out of {} possible steps. 
{}%\".format(steps_taken, \n max_nb_iterations, 100.0*steps_taken/max_nb_iterations))\n return np.array(items)[X]", "def gmm(X, k):\n mix = sklearn.mixture.GaussianMixture(n_components=k).fit(X)\n pi = mix.weights_\n m = mix.means_\n S = mix.covariances_\n clss = mix.predict(X)\n bic = mix.bic(X)\n\n return pi, m, S, clss, bic", "def sampling_latent(self):\n \n self.z = gaussian_sample(self.qz_m, self.qz_v)\n if self.scalings:\n self.library = gaussian_sample(self.ql_m, self.ql_v)", "def gmm_sample(key, resps_c, means_c, logvar_c, varmin=1e-16):\n keys = random.split(key, 2)\n # pick gaussian to sample\n u = random.uniform(keys[0])\n cum_resps_c = np.cumsum(softmax(resps_c))\n cidx = np.argmax(u <= cum_resps_c)\n # sample that gaussian\n return diag_gaussian_sample(keys[1], means_c[cidx], logvar_c[cidx], varmin)", "def isotropic_Gaussian(ksize=15, l=6):\n\n V = np.array([[1, 0], [0, -1]])\n D = np.array([[l, 0], [0, l]])\n Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))\n k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)\n\n return k", "def rand_k(self, k):\n\n k_N = self.prior.k_0 + self.counts[k]\n v_N = self.prior.v_0 + self.counts[k]\n m_N = self.m_N_numerators[k]/k_N\n S_N = self.S_N_partials[k] - k_N*np.square(m_N)\n\n mean = np.zeros(self.D)\n var = np.zeros(self.D)\n\n for i in range(self.D):\n var[i] = invchisquared_sample(v_N, S_N[i]/v_N, 1)[0]\n mean[i] = np.random.normal(m_N[i], np.sqrt(var[i]/k_N))\n\n return mean, var", "def KL_function_gaussian(self, H, data):\n\t\t#Initialize stuff\n\t\tn_data = float(len(data))\n\t\tn_dim = float(len(H))\n\t\t\n\t\t#Calculate the KL function by iterating\n\t\tKL_func = 0.\n\t\tfor i in xrange(int(n_data)):\n\t\t\tKL_func += np.log10( -0.9999999999999999 + np.sum( np.exp(-0.5 * np.sum( ((data[i,:] - data[:,:])/H[:])**2., axis=-1)), axis=-1) )\n\t\tKL_func /= n_data\n\t\tKL_func -= np.log10( (n_data-1.) * np.sqrt( (2.*np.pi)**n_dim * np.prod(H[:])**2.) 
)\n\n\t\treturn KL_func", "def gmm_clustering(X, K):\n\n # Initialization:\n pi = []\n mu = []\n cov = []\n for k in range(K):\n pi.append(1.0 / K)\n mu.append(list(np.random.normal(0, 0.5, 2)))\n temp_cov = np.random.normal(0, 0.5, (2, 2))\n temp_cov = np.matmul(temp_cov, np.transpose(temp_cov))\n cov.append(list(temp_cov.reshape(4)))\n #print(pi)\n ### you need to fill in your solution starting here ###\n X = np.array(X) \n num_data = len(X) #number of data points\n # Run 100 iterations of EM updates\n for t in range(100):\n like = np.zeros((num_data,1))\n post = np.zeros((K, num_data)) #stores posterior for all the classes - each row corresponding to a class k (k=1:K)\n for k in range(K):\n mu_k = np.array(mu[k]).reshape(1,2)\n #print(mu_k.shape)\n #print(X.shape)\n cov_k = np.array(cov[k]).reshape(2,2)\n #print(cov_k.shape)\n pi_k = pi[k]\n logpx_k = []\n for sample in X:\n logpx_samp = - 0.5*(np.dot(sample - mu_k, np.dot(np.linalg.inv(cov_k),np.transpose(sample - mu_k)))) - np.log(2*np.pi) - np.log(np.sqrt(np.linalg.det(cov_k))) + np.log(pi_k)\n #print(logpx_k)\n logpx_k.append(logpx_samp[0][0]) \n logpx_k = np.array(logpx_k)\n #print(logpx_k.shape)\n #print(logpx_k)\n explog_k = np.exp(logpx_k)\n #print(explog_k.shape)\n #print(post.shape)\n post[k] = explog_k\n like = np.sum(post, axis=0)\n #print(like.shape)\n #print(post.shape)\n post_nrm = post\n\n mu_new = []\n cov_new = []\n N = 0\n Nk_ls = []\n for k in range(K):\n post_nrm[:][k] = post[:][k] / like #posterior for all the classes\n \n #compute new parameters\n Nk = np.sum(post_nrm[:][k])\n #print(Nk.shape)\n N += Nk\n Nk_ls.append(Nk)\n mu_k_new = np.dot(post_nrm[:][k], X) / Nk\n mu_new.append(list(mu_k_new))\n #print(post_nrm[:][k].shape)\n cov_k_new = np.dot(np.multiply(np.transpose(X - mu_k_new), post_nrm[:][k]), X - mu_k_new) / Nk\n cov_new.append(list(cov_k_new.reshape(4)))\n\n pi_new = Nk_ls / N\n #update parameters for the next iteration \n pi = pi_new\n mu = mu_new\n cov = cov_new\n return mu, cov", "def batch_kl_sample_gmm_pmap(keys_bxsx2, z_mean_bxu, z_logvar_bxu, resps_c,\n gmm_z_mean_cxu, gmm_z_logvar_cxu, varmin):\n ndevs = 8\n B, S, _ = keys_bxsx2.shape\n keys_8xbd8xsx2 = np.reshape(keys_bxsx2, (ndevs, B // ndevs, S, 2))\n z_mean_8xbd8xu = np.reshape(z_mean_bxu, (ndevs, B // ndevs, -1))\n z_logvar_8xbd8xu = np.reshape(z_logvar_bxu, (ndevs, B // ndevs, -1))\n\n # Shard the memory, note the type that goes in vs comes out of the pmap'd lambda.\n keys_8xbd8xsx2 = pmap(lambda x : x)(keys_8xbd8xsx2)\n z_mean_8xbd8xu = pmap(lambda x: x)(z_mean_8xbd8xu)\n z_logvar_8xbd8xu = pmap(lambda x: x)(z_logvar_8xbd8xu)\n\n return batch_kl_sample_gmm_pmap_pre(keys_8xbd8xsx2, z_mean_8xbd8xu, z_logvar_8xbd8xu,\n resps_c, gmm_z_mean_cxu,\n gmm_z_logvar_cxu, varmin)", "def sample_gmm(model: 'BaseModel', data: Dict[str, torch.Tensor], n_samples: int,\n scaler: Dict[str, Union[pd.Series, xarray.Dataset]]) -> Dict[str, torch.Tensor]:\n setup = _SamplingSetup(model, data, \"gmm\")\n\n # force model into train mode if mc_dropout:\n if setup.mc_dropout:\n model.train()\n\n # make predictions:\n pred = model(data)\n\n # sample for different frequencies:\n samples = {}\n for freq_suffix in setup.freq_suffixes:\n # get predict_last_n for the given the mode:\n frequency_last_n = setup._get_frequency_last_n(freq_suffix=freq_suffix)\n\n # initialize sample_points tensor for sampling:\n sample_points = torch.zeros((setup.batch_size_data, frequency_last_n, setup.number_of_targets, n_samples))\n sample_points *= torch.tensor(float('nan')) # set 
initial sample_points to nan\n\n # GMM has 3 parts: means (m/mu), variances (s/sigma), and weights (p/pi):\n m, s, p = pred[f'mu{freq_suffix}'], \\\n pred[f'sigma{freq_suffix}'], \\\n pred[f'pi{freq_suffix}']\n\n for nth_target in range(setup.number_of_targets):\n m_target = _subset_target(m[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n s_target = _subset_target(s[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n p_target = _subset_target(p[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n\n mask_nan = ~torch.isnan(m_target[:, -1, 0])\n if any(mask_nan): # skip if the complete mini-batch is invalid\n m_sub = torch.repeat_interleave(m_target[mask_nan, :, :], n_samples, dim=0)\n s_sub = torch.repeat_interleave(s_target[mask_nan, :, :], n_samples, dim=0)\n p_sub = torch.repeat_interleave(p_target[mask_nan, :, :], n_samples, dim=0)\n\n # sample values, handle negatives and add to sample points:\n values = _sample_gaussian_mixtures(np.ones(s_sub.shape, dtype=bool), m_sub, s_sub, p_sub)\n values = _handle_negative_values(\n setup.cfg,\n values,\n sample_values=lambda ids: _sample_gaussian_mixtures(ids, m_sub, s_sub, p_sub),\n scaler=scaler,\n nth_target=nth_target)\n values = values.view(-1, n_samples, frequency_last_n).permute(0, 2, 1)\n\n sample_points[mask_nan, :, nth_target, :] = values.detach().cpu()\n\n # add sample_points to dictionary of samples:\n freq_key = f'y_hat{freq_suffix}'\n samples.update({freq_key: sample_points})\n return samples", "def resample_gmms(model_set):\n samples = np.zeros(iter_num)\n\n for i in range(iter_num):\n rand_num = random()\n # For each distribution in the model\n for gmm_distro in model_set:\n # If the random number is less than the distribution's weight, where the weight is the sum of all\n # distribution's weights so far\n if rand_num < gmm_distro[3]:\n # Then sample from the distribution and save it as the path cost, then skip to the next iteration\n samples[i] = gauss(gmm_distro[0], gmm_distro[1])\n break\n\n # plt.hist(samples, bins=50, density=True)\n # plt.show()\n\n return samples", "def test_optimalk_random_sample_data():\n from sklearn.datasets.samples_generator import make_blobs\n from gap_statistic import random_sample_data\n\n # Create data\n X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)\n\n random_data = random_sample_data(X, random_sampling='uniform')\n assert random_data.shape == X.shape, \"check sampled data shape {} vs original data shape {}\".format(X.shape, random_data.shape) \n\n random_data = random_sample_data(X, random_sampling='gaussian')\n assert random_data.shape == X.shape, \"check sampled data shape {} vs original data shape {}\".format(X.shape, random_data.shape)", "def kl_gauss_gauss(q_mean, q_logvar, p_mean, p_logvar, varmin=1e-16):\n q_logvar = np.log(np.exp(q_logvar) + varmin)\n p_logvar = np.log(np.exp(p_logvar) + varmin)\n return (0.5 * (p_logvar - q_logvar + np.exp(q_logvar - p_logvar)\n + np.square((q_mean - p_mean) / np.exp(0.5 * p_logvar)) - 1.0))", "def gaussian_kld(mu_left, logvar_left, mu_right, logvar_right):\n gauss_klds = 0.5 * (logvar_right - logvar_left +\n (tf.exp(logvar_left) / tf.exp(logvar_right)) +\n ((mu_left - mu_right)**2.0 / tf.exp(logvar_right)) - 1.0)\n assert len(gauss_klds.shape) == 2\n return tf.reduce_sum(gauss_klds, axis=1)", "def gibbs_sample(self, X):\n K = self.K # number of topics\n M, V = X.shape\n alpha = self.alpha\n lmda = self.lmda\n topics = np.arange(0, K)\n\n #initialize everything uniformly, sparse topics\n 
Beta = np.ones(shape=(K, V), dtype=float) / V\n Theta = np.ones(shape=(M, K), dtype=float)/ K\n\n #Running sum\n MC_z = np.array(range(M), dtype=object)\n MC_beta = np.zeros(shape=(K, V), dtype=float)\n MC_theta = np.zeros(shape=(M, K), dtype=float)\n\n for d in range(M):\n #allocate topics randomly -- this is really not needed in this case\n word_indices = X[d, :].nonzero()[1]\n random_ks = np.random.choice(topics, size = len(word_indices))\n Theta[d] = np.random.dirichlet(np.ones(K)*alpha)\n MC_z[d] = sp.coo_matrix((V, K), dtype=np.int8).tolil()\n for k in topics:\n Beta[k] = np.random.dirichlet(np.ones(V)*lmda)\n\n log_Xs = []\n perplexities = []\n t = 0\n for epoch in xrange(self.nr_em_epochs):\n print \"Epoch\", epoch\n t +=1\n C = np.zeros((K, V))\n for d in np.random.permutation(np.arange(M)):\n x = X[d]\n ixw = np.nonzero(x)[1]\n p_s = Beta[:, ixw].T * Theta[d, :]\n Z = [np.random.choice(topics, p=(p/np.sum(p))) for p in p_s]\n N_d = sp.coo_matrix((np.ones(len(ixw)), (ixw, Z)), shape=(V, K)).tolil()\n C = C + N_d.A.T\n # sample theta given z and beta\n c_theta = (np.sum(N_d.A, axis=0) + alpha)\n Theta[d, :] = np.random.dirichlet(c_theta)\n MC_z[d] += N_d\n\n # Sample beta given all z and thetas\n for k in topics:\n c_beta = C[k, :]\n Beta[k, :] = np.random.dirichlet(c_beta + lmda)\n\n MC_theta += Theta\n MC_beta += Beta\n\n log_X = 0\n Theta_hat = MC_theta / t\n Beta_hat = MC_beta / t\n\n for d in range(M):\n ixw = np.nonzero(X[d, :])[1]\n log_X += np.sum(_doc_probability_from_p_of_z(Theta_hat[d, :], Beta_hat[:, ixw]))\n\n log_Xs.append(log_X)\n print log_X\n perplexities.append(self._perplexity(X, log_X))\n return Theta_hat, Beta_hat, log_Xs, perplexities", "def kl_divergence(self, samples):\n # Check size of input\n if not len(samples.shape) == 2:\n raise ValueError('Given samples list must be n x 2.')\n if samples.shape[1] != self._n_parameters:\n raise ValueError(\n 'Given samples must have length ' + str(self._n_parameters))\n\n best_mode = np.zeros(samples.shape[0])\n for i in range(samples.shape[0]):\n a_sample = samples[i, :]\n a_log_pdf = -np.inf\n a_max_index = -1\n for j, var in enumerate(self._vars):\n a_test_log_pdf = var.logpdf(a_sample)\n if a_test_log_pdf > a_log_pdf:\n a_log_pdf = a_test_log_pdf\n a_max_index = j\n best_mode[i] = a_max_index\n\n kl = np.zeros(len(self._vars))\n for i in range(len(self._vars)):\n y = np.array(samples[best_mode == i, :], copy=True)\n # when a mode has no points use all samples\n if y.shape[0] == 0:\n y = np.array(samples, copy=True)\n m0 = np.mean(y, axis=0)\n s0 = np.cov(y.T)\n s1 = self._covs[i]\n m1 = self._modes[i]\n s1_inv = np.linalg.inv(s1)\n if len(np.atleast_1d(s0)) > 1:\n kl[i] = 0.5 * (\n np.trace(np.matmul(s1_inv, s0)) +\n np.matmul(np.matmul(m1 - m0, s1_inv), m1 - m0) -\n np.log(np.linalg.det(s0)) +\n np.log(np.linalg.det(s1)) -\n self._n_parameters)\n else:\n kl[i] = 0.5 * (\n np.sum(s1_inv * s0) +\n (m1 - m0) * s1_inv * (m1 - m0) -\n np.log(s0) +\n np.log(s1) -\n 1)\n return kl", "def _kld_gauss(self, mean_1, std_1, mean_2, std_2):\n kld_element = (2 * torch.log(std_2) - 2 * torch.log(std_1) + (std_1.pow(2) + (mean_1 - mean_2).pow(2)) / std_2.pow(2) - 1)\n return\t0.5 * torch.sum(kld_element)", "def kl(self):\n weights_logvar = self.weights_logvar\n kld_weights = self.prior_stdv.log() - weights_logvar.mul(0.5) + \\\n (weights_logvar.exp() + (self.weights.pow(2) - self.prior_mean)) / (\n 2 * self.prior_stdv.pow(2)) - 0.5\n kld_bias = self.prior_bias_stdv.log() - self.bias_logvar.mul(0.5) + \\\n (self.bias_logvar.exp() + 
(self.bias.pow(2) - self.prior_bias_mean)) / (\n 2 * self.prior_bias_stdv.pow(2)) \\\n - 0.5\n return kld_weights.sum() + kld_bias.sum()", "def gkern(l, sig=1.):\n\n ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)\n xx, yy = np.meshgrid(ax, ax)\n\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n\n return kernel / np.sum(kernel)", "def kl_unit_gaussian(mu, log_sigma, sigma):\n with tf.name_scope('kl_unit_gaussian') as scope:\n return -0.5 * (1 + 2 * log_sigma - mu**2 - sigma**2)", "def iBk(self, sigmasqgL=0.0, variance=False):\n if self.powerspectrum==None:\n self.average_ps()\n\n self.iBkmean=np.array([])\n self.iBksigma=np.array([])\n self.iBkdata=[]\n\n if (sigmasqgL==0.0):\n sigmasqL=np.var(self.ds)\n else:\n sigmasqL=sigmasqgL\n\n # NEED WEIGHTED AVERAGE?\n for i in range(0, len(self.klist)):\n if not(variance):\n iBkdat=np.array([self.powerspectra[j][i]*self.ds[j]/self.powerspectrum[i]/sigmasqL for j in range(self.Nsubs)])\n else:\n iBkdat=np.array([self.powerspectra[j][i]*self.ds[j]*self.ds[j]/np.power(self.powerspectrum[i], 1.0)/sigmasqL for j in range(self.Nsubs)])\n\n self.iBkmean=np.append(self.iBkmean, np.mean(iBkdat))\n self.iBksigma=np.append(self.iBksigma, np.sqrt(np.var(iBkdat)))\n self.iBkdata.append(iBkdat)\n\n # get <P(k) \\bar{phi}> correlation per subvolume -- this does not weigh the higher k more.\n self.iBksubs = np.mean(self.iBkdata, axis=0)", "def gaussian_kernel(training_ex, landmark, sigma=0.1):\n return np.exp(-(np.linalg.norm(training_ex - landmark) ** 2 / (2 * (sigma ** 2))))" ]
[ "0.8097279", "0.7002909", "0.6963438", "0.6866689", "0.63551277", "0.6180908", "0.6121434", "0.60961425", "0.591781", "0.59088385", "0.5886838", "0.58702284", "0.5850443", "0.58495057", "0.5838542", "0.5781357", "0.57350236", "0.57018584", "0.5690899", "0.56901944", "0.5672653", "0.56587017", "0.5657946", "0.5653072", "0.5637077", "0.5615725", "0.55799264", "0.5569594", "0.55685866", "0.55563384" ]
0.74375826
1
Seeks to each file in an archive and yields its size.
def files_in_archive(fd: BinaryIO) -> Iterable[int]: _check_next_bytes(fd, ARCHIVE_MAGIC, 'archive magic number') while True: # In some archives, the first file ends with an additional \n. If that # is present, skip it. if fd.read(1) != b'\n': fd.seek(-1, 1) # Each file in an archive is prefixed with an ASCII header: # # 16 B - file identifier (text) # 12 B - file modification timestamp (decimal) # 6 B - owner ID (decimal) # 6 B - group ID (decimal) # 8 B - file mode (octal) # 10 B - file size in bytes (decimal) # 2 B - ending characters (`\n) # # Skip the unused portions of the file header, then read the size. fd.seek(16 + 12 + 6 + 6 + 8, 1) size_str = fd.read(10) if not size_str: return try: size = int(size_str, 10) except ValueError as exc: raise FileDecodeError( 'Archive file sizes must be decimal integers') from exc _check_next_bytes(fd, b'`\n', 'archive file header ending') offset = fd.tell() # Store offset in case the caller reads the file. yield size fd.seek(offset + size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_package_iter(self):\n files = list()\n futures = list()\n\n amount = 0\n for file in self.file_iterator:\n if amount + self._estimate_file_size(file) > self.max_size:\n if len(files) == 0: # This file is too large for one archive, special handling\n self.pool.wait(futures)\n self._calculate_hash(file)\n yield self._finish_info_package([file])\n continue\n\n self.pool.wait(futures)\n yield self._finish_info_package(files)\n\n files = list()\n amount = 0\n\n amount += file.size\n files.append(file)\n futures.append(self.pool.add_task(self._calculate_hash, file)) # todo calc small files in-thread?\n\n if len(files) > 0:\n yield self._finish_info_package(files)", "async def get_archive_file_count(self, *, include_dirs=True):\n\n self.LOGGER << \"counting files\"\n if self.archive_files is None:\n # we've not attempted to list the archive before\n return len([f async for f in self.archive_contents(dirs=include_dirs)])\n else:\n if include_dirs:\n return len(self.archive_dirs) + len(self.archive_files)\n return len(self.archive_files)\n # return len(await self.archive_contents(dirs=include_dirs))", "def run(self):\r\n filesizes = {}\r\n # Build up dict with key as filesize and value is list of filenames.\r\n for path, dirs, files in walk( self._path ):\r\n for filename in files:\r\n filepath = joinpath( path, filename )\r\n filesize = stat( filepath ).st_size\r\n filesizes.setdefault( filesize, [] ).append( filepath )\r\n\r\n\r\n #Compare content hash of all files which have the same size\r\n #if two or more files have same hash and size they are added to the queue \r\n for files in [ flist for flist in filesizes.values() if len(flist)>1 ]:\r\n #run over all files in dir with the same size if there is more then one\r\n duplicates = {}\r\n for filepath in files:\r\n with open( filepath ) as openfile:\r\n filehash = md5( openfile.read() ).hexdigest()\r\n if filehash not in duplicates:\r\n duplicates.setdefault(filehash, []).append (filepath)\r\n else:\r\n duplicates[filehash].append(filepath)\r\n for duplicate in [ duplicate for duplicate in duplicates.values() if len(duplicate)>1 ]:\r\n self._queue.put(duplicate)\r\n self._finished_scan[0] = 1", "def sizes(self, fileids=None, categories=None):\n # Resolve the fileids and the categories\n\n # Create a generator, getting every path and computing filesize\n for path in self.abspaths(self.fileids()):\n yield os.path.getsize(path)", "def all_files_size():\n size = 0\n for dirpath, _dirnames, filenames in os.walk('images'):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n size += os.path.getsize(fp)\n return size", "def get_files1(dirname, size_in_kb):\n for file in glob.glob(os.path.join(dirname, \"*\")):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def size(self) -> int:\n return sum(p.size for p in self.iterfiles())", "def _file_iter(f, size):\n chunk = f.read(size)\n while chunk:\n yield chunk\n chunk = f.read(size)", "async def count_folder_contents(self, folder):\n self.LOGGER << f\"Counting contents of archive folder {folder!r}\"\n\n folder = folder.rstrip('/') + '/'\n\n c = 0\n async for f in self.archive_contents():\n if f.startswith(folder): c+=1\n\n return c\n # return len(\n # [f async for f in self.archive_contents()\n # if f.startswith(folder)])", "def molecollector(archive):\n totaltimeosstat = 0\n for root, dirs, files in os.walk(archive):\n for filename in files:\n if filename.endswith('.metadata'):\n pathname = 
os.path.abspath(os.path.join(root, filename))\n # strip off actual datestamps NOT OS ctimes --ctimes messed up\n # in *nix\n starttimeosstat = time.time()*1000\n totaltimeosstat = endtimeosstat - starttimeosstat\n endtimeosstat = time.time()*1000\n totaltimeosstat += totaltimeosstat\n # return multiple values\n yield (os.stat(pathname).st_ctime, (pathname,totaltimeosstat))\n ## January 25, 2013 --parsing too expensive\n \"\"\"pubdate = oaipmhstripper(pathname, 'datestamp')\n # normalise date to appropriate format\n pubdate = datetime.strptime(pubdate.replace(\"T\", \" \").replace(\"Z\", \"\"), '%Y-%m-%d %H:%M:%S')\n try:\n #yield os.stat(pathname).st_ctime, pathname\n yield pathname, pubdate\n except os.error as details:\n print \"Handling error: \", details\"\"\"", "def get_size(files):\n somesize = 0\n for f in files:\n somesize += int(f.get('file_size'))\n return somesize", "def dirsize(self):\n total = 0\n for p in self.select_file(recursive=True):\n try:\n total += p.size\n except: # pragma: no cover\n print(\"Unable to get file size of: %s\" % p)\n return total", "def quickScanZip(args, fh):\n # 100 bytes is the smallest .zip possible\n\n fh.seek(0, 2)\n fsize = fh.tell()\n if fsize==0:\n print(\"Empty file\")\n return\n if fsize<100:\n print(\"Zip too small: %d bytes, minimum zip is 100 bytes\" % fsize)\n return\n fh.seek(-100, 2)\n\n eoddata = fh.read()\n iEND = eoddata.find(b'PK\\x05\\x06')\n if iEND==-1:\n # try with larger chunk\n ofs = max(fh.tell()-0x10100, 0)\n fh.seek(ofs, 0)\n eoddata = fh.read()\n iEND = eoddata.find(b'PK\\x05\\x06')\n if iEND==-1:\n print(\"expected PK0506 - probably not a PKZIP file\")\n return\n else:\n ofs = fh.tell()-0x100\n eod = EndOfCentralDir(ofs, eoddata, iEND+4)\n yield eod\n\n dirofs = eod.dirOffset\n for _ in range(eod.thisEntries):\n fh.seek(dirofs)\n dirdata = fh.read(46)\n if dirdata[:4] != b'PK\\x01\\x02':\n print(\"expected PK0102\")\n return\n dirent = CentralDirEntry(dirofs, dirdata, 4)\n\n yield dirent\n dirofs = dirent.endOffset", "def _du_using_walk(self):\n\n num = 0\n size = 0\n\n for dirpath, dirnames, files in walk(self.path):\n for f in files:\n size += getsize(join(dirpath, f))\n num += len(files)\n return Du.Result(num, size)", "def _iterate_over_files(self):\n stats = Statistics()\n\n args = arguments.Args()\n\n for file in args.files:\n\n if isimage(file):\n before_size = stats.calculate_before_optimization(file)\n\n puts(\"%s %s\" % (\n e(\"==>\"),\n os.path.basename(file))\n )\n\n if \"--lossy\" in args.flags:\n Optimize.lossy(file)\n if \"--lossless\" in args.flags:\n Optimize.lossless(file)\n after_size = stats.calculate_after_optimization(file)\n\n puts(\"%s %s (%s)\" % (\n p(\"<==\"),\n os.path.basename(file),\n s(after_size) if after_size < before_size else after_size\n ))\n\n stats.show_statistics()", "def fileCounter(directory):", "def dir_size(directory: str) -> int:\n size = 0\n for file in os.listdir(directory):\n filename = os.path.join(directory, file)\n size += os.path.getsize(filename)\n return size", "def size(self) -> int:\n size = 0\n for file in self.files.values():\n size += file.size\n\n return size", "def get_big_files(self, size_threshold=10):\n for f in self.filelist:\n if f[\"size_bytes\"] > size_threshold*(1024*1024):\n yield f[\"size_bytes\"]/(1024*1024), f[\"mime\"], f[\"filename\"]", "def processSetOfCerFiles(files):\n printHeader()\n \n k = 0\n for f in files:\n k = k + 1\n sz = get_file_size(f)\n with open(f, 'rb') as fb:\n processCerFile(k, fb, sz=sz)", "def size(**kwargs):\n mpath = 
kwargs['path']\n if not os.path.exists(mpath):\n print(\"Invalid path\")\n sys.exit(-1)\n\n # Basic Counter variables\n foldercount = 0\n count = 0\n\n # List containing the collected information\n elist = []\n\n # Indices for the 2 dimensional list\n iext = 0\n icount = 1\n icsums = 2\n imins = 3\n imaxs = 4\n\n start_depth = len(mpath.split('/')) - 2\n depth = 0\n\n for root, dirs, files in os.walk(mpath, topdown=True):\n\n indircount = 0\n for name in files:\n pathfile = os.path.join(root, name)\n indircount += 1\n # Extension\n ext = (os.path.splitext(name)[1]).lower()[1:]\n if ext == '': ext = 'no ext'\n # Size\n size = os.stat(pathfile).st_size\n\n # Folder depth\n cdepth = len(os.path.abspath(pathfile).split('/')) - start_depth\n if depth < cdepth: depth = cdepth\n\n # Getting the index of the current file extension using python built-in functions\n try:\n index = list(zip(*elist))[iext].index(ext)\n except IndexError:\n # The list is empty\n index = -1\n except ValueError:\n # The list doesn't contain the extension\n index = -1\n\n if index >= 0:\n elist[index][icount] += 1\n elist[index][icsums] += size\n if size < elist[index][imins]: elist[index][imins] = size\n if size > elist[index][imaxs]: elist[index][imaxs] = size\n\n else: # Adding the new extension in the list\n elist.append([ext, 1, size, size, size])\n count += indircount\n\n # Updating the directory count\n for name in dirs:\n foldercount += 1\n\n # Mapping arguments with indices in the list\n dict = {\n 'ext': iext,\n 'count': icount,\n 'size': icsums\n }\n\n # Sorting the list\n elist.sort(key=lambda x: x[dict.get(kwargs['sort'])], reverse=not kwargs['asc'])\n\n print(\"%d files in %d folders max depth: %s\\n\" % (count, foldercount, depth))\n if kwargs['human']:\n print(f\"{'Ext.':<8}{'Count':<13}{'Total':<10}{'Min':<11}{'Max':<13}{'Avg':<9}\")\n for l in elist:\n print(f\"{l[iext]:<7} {l[icount]:<12,d} {sizeformat(l[icsums]):<9} {sizeformat(l[imins]):<10} \\\n{sizeformat(l[imaxs]):<12} {sizeformat(l[icsums] / l[icount]):<9}\")\n else:\n print(f\"{'Ext.':<8}{'Count':<13}{'Total':<13}{'Min':<13}{'Max':<13}{'Avg':<2}\")\n for l in elist:\n print(f\"{l[iext]:<7} {l[icount]:<12,d} {l[icsums]:<12} {l[imins]:<12} {l[imaxs]:<12} \\\n{int(round(l[icsums] / l[icount], 0)):<12}\")", "def __iter__(self):\n for f in self.path.glob('**/*'):\n if f.is_file() and not os.stat(str(f.resolve())).st_size == 0:\n yield Resource(str(f.resolve()), DiskCrawler.compute_digest)", "def size(path):", "def inspect(apath):\n\n files = []\n\n def inspect_into(curr_apath):\n handler = resolve_format(curr_apath)\n unpacker = HandlersFactory.get_handler(handler)\n _files = unpacker.files_list(curr_apath)\n for f in _files:\n # go into nested archive or compressed file\n if is_archive(f):\n _apath = unpacker.extract(curr_apath, f)\n inspect_into(_apath)\n\n else:\n files.append(f)\n\n inspect_into(apath)\n return files", "def test(self, archive, files):\n self.log.info(\"Opening archive %s for testing.\" % (archive))\n tmpdir = tempfile.mkdtemp(dir='/tmp/')\n tar = tarfile.open(archive, 'r:gz')\n tar.extractall(tmpdir);\n tar.close()\n\n mtime = {}\n for file in files:\n mtime[file] = os.path.getmtime(file)\n # compare each file to its archived copy\n self.log.debug(\"Comparing files %s and %s\" % (file, tmpdir))\n diffdir = filecmp.dircmp(file, os.path.join(tmpdir, file));\n diffdir.report_full_closure();\n status = {}\n for i in ('left_only', 'right_only', 'diff_files'):\n if len(diffdir.__dict__[i]) > 0:\n status[i] = len(diffdir.__dict__[i])\n 
if len(status) > 0:\n raise RuntimeError (\"Archive contents do not match source files.\")\n return mtime", "def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return", "def contents(archive_id):\n files = config.index.files(archive_id=archive_id)\n print_files(files)", "def large_files(folder):\n for foldername, subfolders, filenames in os.walk(folder):\n for filename in filenames:\n target_file = foldername + '/' + filename\n if os.path.getsize(target_file) > 100000000: # 100000000 = 100MB\n print(f'{target_file}: {str(os.path.getsize(target_file))} bytes')", "def apply_to_all_files(basedir,func=lambda x: x,ext='.h5'):\n cnt = 0\n # iterate over all files in all subdirectories\n for root, dirs, files in os.walk(basedir):\n files = glob.glob(os.path.join(root,'*'+ext))\n # count files\n cnt += len(files)\n # apply function to all files\n for f in files :\n func(f)\n \n# if cnt > 2000:\n# break\n \n return cnt" ]
[ "0.66372603", "0.6201361", "0.62012774", "0.6083263", "0.60582364", "0.5977691", "0.59540606", "0.5874965", "0.58564705", "0.5831442", "0.5831299", "0.5737863", "0.57123107", "0.5696928", "0.56868017", "0.56639326", "0.5650282", "0.5596777", "0.55928975", "0.55735", "0.55707496", "0.5562862", "0.5551554", "0.5548775", "0.5545686", "0.5519619", "0.54956424", "0.54949415", "0.54826003", "0.54747516" ]
0.7121256
0
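The record above pairs the docstring "Seeks to each file in an archive and yields its size." with a hand-rolled Unix ar parser. Purely as an illustrative sketch (standard library only, not the record's own helpers), the snippet below walks the same 60-byte member headers described in the document field; the path "libfoo.a" is a placeholder:

    AR_MAGIC = b"!<arch>\n"

    def iter_member_sizes(path):
        # Yield (member name, size in bytes) for every file in an ar archive.
        with open(path, "rb") as fd:
            if fd.read(8) != AR_MAGIC:
                raise ValueError("not an ar archive")
            while True:
                header = fd.read(60)              # fixed-width ASCII member header
                if len(header) < 60:
                    return                        # end of archive
                name = header[0:16].rstrip().decode()
                size = int(header[48:58])         # decimal size field
                yield name, size
                fd.seek(size + (size % 2), 1)     # member data is padded to an even offset

    for name, size in iter_member_sizes("libfoo.a"):
        print(name, size)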
Reads a null-terminated string from the provided file descriptor.
def read_c_string(fd: BinaryIO) -> bytes: string = bytearray() while True: byte = fd.read(1) if not byte or byte == b'\0': return bytes(string) string += byte
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def netstring_readfd(fd, max_length=0):\n read_func = lambda length: os.read(fd, length)\n return _netstring_read(read_func, max_length)", "def read_string(stream, size):\n\t\n\tvalue = ''\n\tif size > 0:\n\t\tvalue = stream.read(size)\n\t\tvalue = value.partition(chr(0))[0]\n\treturn value", "def read_str(self) -> str:\n t = self.pc\n while self.data[self.pc] != 0:\n self.pc += 1\n s = str(self.data[t:self.pc], encoding=\"utf8\")\n self.pc += 1 # jump '\\0'\n return s", "def read_string(self):\n size = self.read_int32()\n\n if size == 0:\n return \"\"\n\n is_unicode = size < 0\n\n if is_unicode:\n size *= -2\n return self.read_bytes(size)[:-2].decode('utf-16')\n\n stream_bytes = self.read_bytes(size)\n string = stream_bytes[:-1]\n if stream_bytes[-1] != 0:\n raise ReadStringException('End of string not zero')\n\n try:\n return string.decode('utf-8')\n except UnicodeDecodeError:\n return string.decode('latin-1')", "def ReadFixedString(self, length):\n return self.ReadBytes(length).rstrip(b'\\x00')", "def read_string(data, s_len):\n return struct.unpack(\"=%ds\" % s_len, data.read(s_len))[0].decode(\"utf-8\")", "def netstring_read(f, max_length=0):\n read_func = f.read\n return _netstring_read(read_func, max_length)", "def read_string(self):\n\n # length may be -1, 0, or a positive integer\n length = self.read_and_unpack('l')[0]\n if length > 0:\n return self.read(length).decode(self.utf_16_decoder)\n else:\n return ''", "def _read_string(bs):\n result = bs.readto('0x00', bytealigned=True).bytes.decode(\"utf-8\")[:-1]\n return result if result else None", "def readString(stream):\n # read the string length (4-byte int, network byte order)\n buf = stream.read(4)\n if len(buf) < 4:\n raise RuntimeError(\"found %d bytes (expected: 4)\" % len(buf))\n n_bytes = struct.unpack(\"!i\", buf)[0]\n if n_bytes < 0:\n return None\n buf = stream.read(n_bytes)\n if len(buf) < n_bytes:\n raise RuntimeError(\"found %d bytes (expected: %d)\" % (\n len(buf), n_bytes\n ))\n return unicode(buf, 'UTF-8')", "def _skip_cstring(input):\n while True:\n c = input.read(1)\n if not c or c == b\"\\0\":\n return", "def read_string(stream, writer_schema=None, reader_schema=None): # noqa\n size = read_long(stream)\n if reader_schema == 'bytes':\n # Schema Resolution: promote to byte string\n return stream.read(size)\n else:\n return stream.read(size).decode('utf-8')", "def read_utf8_string(self, length):\n return self.read(length).decode(\"utf-8\")", "def read_unicode_string(stream, size):\n\t\n\tvalue = u''\n\tif size > 0:\n\t\tdata = stream.read(size)\n\t\tdata = data.partition(chr(0))[0]\n\t\tvalue = unicode(data, 'utf_8')\n\treturn value", "def read_string(self):\n self._skip_white_space()\n return self._get_string()", "def read(self, length=None):\r\n try:\r\n if length is not None:\r\n return self._fp.read(length)\r\n return self._fp.readline()\r\n except socket.error, e:\r\n self.disconnect()\r\n if e.args and e.args[0] == errno.EAGAIN:\r\n raise ConnectionError(\"Error while reading from socket: %s\" % \\\r\n e.args[1])\r\n return ''", "def readNetstring(sock):\n # First attempt to read the length.\n size = ''\n while True:\n try:\n c = sock.recv(1)\n except socket.error, e:\n if e[0] == errno.EAGAIN:\n select.select([sock], [], [])\n continue\n else:\n raise\n if c == ':':\n break\n if not c:\n raise EOFError\n size += c\n\n # Try to decode the length.\n try:\n size = int(size)\n if size < 0:\n raise ValueError\n except ValueError:\n raise ProtocolError, 'invalid netstring length'\n\n # Now read the string.\n 
s, length = recvall(sock, size)\n\n if length < size:\n raise EOFError\n\n # Lastly, the trailer.\n trailer, length = recvall(sock, 1)\n\n if length < 1:\n raise EOFError\n\n if trailer != ',':\n raise ProtocolError, 'invalid netstring trailer'\n\n return s", "def readString(self) -> str:\n length = self._unpack('i', 4)\n\n return self._unpack('{:d}s'.format(length), length)", "def _decode_string(fp):\n return fp.read(_decode_short(fp)).decode('utf-8') or None", "def checked_read(in_stream, length, allow_eof=False):\n\n bytes = in_stream.read(length)\n if allow_eof and bytes == '':\n return bytes\n if len(bytes) != length:\n raise IOError(MSG_INCOMPLETE_READ)\n return bytes", "def read_str(self, timeout = 0):\n len = self.read_uint32(timeout)\n return self.read(len, timeout)", "def ReadString(self):\n length = self.ReadUInt8()\n return self.unpack(str(length) + 's', length)", "def _qstring(f):\n \n length_header = np.uint32(struct.unpack('I', f.read(4)))[0]\n if length_header == int('ffffffff' , 16): #ffffffff specifies a null string\n return\n if length_header == 0: #0 is an empty string\n return ''\n string = f.read(length_header)\n \n #decoding hack, enables dictionary call by ascii (rather than hex)\n ascii = \"\".join(list(string)[::2])\n \n return ascii", "def read_string(fobj, endian=''):\n nchar = struct.unpack(endian + 'i', fobj.read(4))[0]\n strng = struct.unpack(endian + str(nchar) + 's', fobj.read(nchar))[0]\n return strng.decode('utf-8')", "def _readline(self) -> Text:\n try:\n return self.stream.readline().decode(\"utf-8\").strip()\n except OSError: # pragma: no cover\n return \"\"", "def read_string(fobj, endian=''):\n (nchar,) = struct.unpack(endian + 'i', fobj.read(4))\n (string,) = struct.unpack(endian + str(nchar) + 's', fobj.read(nchar))\n return string", "def getStr_c(s):\n try:\n return s[:s.index('\\x00')]\n except:\n return s", "def read_string(stream, indent=INDENT):\n values = streambyte_to_int(stream, 0)\n return '\"' + ''.join(map(chr, values[:-1])) + '\"\\n' \\\n + (' ' * indent) + 'HEX: ' + (', '.join(map(hex, values)))", "def _get_string():\n result = sys.stdin.readline().rstrip('\\n')\n return result", "def read_fd_decode_safely(fd, size=4096):\n data = os.read(fd.fileno(), size)\n for _ in range(3):\n try:\n return data, data.decode(\"utf-8\")\n except UnicodeDecodeError as e:\n if e.reason != \"unexpected end of data\":\n raise\n data += os.read(fd.fileno(), 1)\n\n return data, data.decode(\"utf-8\")" ]
[ "0.68129843", "0.64159715", "0.60958624", "0.6073359", "0.6067506", "0.60660225", "0.6050458", "0.60482794", "0.6025416", "0.5936739", "0.58037686", "0.57951295", "0.57093495", "0.56588453", "0.56097895", "0.5587004", "0.557562", "0.5544384", "0.5524973", "0.551961", "0.5466532", "0.54555917", "0.54505235", "0.54481524", "0.54072356", "0.5378944", "0.5357808", "0.53536135", "0.5352589", "0.53304183" ]
0.74095106
0
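The document field in the record above reads one byte at a time until a NUL terminator. As an illustration, the sketch below restates that reader and applies it to an in-memory, ELF-style string table; the section names are made-up sample data:

    import io

    def read_c_string(fd):
        # Same contract as the reader in the record: stop at b"\0" or end of stream.
        out = bytearray()
        while True:
            byte = fd.read(1)
            if not byte or byte == b"\0":
                return bytes(out)
            out += byte

    strtab = io.BytesIO(b"\0.text\0.data\0.bss\0")
    strtab.seek(1)                          # index 0 is the conventional empty name
    while (name := read_c_string(strtab)):
        print(name.decode())                # .text, .data, .bss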
Peeks at the next bytes to see if they match the expected.
def _bytes_match(fd: BinaryIO, expected: bytes) -> bool: try: offset = fd.tell() data = fd.read(len(expected)) fd.seek(offset) return data == expected except IOError: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasNextByte(self) -> bool:\n raise NotImplementedError", "def any_equal_block(b: bytes) -> bool:\n b = [b[i:i + 16] for i in range(0, len(b), 16)]\n return len(set(b)) != len(b)", "def _get_next_packet(self):\n offset_check = self.packet_counter * CsvAbstractReader.BUFFER_SIZE\n header = {'Range': 'bytes={}-{}'.format(offset_check, offset_check + CsvAbstractReader.BUFFER_SIZE - 1)}\n try:\n packet = self.s3_file.get_contents_as_string(headers=header, encoding='utf-8')\n return True, packet\n except:\n return False, \"\"", "def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0", "def peek_byte(self):\n try:\n return self._buffer[self.pos]\n except IndexError:\n raise self._eof", "def utf8_lead_byte(b):\n\treturn (ord(b) & 0xC0) != 0x80", "def read_until(steg_bytes: bytes, offset: int, ending: str):\r\n # Create a variable to hold the bytes read\r\n bytes_read = b\"\"\r\n\r\n # Loop through the steg_bytes\r\n while offset < len(steg_bytes):\r\n # Check if the current byte is the ending byte sequence\r\n if steg_bytes[offset:offset + len(ending)] == ending.encode():\r\n # Return the bytes read and the offset of the ending byte sequence\r\n return bytes_read, offset\r\n # Read the next byte\r\n bytes_read += steg_bytes[offset:offset + 1]\r\n offset += 1", "def AllConsumed(self):\n return self.NumBits() <= (8*self.idx_byte + self.idx_boff)", "def test_peek(self):\n server, client = loopback()\n server.send(b\"xy\")\n assert client.recv(2, MSG_PEEK) == b\"xy\"\n assert client.recv(2, MSG_PEEK) == b\"xy\"\n assert client.recv(2) == b\"xy\"", "def confirm_next(self, seq):\n for n, i in enumerate(seq):\n try:\n if self.items[self.pos + n] != i:\n return False\n except IndexError:\n return False\n return True", "def pes_packet_check_formedness(payload):\n b1 = ord(payload[0])\n b2 = ord(payload[1])\n b3 = ord(payload[2])\n\n b4 = ord(payload[3])\n if b1 != 0 or b2 != 0 or b3 != 1:\n return False\n return True", "def test_utf8_bytes(self):\n # Python3 doesn't support bytestrings, don't run this test\n if str is unicode:\n return\n input = \"A r\\xc3\\xa9sum\\xc3\\xa9, also spelled resum\\xc3\\xa9 or resume\"\n output = input.split(\" \")\n output[1] = output[1][0:-1]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV[0])\n self.assertTrue(input[itmV[1]:].startswith(itmO))", "def validUTF8(data):\n for start_byte in data:\n ones = count_ones(start_byte)\n if ones in (1, 7, 8):\n return False\n for i in range(ones - 1):\n end_byte = data[i]\n if end_byte is None or end_byte >> 6 != 0b10:\n return False\n return True", "def test_given_cmp_function_bytes_fails(cls):\n with pytest.raises(ValueError) as err:\n orderedstructs.SkipList(bytes, lambda x, y: x < y)\n assert err.value.args[0] == \\\n 'Can not specify comparison function with type \"bytes\".'", "def test_peek_returns_value(full_deque):\n assert full_deque.peek() == 1", "def detect(byte_string):\n\n if not isinstance(byte_string, byte_cls):\n raise TypeError(unwrap(\n '''\n byte_string must be a byte string, not %s\n ''',\n _type_name(byte_string)\n ))\n\n return byte_string.find(b'-----BEGIN') != -1 or byte_string.find(b'---- BEGIN') != -1", "def 
next(self):\r\n\t\tself.index += 1\r\n\t\treturn not self.eof()", "def testGetOne(self):\n data = b'0123456789'\n inst = WireData(data)\n for i, byte in enumerate(bytearray(data)):\n self.assertEqual(inst[i], byte)\n for i in range(-1, len(data) * -1, -1):\n self.assertEqual(inst[i], bytearray(data)[i])", "def validUTF8(data):\n\n pattern1 = 1 << 6\n pattern = 1 << 7\n byte_num = 0\n\n for i in data:\n pattern_byte = 1 << 7\n\n if byte_num == 0:\n while pattern_byte & i:\n byte_num += 1\n pattern_byte = pattern_byte >> 1\n\n if byte_num == 0:\n continue\n\n if byte_num == 1 or byte_num > 4:\n return False\n else:\n if not (i & pattern and not (i & pattern1)):\n return False\n\n byte_num -= 1\n\n if byte_num == 0:\n return True\n\n return False", "def test_compress_offset_less_len2(self):\n text = 'abcdabcdab'\n actual = LZ77.compress(text)\n expected = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 52])\n self.assertEqual(actual, expected)", "def has_next(self) -> bool:\n return self.peek() != self.sentinel", "def test_utf8_bytes_at_end(self):\n # Python3 doesn't support bytestrings, don't run this test\n if str is unicode:\n return\n input = \"A r\\xc3\\xa9sum\\xc3\\xa9, also spelled resum\\xc3\\xa9 or resume\"\n output = input.split(\" \")\n output[1] = output[1][0:-1]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV[0])", "def test_ioerror_buffer_position(self):\n bytes = pyamf.encode(u'foo', [1, 2, 3], encoding=pyamf.AMF3).getvalue()\n\n self.buf.write(bytes[:-1])\n self.buf.seek(0)\n\n self.decoder.readElement()\n self.assertEqual(self.buf.tell(), 5)\n\n self.assertRaises(IOError, self.decoder.readElement)\n self.assertEqual(self.buf.tell(), 5)", "def validate_response(self, response):\n crypted = response[-0x100:]\n # check that not all values are the same\n if all(v == crypted[0] for v in crypted):\n return False\n # return if chunks of 0x10 repeat\n return (len([True for i in range(0x10, len(crypted), 0x10)\n if crypted[:0x10] == crypted[i:i+0x10]])) == 0xf", "def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def is_record(buf, offset):\n\n if len(buf) < offset + 8:\n return False\n\n magic, size = struct.unpack_from(\"<II\", buf, offset)\n if magic != 0x00002a2a:\n return False\n\n if not (0x30 <= size <= 0x10000):\n return False\n\n if len(buf) < offset + size:\n return False\n\n size2 = struct.unpack_from(\"<I\", buf, offset + size - 4)[0]\n if size != size2:\n return False\n\n return True", "def _is_last_chunk(self, bytes_read, previous_read):\n return bytes_read == previous_read and bytes_read != 0", "def skip(self):\r\n length = self.next_byte()\r\n while length != b\"\\x00\" and length:\r\n self.next_bytes(parse_int(length, 'big'))\r\n length = self.next_byte()", "def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]" ]
[ "0.61394197", "0.58342236", "0.572061", "0.5706022", "0.5685208", "0.5665679", "0.5650298", "0.56094944", "0.55495685", "0.5529557", "0.55083495", "0.549685", "0.5470431", "0.539034", "0.5367962", "0.53483605", "0.5337731", "0.53350985", "0.5328996", "0.53174484", "0.53155714", "0.531389", "0.5279826", "0.52612334", "0.5243381", "0.5235981", "0.52177584", "0.5214633", "0.5207653", "0.5199506" ]
0.6764772
0
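The record above checks upcoming bytes without consuming them by saving and restoring the stream position. A small illustrative sketch of that peek-and-rewind pattern (the ELF magic below is just sample data, and the record's IOError handling is omitted):

    import io

    def bytes_match(fd, expected):
        offset = fd.tell()
        data = fd.read(len(expected))
        fd.seek(offset)                     # rewind so nothing is consumed
        return data == expected

    stream = io.BytesIO(b"\x7fELF\x02\x01\x01" + b"\x00" * 9)
    print(bytes_match(stream, b"\x7fELF"))  # True
    print(stream.tell())                    # 0 -- the caller can still parse from the start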
Returns a dict of structs used for converting bytes to integers.
def _determine_integer_format(self) -> Dict[int, struct.Struct]: endianness_byte = self._elf.read(1) # e_ident[EI_DATA] (endianness) if endianness_byte == b'\x01': endianness = '<' elif endianness_byte == b'\x02': endianness = '>' else: raise FileDecodeError( 'Unknown endianness {!r}'.format(endianness_byte)) return { 1: struct.Struct(endianness + 'B'), 2: struct.Struct(endianness + 'H'), 4: struct.Struct(endianness + 'I'), 8: struct.Struct(endianness + 'Q'), }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dict_of_int2(self):\n pass", "def to_int(data):\n return {int(k): data[k] for k in sorted(data.keys())}", "def decode_map(as_bytes: typing.List[int]) -> dict:\n raise NotImplementedError()", "def get_dict_of_bytes2(self):\n pass", "def decd(binary):\n result = {}\n while binary:\n (key, dtype, length) = struct.unpack('>HBH', binary[:5])\n value = dec_elem(dtype, binary[5:5 + length])\n if value is not None:\n result[key] = (dtype, value)\n binary = binary[5 + length:]\n return result", "def parse(bits: int, value: bytes, endianness: str):\n offset = 2**(bits - 1)\n\n signed_f, unsigned_f = SIZES[bits]\n\n signed_v = struct.unpack(f'{endianness}{signed_f}', value)[0]\n unsigned_v = struct.unpack(f'{endianness}{unsigned_f}', value)[0]\n signed_to_unsigned_v = signed_v + offset\n unsigned_to_signed_v = unsigned_v - offset\n\n return {\n 'offset': offset,\n 'signed': signed_v,\n 'unsigned': unsigned_v,\n 'signed_to_unsigned': signed_to_unsigned_v,\n 'unsigned_to_signed': unsigned_to_signed_v\n }", "def decode(cls, buffer: bytes) -> Dict[str, Any]:\n pstruct = Struct()\n pstruct.ParseFromString(buffer)\n dictionary = dict(pstruct)\n cls._patch_dict_restore(dictionary)\n return dictionary", "def _decode_35701(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29995:\n start_byte += n_bytes\n n_bytes = 4\n n_points = struct.unpack(\n '<I', data[start_byte:start_byte + n_bytes])[0]\n return {'n_points': n_points}", "def to_struct(self):\n return {\"_class\": self.__class__.__name__,\n \"value\": self.__value, \n \"string\": cuni(self)}", "def convert_bytes_to_ints(in_bytes, num):\n dt = numpy.dtype('>i' + str(num))\n return numpy.frombuffer(in_bytes, dt)", "def bytes_to_int(obj):\n return functools.reduce(lambda x, y: x << 8 | y, obj)", "def mmtf_bytes_to_mmtf_dict(bytestring):\n\n raw = msgpack.unpackb(bytestring)\n return decode_dict(raw)", "def _bitfields_to_ints(bit_state, vrs):\n int_state = dict()\n for var, dom in vrs.items():\n if dom == 'boolean':\n int_state[var] = bit_state[var]\n continue\n bitnames = ['{var}@{i}'.format(var=var, i=i)\n for i in range(dom[1].bit_length())]\n bitnames[0] = '{var}@0.{min}.{max}'.format(\n var=var, min=dom[0], max=dom[1])\n bitvalues = [bit_state[b] for b in bitnames]\n # little-endian\n val = int(''.join(str(b) for b in reversed(bitvalues)), 2)\n int_state[var] = val\n return int_state", "def _fill_cdata(cls):\n\n funcs = {}\n for key, name in [(\"b\", \"char\"), (\"h\", \"short\"),\n (\"i\", \"int\"), (\"q\", \"longlong\")]:\n for echar, esuffix in [(\"<\", \"le\"), (\">\", \"be\")]:\n esuffix = \"_\" + esuffix\n for unsigned in [True, False]:\n s = struct.Struct(echar + (key.upper() if unsigned else key))\n get_wrapper = lambda f: lambda *a, **k: f(*a, **k)[0]\n unpack = get_wrapper(s.unpack)\n unpack_from = get_wrapper(s.unpack_from)\n\n def get_unpack_from(s):\n def unpack_from(data, offset=0):\n return s.unpack_from(data, offset)[0], offset + s.size\n return unpack_from\n\n unpack_from = get_unpack_from(s)\n pack = s.pack\n\n prefix = \"u\" if unsigned else \"\"\n if s.size == 1:\n esuffix = \"\"\n bits = str(s.size * 8)\n\n if unsigned:\n max_ = 2 ** (s.size * 8) - 1\n min_ = 0\n else:\n max_ = 2 ** (s.size * 8 - 1) - 1\n min_ = - 2 ** (s.size * 8 - 1)\n\n funcs[\"%s%s_min\" % (prefix, name)] = min_\n funcs[\"%s%s_max\" % (prefix, name)] = max_\n funcs[\"%sint%s_min\" % (prefix, bits)] = min_\n funcs[\"%sint%s_max\" % (prefix, bits)] = max_\n\n funcs[\"%s%s%s\" % 
(prefix, name, esuffix)] = unpack\n funcs[\"%sint%s%s\" % (prefix, bits, esuffix)] = unpack\n funcs[\"%s%s%s_from\" % (prefix, name, esuffix)] = unpack_from\n funcs[\"%sint%s%s_from\" % (prefix, bits, esuffix)] = unpack_from\n funcs[\"to_%s%s%s\" % (prefix, name, esuffix)] = pack\n funcs[\"to_%sint%s%s\" % (prefix, bits, esuffix)] = pack\n\n for key, func in iteritems(funcs):\n setattr(cls, key, staticmethod(func))", "def dict() -> Dict[str, Pin]:", "def parse_bytes_to_dict(bytes_to_parse):\n return ast.literal_eval(bytes_to_parse.decode(\"utf-8\"))", "def _decode_compound(fp):\n values = {}\n tag_type = ord(fp.read(1))\n while tag_type > 0:\n name = _decode_string(fp)\n values[name] = _MAP[tag_type](fp)\n tag_type = ord(fp.read(1))\n return values", "def struct_dict(struct):\n get_pair = lambda field_type: (\n field_type[0], getattr(struct, field_type[0]))\n return dict(list(map(get_pair, struct._fields_)))", "def decode(bytes, command):\n\n cmap = com_map[command]\n ret = {}\n for com, rng in cmap.items():\n ret[com] = sum([ord(b)<<(8*i) for i,b in\n enumerate(bytes[rng[0]-1:rng[1]])])\n\n return ret", "def decode_dict(state):\n new_state = dict()\n for k, v in state.items():\n if v.decode().isnumeric():\n new_state[k.decode()] = int(v)\n else:\n new_state[k.decode()] = v.decode()\n return new_state", "def unmarshal_int(b):\n return int.from_bytes(b, byteorder='little', signed=True)", "def to_num_dict (dict):\n output = {}\n for key, value in dict.items():\n output[int(key)] = value\n return output", "def to_dict (self):\n return {\n 'lengths': self.lengths,\n 'lowerCounts': self.lower_counts,\n 'upperCounts': self.upper_counts,\n 'digitCounts': self.digit_counts,\n 'symbolCounts': self.symbol_counts,\n 'classCounts': self.class_counts,\n 'wordCounts': self.word_counts\n }", "def bytes_to_int(bs):\n v = 0\n p = 0\n for b in reversed(bs):\n v += b * (2 ** p)\n p += 8\n return v", "def parse_binary_field(b):\n\n\n codec, length, params = struct.unpack(\">iii\", b[:12])\n len4 = lambda b: int(len(b[12:]) / 4)\n if codec == 1: return struct.unpack(\"f\" * length, b[12:])\n elif codec == 2: return struct.unpack(\"b\" * length, b[12:])\n elif codec == 3: return struct.unpack(\">\" + \"h\" * length, b[12:])\n elif codec == 4: return struct.unpack(\">\" + \"i\" * length, b[12:])\n elif codec == 5:\n chars = struct.unpack(\"c\" * (length * 4), b[12:])\n return [b\"\".join([\n c for c in chars[i * 4: (i + 1) * 4] if c != b\"\\x00\"\n ]).decode() for i in range(length)]\n elif codec == 6:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return [chr(c) if c != 0 else \"\" for c in run_length_decode(integers)]\n elif codec == 7:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return run_length_decode(integers)\n elif codec == 8:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return delta_decode(run_length_decode(integers))\n elif codec == 9:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return [n / params for n in run_length_decode(integers)]\n elif codec == 10:\n integers = struct.unpack(\">\" + (\"h\" * int(len(b[12:]) / 2)), b[12:])\n return [n / params for n in delta_decode(recursive_decode(integers))]\n else: raise ValueError(\".mmtf error: {} is invalid codec\".format(codec))", "def parse_bytes_stream_from_message(msg: bytes,\n length_bytes: int,\n code_bytes: int\n ) -> Dict:\n\n code = int.from_bytes(msg[length_bytes:\n length_bytes + code_bytes],\n byteorder)\n data = msg[length_bytes + code_bytes:]\n\n return {\"code\": 
code,\n \"data\": data}", "def dec(binary):\n result = []\n while binary:\n (key, dtype, length) = struct.unpack('>HBH', binary[:5])\n value = dec_elem(dtype, binary[5:5 + length])\n if value is not None:\n result.append((key, dtype, value))\n binary = binary[5 + length:]\n return result", "def b2i(data, order='big'):\n return int.from_bytes(data, order)", "def to_dict(self):\n config = {'min_length': self.min_length, 'max_length': self.max_length}\n return {'node_type': 'Bytes', 'config': config}", "def multiparse(bits: int, value: bytes):\n return {\n 'msb': parse(bits, value, '>'),\n 'lsb': parse(bits, value, '<')\n }" ]
[ "0.68280524", "0.6609406", "0.6602328", "0.6484319", "0.6250697", "0.6064725", "0.60590786", "0.59368396", "0.5923957", "0.588018", "0.58635813", "0.58271354", "0.5778409", "0.5752742", "0.57432646", "0.5733445", "0.5670739", "0.56411225", "0.5570107", "0.5546325", "0.5546187", "0.5537303", "0.5514733", "0.5499401", "0.54802257", "0.5453027", "0.54418", "0.54356354", "0.54342604", "0.54294956" ]
0.7155724
0
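The record above builds struct.Struct unpackers keyed by integer width after reading the ELF e_ident[EI_DATA] (endianness) byte. An isolated sketch of the same dictionary with a tiny usage check; the function name here is hypothetical:

    import struct

    def integer_formats(ei_data):
        # 1 = little-endian, 2 = big-endian, per e_ident[EI_DATA].
        endianness = {1: "<", 2: ">"}[ei_data]
        return {size: struct.Struct(endianness + code)
                for size, code in ((1, "B"), (2, "H"), (4, "I"), (8, "Q"))}

    formats = integer_formats(1)
    print(formats[4].unpack(b"\x34\x12\x00\x00")[0])  # 4660 == 0x1234
    print(formats[2].size)                            # 2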
Reads the section headers to enumerate all ELF sections.
def _list_sections(self) -> Iterable['Elf.Section']: for _ in _elf_files_in_archive(self._elf): reader = FieldReader(self._elf) base = reader.read(FILE_HEADER.section_header_offset) section_header_size = reader.offset( SECTION_HEADER.section_header_end) # Find the section with the section names in it. names_section_header_base = ( base + section_header_size * reader.read(FILE_HEADER.section_names_index)) names_table_base = reader.read(SECTION_HEADER.section_offset, names_section_header_base) base = reader.read(FILE_HEADER.section_header_offset) for _ in range(reader.read(FILE_HEADER.section_count)): name_offset = reader.read(SECTION_HEADER.section_name_offset, base) yield self.Section( reader.read_string(names_table_base + name_offset), reader.read(SECTION_HEADER.section_address, base), reader.read(SECTION_HEADER.section_offset, base), reader.read(SECTION_HEADER.section_size, base), reader.file_offset) base += section_header_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sections(self, elf_file_path):\n section_names = []\n try:\n output = subprocess.check_output([self.readelf_path, '-SW', elf_file_path])\n output = bytes_to_str(output)\n for line in output.split('\\n'):\n # Parse line like:\" [ 1] .note.android.ident NOTE 0000000000400190 ...\".\n result = re.search(r'^\\s+\\[\\s*\\d+\\]\\s(.+?)\\s', line)\n if result:\n section_name = result.group(1).strip()\n if section_name:\n section_names.append(section_name)\n except subprocess.CalledProcessError:\n pass\n return section_names", "def get_sections(self, unsafe = False):\n sect_size = self.obj_vm.profile.get_obj_size(\"_IMAGE_SECTION_HEADER\")\n start_addr = self.FileHeader.SizeOfOptionalHeader + self.OptionalHeader.obj_offset\n\n for i in range(self.FileHeader.NumberOfSections):\n s_addr = start_addr + (i * sect_size)\n sect = obj.Object(\"_IMAGE_SECTION_HEADER\", offset = s_addr, vm = self.obj_vm,\n parent = self, native_vm = self.obj_native_vm)\n \n ## deal with swapped sections...\n if not sect:\n continue\n \n if not unsafe:\n sect.sanity_check_section()\n yield sect", "def parse_section_header(data, elf_header):\n if elf_header[\"shoff\"] == 0:\n print \" No section header\"\n return None\n \n if is64bit(elf_header):\n section_entry_str = section_64_entry_str\n section_entry_spec = section_64_entry_spec\n else:\n section_entry_str = section_32_entry_str\n section_entry_spec = section_32_entry_spec\n \n entry_len = struct.calcsize(section_entry_str)\n entries = {}\n offset = elf_header[\"shoff\"] \n for entry in range(elf_header[\"shnum\"]):\n vals = {}\n if len(data) < offset+entry_len:\n break\n val_data = struct.unpack(section_entry_str, data[offset:offset+entry_len]) \n for i, elem in enumerate(section_entry_spec):\n vals[elem[0]] = val_data[i] \n \n vals[\"flags\"] = get_section_flags(vals[\"flags\"])\n vals[\"type\"] = get_section_type(vals[\"type\"])\n \n entries[entry] = vals\n offset += entry_len\n \n if not entries:\n return {}\n \n sections = assign_section_names(data, entries, elf_header[\"shstrndx\"])\n return sections", "def read (path, elf_info):\n ehdr, phdrs, shdrs, syms, core_info = elf_info\n info = abbrev = strings = None\n for shdr in shdrs:\n if shdr['name'] == '.debug_info':\n info = shdr['offset'], shdr['size']\n if shdr['name'] == '.debug_abbrev':\n abbrev = shdr['offset'], shdr['size']\n if shdr['name'] == '.debug_str':\n strings = shdr['offset'], shdr['size']\n if not info:\n return []\n else:\n abbrevs = abbrev_section (path, abbrev[0], abbrev[1])\n if strings:\n strings = string_section (path, strings[0], strings[1])\n info = info_section (path, info[0], info[1])\n return info.read_all (abbrevs, strings)", "def read(self):\r\n entById = {}\r\n entsByName = {}\r\n header = 'HEADER '\r\n readheader = False\r\n for line in self.file:\r\n e = self.parseLine(line)\r\n if e:\r\n entById[int(e[\"id\"])] = e\r\n ids = e.get(e[\"name\"],[])\r\n ids.append(e[\"id\"])\r\n entsByName[e[\"name\"]] = list(set(ids))\r\n elif 'HEADER' in line:\r\n readheader = True\r\n elif readheader:\r\n if 'ENDSEC' in line:\r\n readheader = False\r\n else:\r\n header += line\r\n \r\n return [entById, entsByName, header]", "def read_sections(config: Config, ef: ELFFile) -> SectionDF:\n columns = ['section', 'type', 'address', 'size', 'flags', 'segment']\n index = []\n rows = []\n for i, section in enumerate(ef.iter_sections()):\n index.append(i)\n segment_number = -1\n for j, segment in enumerate(ef.iter_segments()):\n if segment.section_in_segment(section):\n segment_number = j\n 
break\n rows.append([\n section.name,\n elftools.elf.descriptions.describe_sh_type(section['sh_type']),\n section['sh_addr'], section['sh_size'], section['sh_flags'],\n segment_number\n ])\n return SectionDF(rows, index=index, columns=columns)", "def load_sections():\n pass", "def _readheaderlines(f):\n hdrlines = []\n for i in range(0,26):\n hdrlines.append(f.readline())\n return hdrlines", "def get_phdrs(pointer):\n ei_class, Elfhdr = get_ehdr(pointer)\n\n if Elfhdr is None:\n return (0, 0, None)\n\n phnum = Elfhdr.e_phnum\n phoff = Elfhdr.e_phoff\n phentsize = Elfhdr.e_phentsize\n\n x = (phnum, phentsize, read(Phdr, Elfhdr.address + phoff))\n return x", "def _getSections(self):\r\n\r\n sections = self.cf.sections()\r\n return sections", "def sections(self) -> Iterable[int]:\n return self._sections.keys()", "def sections(self):\n if self._sections is None:\n self._load()\n return self._sections", "def read_hdr_file(self, rawfilename):\n\n # Get the filename without path or extension\n filename = os.path.basename(rawfilename)\n filesplit = os.path.splitext(filename)\n filebase = filesplit[0]\n dirname = os.path.dirname(rawfilename)\n\n # See if we can find the header file to use\n if os.path.isfile(os.path.join(dirname, filebase + '.hdr')):\n hdrfilename = os.path.join(dirname, filebase + '.hdr')\n elif os.path.isfile(os.path.join(dirname, filename + '.hdr')):\n hdrfilename = os.path.join(dirname, filename + '.hdr')\n else:\n raise IOError('Could not find coresponding header file')\n\n hdrfile = open(hdrfilename, 'r')\n output = collections.OrderedDict()\n inblock = False\n\n # Read line, split it on equals, strip whitespace from resulting strings\n # and add key/value pair to output\n for currentline in hdrfile:\n # ENVI headers accept blocks bracketed by curly braces - check for these\n if not inblock:\n # Split line on first equals sign\n if re.search('=', currentline) is not None:\n linesplit = re.split('=', currentline, 1)\n # Convert all values to lower case\n key = linesplit[0].strip().lower()\n value = linesplit[1].strip()\n\n # If value starts with an open brace, it's the start of a block\n # - strip the brace off and read the rest of the block\n if re.match('{', value) is not None:\n inblock = True\n value = re.sub('^{', '', value, 1)\n\n # If value ends with a close brace it's the end\n # of the block as well - strip the brace off\n if re.search('}$', value):\n inblock = False\n value = re.sub('}$', '', value, 1)\n value = value.strip()\n output[key] = value\n else:\n # If we're in a block, just read the line, strip whitespace\n # (and any closing brace ending the block) and add the whole thing\n value = currentline.strip()\n if re.search('}$', value):\n inblock = False\n value = re.sub('}$', '', value, 1)\n value = value.strip()\n output[key] = output[key] + value\n\n hdrfile.close()\n\n return output", "def _read_headers(self):\n # Read the textual header.\n self._read_textual_header()\n # The next 400 bytes are from the Binary File Header.\n binary_file_header = self.file.read(400)\n bfh = SEGYBinaryFileHeader(binary_file_header, self.endian)\n self.binary_file_header = bfh\n self.data_encoding = self.binary_file_header.data_sample_format_code\n # If bytes 3506-3506 are not zero, an extended textual header follows\n # which is not supported so far.\n if bfh.number_of_3200_byte_ext_file_header_records_following != 0:\n msg = 'Extended textual headers are supported yet. 
' + \\\n 'Please contact the developers.'\n raise NotImplementedError(msg)", "def section_list(self):\n return self._config_parser.sections()", "def parse_program_header(data, elf_header):\n if elf_header[\"phoff\"] == 0:\n print \" No program header\"\n return None\n\n if is64bit(elf_header):\n segment_entry_str = segment_64_entry_str\n segment_entry_spec = segment_64_entry_spec\n else:\n segment_entry_str = segment_32_entry_str\n segment_entry_spec = segment_32_entry_spec \n\n entry_len = struct.calcsize(segment_entry_str)\n offset = elf_header[\"phoff\"] \n segments = {}\n for entry in range(elf_header[\"phnum\"]):\n vals = {}\n val_data = struct.unpack(segment_entry_str, data[offset:offset+entry_len]) \n for i, elem in enumerate(segment_entry_spec):\n vals[elem[0]] = val_data[i] \n \n vals[\"type\"] = get_segment_type(vals[\"type\"])\n vals[\"flags\"] = get_segment_flags(vals[\"flags\"])\n \n segments[entry] = vals\n offset += entry_len\n \n return segments", "def _iter_CUs_in_section(stream, structs, parser):\n stream.seek(0, os.SEEK_END)\n endpos = stream.tell()\n stream.seek(0, os.SEEK_SET)\n\n offset = 0\n while offset < endpos:\n header = struct_parse(parser, stream, offset)\n if header.offset_count > 0:\n offset_parser = structs.Dwarf_uint64 if header.is64 else structs.Dwarf_uint32\n header['offsets'] = struct_parse(Array(header.offset_count, offset_parser('')), stream)\n else:\n header['offsets'] = False\n yield header\n offset = header.offset_after_length + header.unit_length", "def get_section_names(self):\n return list(self._sections.keys())", "def check_pe_sections(self, pe):\n res = []\n for section in pe.sections:\n if b\"!This program cannot be run in DOS mode\" in section.get_data()[:400] or\\\n b\"This program must be run under Win32\" in section.get_data()[:400]:\n res.append(section.Name.decode('utf-8').strip('\\x00'))\n\n if len(res) > 0:\n print(\"[+] PE header in sections %s\" % \" \".join(res))\n return True\n return False", "def _sectionheader(self):\n header = SectionHeader()\n header.crc = self.reader.readint(2)\n header.id = self.reader.readint(2)\n header.len = self.reader.readint(4)\n header.versnr = self.reader.readint(1)\n header.protnr = self.reader.readint(1)\n header.reserved = self.reader.reads(6)\n if header.reserved:\n header.reserved = header.reserved.replace('\\x00', '')\n\n return header", "def get_all(self, cmd_names, section, env=DEFAULT_ENV):\n\n env = env or DEFAULT_ENV\n\n self._read()\n return self.document[env][self._to_key(cmd_names)][section]", "def get_sections(h):\n secnames = {}\n resec = re.compile('(\\w+)\\[(\\d*)\\]')\n for sec in h.allsec():\n g = resec.match(sec.name())\n if g.group(1) not in secnames.keys():\n secnames[g.group(1)] = [int(g.group(2))]\n else:\n secnames[g.group(1)].append(int(g.group(2)))\n return secnames", "def iter_sections(self):\n # We'll be keeping track of a few pieces of state.\n #\n # This is a list of sections considered valid at each iteration.\n # We start by looking for the main \"#diffx:\" section. Every section\n # we process will rebuild this list, using a list of valid sections\n # defined in VALID_SECTION_STATES, keyed off by the current section\n # ID.\n valid_sections = {Section.MAIN}\n\n # This is a stack of encodings. Every time we go up a container\n # section, or go to a container section at the same level, we'll pop\n # the last value off the stack. 
We'll then add the new encoding\n # (either defined on the section or inherited from a parent section)\n # onto the stack.\n encodings = [None]\n\n # The last container section level from a previous iteration.\n prev_container_level = 0\n\n while True:\n section = self._read_header(valid_sections=valid_sections)\n\n if section is None:\n # We've read the last section. We're done parsing.\n break\n\n level = section['level']\n linenum = section['line']\n options = section['options']\n section_id = section['section']\n\n if section_id in CONTENT_SECTIONS:\n # This is a content section.\n encoding = options.get('encoding', encodings[-1])\n\n try:\n length = options['length']\n except KeyError:\n raise DiffXParseError(\n 'Expected section \"%s\" to have a length option'\n % section_id,\n linenum=linenum)\n\n if section_id in PREAMBLE_SECTIONS:\n # This is a preamble section.\n #\n # Read the content and decode it using the current\n # encoding (defined either on this section or in a parent).\n section['text'] = self._read_content(\n length=length,\n encoding=encoding,\n indent=options.get('indent'),\n line_endings=options.get('line_endings'))\n elif section_id in META_SECTIONS:\n # This is a metadata section.\n #\n # Validate the format as JSON (either explicitly provided\n # as format=json, or left off entirely).\n metadata_format = options.get('format', 'json')\n\n if metadata_format != 'json':\n raise DiffXParseError(\n 'Unexpected metadata format \"%(format)s\". If the '\n '\"format\" option is provided, it must be \"json\".'\n % {\n 'format': metadata_format,\n },\n linenum=linenum)\n\n # Read the content and decode it using the current encoding\n # (defined either on this section or in a parent).\n content = self._read_content(\n length=length,\n encoding=encoding,\n line_endings=options.get('line_endings'))\n\n try:\n section['metadata'] = json.loads(content)\n except ValueError as e:\n raise DiffXParseError(\n 'JSON metadata could not be parsed: %s' % e,\n linenum=linenum)\n else:\n assert section_id == Section.FILE_DIFF\n\n # This is a diff section.\n #\n # Read the content. If an explicit encoding is provided,\n # decode it to a Unicode string. Otherwise, it will remain\n # a byte string.\n #\n # Encodings for diffs aren't inherited from parent\n # sections.\n section['diff'] = self._read_content(\n length=length,\n encoding=options.get('encoding'),\n line_endings=options.get('line_endings'),\n preserve_trailing_newline=True,\n keep_bytes=True)\n else:\n # This is a container section.\n if section_id == Section.MAIN:\n # This is the main DiffX section, which we'll encounter\n # only once.\n #\n # Validate the DiffX version.\n diffx_version = options.get('version')\n\n if diffx_version not in SpecVersion.VALID_VALUES:\n raise DiffXParseError(\n 'The DiffX version in this file (%s) is not '\n 'supported by this version of the diffx module'\n % diffx_version,\n linenum=section['line'])\n else:\n # This is either the change or file section.\n assert section_id in (Section.CHANGE, Section.FILE)\n\n if level <= prev_container_level:\n # We're at the same section level (change -> change,\n # or file -> file), or we went back up a level\n # (file -> change). 
Pop off the last encoding from\n # the stack before we push a new encoding onto it.\n encodings.pop()\n\n # Push a newly-specified encoding (if in the options) or the\n # parent section's encoding on the stack.\n encodings.append(options.get('encoding', encodings[-1]))\n\n prev_container_level = level\n\n # Set the new list of valid exceptions allowed at this stage of\n # parsing.\n valid_sections = VALID_SECTION_STATES[section_id]\n\n # Pass that section up to the caller for processing.\n yield section", "def get_config_main_sections(self):\n self.sections_in_config = self.config_handle.sections()", "def readHeader(self) -> None:\n # read header files\n self.headersList = []\n self.chanHeadersList = []\n for headerFile in self.headerF:\n if \"xtrx\" in headerFile.lower():\n headers, chanHeaders = self.readHeaderXTRX(headerFile)\n else:\n headers, chanHeaders = self.readHeaderXTR(headerFile)\n self.headersList.append(headers)\n self.chanHeadersList.append(chanHeaders)\n\n # check to make sure no gaps, calculate out the sample ranges and list the data files for each sample\n self.mergeHeaders(self.headersList, self.chanHeadersList)", "def read_header(self, fcs, data_offset=0):\r\n # Ignore first 10 bytes of HEADER contain FCS file format followed by 4 spaces\r\n fcs.read(10)\r\n\r\n for text in (\r\n \"$BEGINSTEXT\",\r\n \"$ENDSTEXT\",\r\n \"$BEGINDATA\",\r\n \"$ENDDATA\",\r\n ):\r\n text_offset = int(fcs.read(8))\r\n self.text_keywords[text] = text_offset + data_offset\r\n\r\n self.data_start = self.text_keywords[\"$BEGINDATA\"]\r\n self.data_end = self.text_keywords[\"$BEGINDATA\"]", "def sections(self):\n # self._sections will never have [DEFAULT] in it\n return self._sections.keys()", "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def read_prism_hdr(hdr_path): \n with open(hdr_path, 'r') as input_f:\n header_list = input_f.readlines()\n \n return dict(item.strip().split() for item in header_list)", "def _read_header(self, valid_sections={}):\n linenum = self._linenum\n\n # It's possible that we'll be at the end of the file, with some blank\n # lines, or hand-editing (or bad diff generation) has led to some\n # blank lines before a header. 
We'll iterate through any blank lines\n # until we reach content or an End of File.\n while True:\n header, eof = self._read_until(b'\\n')\n\n if eof:\n return None\n\n if header.strip():\n break\n\n if self._file_newlines is None:\n # Given that we read up until a '\\n', one of these are guaranteed\n # to match.\n if header.endswith(b'\\r\\n'):\n self._file_newlines = b'\\r\\n'\n else:\n assert header.endswith(b'\\n')\n\n self._file_newlines = b'\\n'\n\n assert header.endswith(self._file_newlines)\n header = header[:-len(self._file_newlines)]\n\n m = self._HEADER_RE.match(header)\n\n if not m:\n raise DiffXParseError(\n 'Unexpected or improperly formatted header: %r' % header,\n linenum=linenum)\n\n # Validate the level and section ID.\n section_id = m.group('section_id').decode('ascii')\n\n if section_id not in valid_sections:\n raise DiffXParseError(\n 'Unknown or unexpected section ID \"%(section_id)s\". '\n 'Expected one of: %(valid_sections)s'\n % {\n 'section_id': section_id,\n 'valid_sections': ', '.join(\n '\"%s\"' % _valid_section\n for _valid_section in sorted(valid_sections)\n ),\n },\n linenum=linenum)\n\n section_type = m.group('section_type')\n level = len(m.group('level'))\n\n # Parse the options out of the header.\n options_str = m.group('options')\n options = {}\n\n if options_str:\n # Options should be present.\n #\n # As this is a reference implementation, this will be strict with\n # the format. There should be exactly one space between the\n # \"#<id>:\" and the options, one space between each comma-separated\n # pair, and each key and value are expected to match a specific set\n # of characters.\n for option_pair in options_str.split(b', '):\n option_key, option_value = option_pair.split(b'=', 1)\n\n if not self._HEADER_OPTION_KEY_RE.match(option_key):\n raise DiffXParseError(\n 'Header option key \"%s\" contains invalid characters'\n % option_key.decode('ascii'),\n linenum=linenum,\n column=header.index(option_pair))\n\n if not self._HEADER_OPTION_VALUE_RE.match(option_value):\n raise DiffXParseError(\n 'Header option value \"%(value)s\" for key \"%(key)s\" '\n 'contains invalid characters'\n % {\n 'key': option_key.decode('ascii'),\n 'value': option_value.decode('ascii'),\n },\n linenum=linenum,\n column=header.index(option_pair) + len(option_key) + 1)\n\n # These should safely decode, since we've validated the\n # characters above.\n option_key = option_key.decode('ascii')\n option_value = option_value.decode('ascii')\n\n # Convert the value to an integer, if it's a number.\n try:\n option_value = int(option_value)\n except ValueError:\n pass\n\n options[option_key] = option_value\n\n self._linenum += 1\n\n return {\n 'level': level,\n 'line': linenum,\n 'options': options,\n 'section': section_id,\n 'type': section_type.decode('ascii'),\n }" ]
[ "0.72855216", "0.69050896", "0.6674625", "0.6245168", "0.61457765", "0.5947562", "0.5752167", "0.55844647", "0.5572117", "0.55539584", "0.5457647", "0.54557514", "0.5420563", "0.5410607", "0.53924954", "0.5369167", "0.5365167", "0.5286985", "0.5227172", "0.5214238", "0.521347", "0.52128845", "0.52038157", "0.51770216", "0.5156479", "0.51537365", "0.51528025", "0.51260376", "0.50835174", "0.5069789" ]
0.7331083
0
Returns the section that contains the provided address, if any.
def section_by_address(self, address: int) -> Optional['Elf.Section']:
    # Iterate in reverse to give priority to sections with nonzero addresses
    for section in sorted(self.sections, reverse=True):
        if address in section.range():
            return section

    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_section_containing(self, addr):\n for s in self.sections:\n if s.contains_addr(addr - self.rebase_addr):\n return s\n\n return None", "def get_range(self, address):\n index = bisect.bisect(self.ranges, MemoryRange.from_addr(address))\n if index >= len(self.ranges):\n return None\n memrange = self.ranges[index-1]\n if address in memrange:\n return memrange\n return None", "def find_segment_containing(self, addr):\n for s in self.segments:\n if s.contains_addr(addr - self.rebase_addr):\n return s\n\n return None", "def getTableEntryByAddress(self, address):\n for entry in self.table:\n if entry.hasPage():\n if entry.getPage().contains(address):\n return entry\n return None", "def get_section(self, section_name: str) -> NetSection:\n return self.sections[section_name]", "def _read_section(self, pointer, nr_of_leads):\n if pointer.id == 1:\n return self._section1(pointer)\n if pointer.id == 2:\n return self._section2(pointer)\n elif pointer.id == 3:\n return self._section3(pointer)\n elif pointer.id == 4:\n return self._section4(pointer)\n elif pointer.id == 5:\n return self._section5(pointer, nr_of_leads)\n elif pointer.id == 6:\n return self._section6(pointer, nr_of_leads)\n elif pointer.id == 7:\n return self._section7(pointer)\n elif pointer.id == 8:\n return self._section8(pointer)\n elif pointer.id == 9:\n return self._section9(pointer)\n elif pointer.id == 10:\n return self._section10(pointer)\n elif pointer.id == 11:\n return self._section11(pointer)\n elif pointer.id == 12:\n return self._section12(pointer)\n elif pointer.id > 12:\n print(\"WARN: Section Id %s is not implemented\" % str(pointer.id))\n return None", "def get_section(self, section_name):\n section_name = JSONSchema.format_section_name(section_name).lower()\n try:\n return self._sections[section_name]\n except KeyError:\n raise AquaError('No section \"{0}\"'.format(section_name))", "def get_address(self, address=None):\n return self.__get_addr_grp('address', address)", "def get_address(address_file):\n if not path.exists(address_file) :\n print(\"file not found :\", address_file)\n return None\n addr_file = open(address_file,'r')\n address = addr_file.readlines()\n return address[0]", "def section(self):\n return SECTION_NAME_TO_SECTION[self.section_name]", "def getPiece(self, address):\r\n \r\n return self.pieces.get(address, None)", "def get_by_address(self, address):\n assert len(address) == 20\n accounts = [account for account in self.accounts if account.address == address]\n if len(accounts) == 0:\n raise KeyError('account with address {} not found'.format(encode_hex(address)))\n elif len(accounts) > 1:\n log.warning('multiple accounts with same address found', address=encode_hex(address))\n return accounts[0]", "def lookup_socket(self, address): # TODO: optimize me\n\n net_tuple = self.read_nodestate(0)\n for item in net_tuple:\n discovered_address = item[1]\n if address == discovered_address:\n return item[0]", "def read(self, addr: str) -> dict:\n for block in self.__mem:\n if block['address'] == addr:\n return block\n\n return {}", "def find_section(amdpar_xml):\n siblings = [s for s in amdpar_xml.itersiblings()]\n\n if len(siblings) == 0:\n return find_lost_section(amdpar_xml)\n\n for sibling in siblings:\n if sibling.tag == 'SECTION':\n return sibling\n\n paragraphs = [s for s in siblings if s.tag == 'P']\n if len(paragraphs) > 0:\n return fix_section_node(paragraphs, amdpar_xml)", "def get_conf_by_section(self, section):\n try:\n return get_conf(self.conf_file)[section]\n except:\n return None", 
"def _get_section(self, sections, section_id):\n for section in sections:\n\t if section['section_id'] == section_id:\n\t return section", "def _get_section(self, sections, section_id):\n for section in sections:\n\t if section['section_id'] == section_id:\n\t return section", "def getSection(self, role):\n rel = role.section.all()\n if len(rel):\n return rel[0]\n else:\n return None", "def get_node_by_address(root_node, address):\n assert address[0] == 0\n if len(address) == 1:\n return root_node\n return get_descendant_by_address(root_node, address[1:])", "def __getitem__(self, section):\n result = self.get(section)\n\n if result is None:\n raise KeyError(section)\n\n return result", "def is_const_section(addr):\n for section_addr, section_name in section_addrs.items(): \n if (section_name == '.const'):\n code_start = section_addr\n code_bytes = int(section_bytes[section_name])\n code_end = code_start + code_bytes\n if code_start <= addr < code_end:\n return True\n\n return False", "def resolve(self, address):\n address_map = self._address_map_from_spec_path(address.spec_path)\n if address not in address_map:\n self._raise_incorrect_address_error(address.spec_path, address.target_name, address_map)\n else:\n return address_map[address]", "def getInstructionContaining(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ...", "def get_contract_by_address(self, address: str):\n try:\n contract_records = self._registry.search(contract_address=address)\n except RuntimeError:\n raise self.InterfaceError('Corrupted Registrar') # TODO: Integrate with Registry\n else:\n if not contract_records:\n raise self.InterfaceError(\"No such contract with address {}\".format(address))\n return contract_records[0]", "def get_index_from_section(section):\n return section.rsplit(\"(\", 1)[1].rstrip(\")\")", "def get_import_at(doc, address):\n segment = doc.getSegmentAtAddress(address)\n if segment is not None:\n comment = segment.getCommentAtAddress(address)\n if comment.startswith(\"Imports from\"):\n return comment[13:]\n return None", "def _parse_addr(self, addr: str):\n addr = addr.upper()\n return self._registers_list.get(addr, None)", "def read_section(geo_file_path, section_marker):\n read = False\n section = []\n\n with open(geo_file_path) as file:\n lines = file.readlines()\n for line in lines:\n if line.startswith(section_marker):\n read = True\n\n if line.startswith(SECTION_END_MARKER):\n # Don't stop, section can be split\n read = False\n\n if read:\n section.append(line)\n\n return section", "def get_section(line: str) -> str:\n if len(line) < 2:\n raise Exception(\"Error: Section line can't be shorter than 2\")\n return line[1:len(line) - 1]" ]
[ "0.79777485", "0.65322", "0.6484256", "0.64789987", "0.63290364", "0.6012492", "0.59712535", "0.5960082", "0.59520346", "0.59450305", "0.58924216", "0.58774364", "0.5858443", "0.5850265", "0.58319354", "0.58101404", "0.57845306", "0.57845306", "0.57655644", "0.57516944", "0.57066184", "0.56735325", "0.5642449", "0.5639527", "0.5605207", "0.55355185", "0.5527403", "0.55257916", "0.5514526", "0.54952663" ]
0.84220016
0
Dumps a binary string containing the sections matching the regex.
def dump_sections(self, name: Union[str, Pattern[str]]) -> Dict[str, bytes]:
    name_regex = re.compile(name)

    sections: Dict[str, bytes] = {}
    for section in self.sections:
        if name_regex.match(section.name):
            self._elf.seek(section.file_offset + section.offset)
            sections[section.name] = self._elf.read(section.size)

    return sections
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def objdump_section(elf_path, section):\n assert isinstance(elf_path, str), 'ELF path must be str'\n assert section in SECTIONS, 'Not a vaild section'\n\n # Print the symbol table entries of the file. (-t)\n # Decode (demangle) low-level symbol names into user-level names. \n cmd = ['objdump', '-t', '--demangle', elf_path] \n objdump = subprocess.Popen(cmd, stdout=PIPE)\n\n fgrep = subprocess.Popen(['fgrep', section], stdin=objdump.stdout, stdout=PIPE)\n objdump.wait()\n objdump.stdout.close()\n \n # get fgrep output\n std_out = fgrep.communicate()[0]\n fgrep.stdout.close() \n\n return std_out.decode('utf-8')", "def printRegEx(rules):\n fd = None\n try:\n fd = open(\"all.re\", 'w')\n except:\n print(\"Could not open file to write out regex.\")\n for r in rules:\n for ts in r.getTS():\n for p in ts.getPkts():\n for c in p.getContent():\n fd.write(c.getContentString())\n fd.write(\"\\n\")\n if fd:\n fd.close()\n return [0, 0, 0]", "def hexdump(data):\n\n def is_hexdump_printable(b):\n return b in b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\\\|\\'\";:/?.,<>'\n\n lines = []\n chunks = (data[i*16:i*16+16] for i in range((len(data) + 15) // 16))\n\n for i, chunk in enumerate(chunks):\n hexblock = ['{:02x}'.format(b) for b in chunk]\n left, right = ' '.join(hexblock[:8]), ' '.join(hexblock[8:])\n asciiblock = ''.join(chr(b) if is_hexdump_printable(b) else '.' for b in chunk)\n lines.append('{:08x} {:23} {:23} |{}|'.format(i*16, left, right, asciiblock))\n\n return '\\n'.join(lines)", "def hexdump( source, start=None, end=None, length=None, major_len=8, minor_len=4, colour=True, address_base=None ):\n for line in hexdump_iter( source, start, end, length, major_len, minor_len, colour, address_base ):\n print( line )", "def dump(self):\n dump_grammar(self.rules)\n print(self.registry)", "def dump(self, src, length=8):\r\n N=0; result=''\r\n while src:\r\n s,src = src[:length],src[length:]\r\n hexa = ' '.join([\"%02X\"%ord(x) for x in s])\r\n s = s.translate(self.FILTER)\r\n result += \"%04X %-*s %s\\n\" % (N, length*3, hexa, s)\r\n N+=length\r\n return result", "def dump(self, contents=False):\n print(self)\n for region in self.regions:\n print(region)\n hexdump(region.data, address=region.address)", "def dump(self):\n outputs = [\"Code object : %s\" % self.name]\n outputs.append(\" Type : %s\" % self.object_type)\n for source_line in self.source:\n # Each line is a (line_number, code) pair\n outputs.append('%d: %s' % source_line)\n return \"\".join(outputs)", "def recdump(self, args):\n if args.index:\n dbfile = self.index\n elif args.sys:\n dbfile = self.sys\n elif args.stru:\n dbfile = self.stru\n else:\n dbfile = self.bank\n\n if not dbfile:\n print(\".dat not found\")\n return\n nerr = 0\n nr_recnone = 0\n nr_recempty = 0\n tabidxref = [0] * 256\n bytexref = [0] * 256\n for i in range(1, args.maxrecs + 1):\n try:\n data = dbfile.readrec(i)\n if args.find1d:\n if data and (data.find(b\"\\x1d\") > 0 or data.find(b\"\\x1b\") > 0):\n print(\"record with '1d': %d -> %s\" % (i, b2a_hex(data)))\n break\n\n elif not args.stats:\n if data is None:\n print(\"%5d: <deleted>\" % i)\n else:\n print(\"%5d: %s\" % (i, toout(args, data)))\n else:\n if data is None:\n nr_recnone += 1\n elif not len(data):\n nr_recempty += 1\n else:\n tabidxref[data[0]] += 1\n for b in data[1:]:\n bytexref[b] += 1\n nerr = 0\n except IndexError:\n break\n except Exception as e:\n print(\"%5d: <%s>\" % (i, e))\n if args.debug:\n raise\n nerr += 1\n if nerr > 5:\n break\n\n if 
args.stats:\n print(\"-- table-id stats --, %d * none, %d * empty\" % (nr_recnone, nr_recempty))\n for k, v in enumerate(tabidxref):\n if v:\n print(\"%5d * %02x\" % (v, k))\n print(\"-- byte stats --\")\n for k, v in enumerate(bytexref):\n if v:\n print(\"%5d * %02x\" % (v, k))", "def dump(memory, startad, endad):\n res = \"\"\n gap = 0\n \n # but not always\n if cpu.thumb_mode: instr_size = 2\n else: instr_size = 4\n \n \n # Made the caller align the address\n #startad = (startad//instr_size)*instr_size\n #endad = (endad//instr_size)*instr_size\n \n i = startad\n \n addr = startad\n while addr <= endad:\n if i <= addr:\n res += new_section_check(addr)\n #if is_const_section(addr): # for debug\n # pass\n membytes = cpu.read_memory(addr, instr_size)\n if membytes is not None:\n memword = int.from_bytes(membytes, 'little')\n else:\n print(\"Reading uninitialized memory, address = {:#x}\".format(addr))\n memword = 0 # should this happen\n \n if is_code_section(addr):\n if cpu.thumb_mode: \n #if is_thumb32(memword):\n # instr_size = 4\n #else:\n instr_size = 2\n else:\n instr_size = 2 \n \n if memword == 0: \n gap += 1\n i+= instr_size\n else:\n if gap > 0:\n res += \"*** gap of {:d} halfword\".format(gap)\n if gap > 1:\n res += 's'\n res += '\\n'\n gap = 0 \n if addr in symdict:\n res += '\\n---------' + '<' + symdict[addr] + '>:\\n'\n mbytes = membytes[0:instr_size]\n instr = cpu.read_memory_int(addr, 4)\n \n tmp_res, actual_instr_size = disass.disass(addr, \n instr, \n cpu.thumb_mode)\n if cpu.thumb_mode:\n if actual_instr_size == 4:\n extrabytes = my_hex(cpu.read_memory(addr+2, 2))\n else:\n extrabytes = \" \"\n else:\n extrabytes = \"\"\n res += \"{:08x} \".format(addr) + my_hex(\n mbytes) + \" \" + extrabytes + \" \" + tmp_res + \"\\n\"\n i += actual_instr_size # Thumb 2 could be either \n else:\n \n if (addr % 4) == 0:\n instr_size = 4 # not code, not an instruction\n if memword != 0:\n if gap > 1:\n res += \"*** repeats for {:d} word\".format(gap-1)\n if (gap-1) > 1:\n res += 's'\n res += '\\n'\n gap = 0 \n mbytes = cpu.read_memory(addr, 4)\n res += \"{:08x} \".format(addr)+'.word '+my_hex(mbytes)+\"\\n\"\n else:\n if gap == 0:\n res += \"{:08x} \".format(addr)+'.word 0\\n'\n gap += 1\n i += 4\n else: \n instr_size = 2\n res += \"{:08x} \".format(addr)+'.word '+my_hex(membytes)+\"\\n\"\n i += 2 \n addr = cpu.get_next_addr(addr, instr_size)\n if addr is None:\n addr = endad+instr_size \n \n return res", "def regex_compiled():\n return re.compile(SBE19HardwareParticle.regex(), re.DOTALL)", "def dump(self):\n # omit output if there's no content.\n if not self.has_content():\n return\n\n guard = self.get_guard()\n\n print self.GUARD_FORMAT.format(\"BEGIN \" + guard)\n self.dump_content()\n print self.GUARD_FORMAT.format(\"END \" + guard)", "def dump_patterns(patterns):\n return u\"[\\n %s\\n]\" % u\"\\n,\".join(\n [json.dumps(patt) for patt in patterns])", "def dump_compiler(input_bytes):\n return dump_from_release(input_bytes, \"compiler\")", "def dump(self):\n print PccUtString.trimString(self.dumpBuf(), \"\\n\")\n return self", "def _magic_g(self, s):\n s = s.strip()\n if idapy._d is None:\n print \"Please select a dump first. 
Example:\"\n print \"sel t2i\"\n return\n a = addr_from_magic_string(s)\n show_disasm(idapy._d, a, a+80)", "def hexdump(data, columns=4, blocksize=4):\n\tblocks = splitevery(data, blocksize)\n\n\t# calculate number of rows given columns\n\trow_count,remain = divmod(len(blocks), columns)\n\tif remain > 0:\n\t\trow_count += 1\n\n\trows = []\n\t# row length includes 2 chars for hex and 1 for spaces\n\trowlen = columns*(2*blocksize+1) \n\t# printable chars, in this context, dont include whitespace\n\tprintable = string.digits + string.letters + string.punctuation \n\n\tfor i in range(0, row_count):\n\t\tstart = i*columns\n\t\tascii_string = ''\n\t\trow = ''\n\t\t# add the hex\n\t\tfor block in blocks[start:start+columns]:\n\t\t\trow += block.encode('hex') + ' '\n\t\t\tascii_string += ''.join([x if x in printable else ' ' for x in block])\n\t\t# pad last row with spaces so ascii strings align\n\t\trows.append(row.ljust(rowlen) + ascii_string)\n\n\treturn '\\n'.join(rows)", "def dump(start=0, end=0x2000):\n find(start)\n address = start\n while address < end:\n # build list of 8 bytes with their ascii repr\n byte_list = [read(type='int', inc=True) for i in range(8)]\n ascii_repr = ''.join([(unichr(b) if b>31 and b<127 else '.') \n for b in byte_list])\n byte_string = ' '.join(['%02X' % i for i in byte_list])\n print \"%04X %s %s\" % (address, byte_string, ascii_repr)\n address += 8", "def dump(self, filename, regFilterList=None, userMsg=''):\n pass", "def get_binary(self):\n\n assert len(self._pattern) <= 128\n\n rtr = bytes()\n\n for word in self._pattern:\n rtr += struct.pack(\"<H\", word)\n return rtr", "def _magic_bgmt(self, s):\n s = s.strip()\n if idapy._d is None:\n print \"Please select a dump first. Example:\"\n print \"sel t2i\"\n return\n a = addr_from_magic_string(s, rounded_32bit = False)\n f = idapy._d.Fun(\"gui_massive_event_loop\")\n r = find_refs(idapy._d, a, f.addr)\n \n for a,v in r:\n bkt.back_deco(a)\n \n print r", "def dump_release(input_bytes):\n return dump_from_release(input_bytes, \"release\")", "def dump(self):\n self.hasher.update_time_dicts() # Makes the time measurements available\n\n print(\" Creating a results folder in {} and storing all results there.\".format(self.config.output_dir))\n if not os.path.isdir(self.config.output_dir):\n os.mkdir(self.config.output_dir)\n\n print(\" Dumping profile ...\")\n profile_file_name = \"{}_{}_profile\".format(self.name, self.config.mode)\n with open(os.path.join(self.config.output_dir, profile_file_name), \"a\") as file:\n profile = {\"config\": self.config.dump(),\n \"hash\": self.hasher.hash_time_dict,\n \"find\": self.hasher.find_time_dict}\n\n json.dump(profile, file)\n\n print(\" Dumping matches ...\")\n for i, match in enumerate(self.__matched_offsets):\n if int(match[0] > match[1]):\n offset_a = match[1]\n offset_b = match[0]\n else:\n offset_a = match[0]\n offset_b = match[1]\n\n match_file_name = \"{}_{}_{}_{}\".format(self.name, self.config.mode, offset_a, offset_b)\n with open(os.path.join(self.config.output_dir, match_file_name), \"w\") as file:\n infos = \"Config:\\n: {}\".format(self.config)\n text_a = \"\"\n text_b = \"\"\n if self.config.dump_text:\n text_a = \"Text:\\n{}\".format(self.__offset_text_map.get(offset_a))\n text_b = \"Text:\\n{}\".format(self.__offset_text_map.get(offset_b))\n\n file.write(\"{}\\n\\n{}\\n\\n{}\\n\\n{}\".format(infos, text_a, \"#\"*25, text_b))\n\n if self.config.dump_graph:\n print(\" Creating graphs ...\")\n x1, x2 = list(), list()\n y1, y2 = list(), list()\n t_all = 0\n for 
element, t in self.hasher.hash_time_dict.items():\n t_all += t\n x1.append(element)\n y1.append(t_all)\n\n t_all = 0\n for element, t in self.hasher.find_time_dict.items():\n t_all += t\n x2.append(element)\n y2.append(t_all)\n\n self.__plot(os.path.join(self.config.output_dir, \"hash_time\"), x1, y1)\n self.__plot(os.path.join(self.config.output_dir, \"find_time\"), x2, y2)\n\n print(\"\\n\\n\")\n\n return", "def dump_section(config, section, no_include_opts=None):\n new_config = rose.config.ConfigNode()\n if no_include_opts is None:\n no_include_opts = []\n\n for keylist, opt_node in config.walk([section]):\n option = keylist[1]\n if option in no_include_opts:\n continue\n new_config.value[option] = opt_node\n\n config_string_file = StringIO.StringIO()\n rose.config.dump(new_config, config_string_file)\n config_string_file.seek(0)\n return config_string_file.read()", "def dump_data(ser, meta, args):\n ser.reset_input_buffer()\n ser.reset_output_buffer()\n\n command = b\"TEXT.DUMP\\r\"\n rx = \"\"\n ntry = 0\n while not rx or (rx.split()[-1] != \"data?\"):\n rx = send_cmd(ser, command, args.debug)\n # sys.stderr.write(rx)\n ntry += 1\n if ntry > 3:\n LOGGER.warning(\"Wrong response to dump command ({})\".format(command))\n return 0\n\n command = b\"Y\"\n rx = \"\"\n ntry = 0\n while not rx or (rx.split()[-1] != \"ready\"):\n rx = send_cmd(ser, command, args.debug)\n # sys.stderr.write(rx)\n ntry += 1\n if ntry > 3:\n LOGGER.warning(\"Wrong response to dump command ({})\".format(command))\n return 0\n\n b = b\"\\r\"\n n = ser.write(b)\n if args.debug:\n LOGGER.debug(\"{} byte ({}) written to port\\n\".format(n, repr(b)))\n time.sleep(0.05)\n\n dumpst = time.time()\n suff = \"\"\n if meta.badclock:\n suff = \"-badclock\"\n\n fname = \"{}/{}sb.{}{}\".format(args.path, meta.modserial, args.calday, suff)\n fh = open(fname, \"w\")\n\n fraw = \"\"\n rxline = 1\n try:\n while rxline:\n rxline = ser.readline()\n if rxline:\n sys.stdout.write(rxline.decode(errors=\"replace\"))\n fout = crlfpat.sub(linend, rxline.decode(errors=\"replace\"))\n if cafepat.search(fout):\n meta.cafe = True\n fh.write(fout)\n except KeyboardInterrupt:\n interrupt = b\"\\x03\"\n send_cmd(ser, interrupt, args.debug)\n # time.sleep(1.0)\n # rxline = 1\n # while rxline:\n # rxline = ser.readline()\n # if rxline:\n # sys.stdout.write(rxline.decode(errors='replace'))\n # fout = crlfpat.sub(linend, rxline.decode(errors='replace'))\n # fh.write(fout)\n ser.reset_input_buffer()\n fh.close()\n fsize = os.stat(fname).st_size\n frename = fname + \"-abort\"\n os.rename(fname, frename)\n sys.stderr.write(\"\\n\\n\")\n LOGGER.warning(\"Download aborted: wrote {} bytes to {}\".format(fsize, frename))\n return 0\n\n fh.close()\n\n if meta.cafe:\n frename = fname + \"-cafe\"\n os.rename(fname, frename)\n fname = frename\n\n dumpend = time.time()\n etsec = dumpend - dumpst\n dtet = datetime(1900, 1, 1, 0, 0, 0) + timedelta(seconds=etsec)\n\n fsize = os.stat(fname).st_size\n sys.stderr.write(\"\\n\\n\")\n if meta.badclock or meta.cafe:\n LOGGER.warning(\"Wrote {} bytes to {}\".format(fsize, fname))\n else:\n LOGGER.info(\"Wrote {} bytes to {}\".format(fsize, fname))\n LOGGER.info(\n \"Dumped {} records in {} (hh:mm:ss)\".format(meta.ndumprec, dtet.strftime(etfmt))\n )\n\n FLOGGER.info(\"Wrote {} bytes to {}\".format(fsize, fname))\n FLOGGER.info(\n \"Dumped {} records in {} (hh:mm:ss)\".format(meta.ndumprec, dtet.strftime(etfmt))\n )\n\n return fsize", "def hexdump(args=None):\n args = parser.parse_args(args)\n with LogSetup(args):\n 
contents = args.file.read()\n args.file.close()\n dump(contents, width=args.width)", "def dump(memory, startad, endad): # for msp430\n res = \"\"\n gap = 0\n\n #cpu.thumb_mode = False\n instr_size = 2\n \n i = startad\n \n addr = startad\n while addr <= endad:\n if i <= addr:\n res += new_section_check(addr)\n #if is_const_section(addr): # for debug\n # pass\n membytes = cpu.read_memory(addr, instr_size)\n if membytes is not None:\n memword = int.from_bytes(membytes, 'little')\n else:\n print(\"Reading uninitialized memory, address = {:#x}\".format(addr))\n memword = 0 # should this happen (it did due to a bug)\n \n if is_code_section(addr): \n if machine == \"MSP430\":\n instr_size = 2 # always true in the code section\n if memword == 0: \n gap += 1\n i+= instr_size\n else:\n if gap > 0:\n res += \"*** gap of {:d} halfword\".format(gap)\n if gap > 1:\n res += 's'\n res += '\\n'\n gap = 0 \n if addr in symdict:\n res += '\\n---------' + '<' + symdict[addr] + '>:\\n'\n mbytes = membytes[0:instr_size]\n instr = [0,0,0]\n instr[0] = cpu.read_memory_int(addr, instr_size)\n instr[1] = cpu.read_memory_int(addr+2, instr_size)\n instr[2] = cpu.read_memory_int(addr+4, instr_size)\n \n tmp_res, actual_instr_size = disass.disass(addr, \n instr, \n dummy = False)\n \n if actual_instr_size == 2:\n extra1 = extra2 = \" \"\n elif actual_instr_size == 4:\n extra2 = \" \"\n extra1 = '{:04x}'.format(instr[1])\n elif actual_instr_size == 6:\n extra1 = '{:04x}'.format(instr[1])\n extra2 = '{:04x}'.format(instr[2])\n else: # a bug\n log(\"bug in dump\")\n extra1 = extra2 = \"\"\n \n res += \"{:08x} \".format(addr) + my_hex(\n mbytes) + \" \" + extra1 + \" \" + extra2 + \" \" + tmp_res + \"\\n\"\n i += actual_instr_size \n else:\n \n if (addr % 4) == 0:\n instr_size = 4 # not code, not an instruction\n if memword != 0:\n if gap > 1:\n res += \"*** repeats for {:d} word\".format(gap-1)\n if (gap-1) > 1:\n res += 's'\n res += '\\n'\n gap = 0 \n mbytes = cpu.read_memory(addr, 4)\n res += \"{:08x} \".format(addr)+'.word '+my_hex(mbytes)+\"\\n\"\n else:\n if gap == 0:\n res += \"{:08x} \".format(addr)+'.word 0\\n'\n gap += 1\n i += 4\n else: \n instr_size = 2\n if membytes is not None:\n res += \"{:08x} \".format(addr)+'.word '+my_hex(\n membytes)+\"\\n\"\n i += 2 \n addr = cpu.get_next_addr(addr, instr_size)\n if addr is None:\n addr = endad+instr_size\n \n return res", "def dumps(self):\n return ''.join(self.out)", "def dump (data, file = None, dumpLength = 16):\n\tif isBytes(data):\n\t\tdata = data.decode(\"latin-1\")\n\t# If no defined file\n\tif file == None:\n\t\tfile = StringIO()\n\t\n\tfor j in range (0, len (data), dumpLength):\n\t\tfile.write('%08X ' % toInteger(j))\n\t\tdumpLine (data [j:j + dumpLength], file, dumpLength)\n\t\tfile.write('\\n')\n\n\t# If a string can be returned\n\tif type (file) == type (StringIO()):\n\t\treturn file.getvalue()\n\telse:\n\t\treturn None", "def test_tabledump(self):\n datastr = (\n '\" 1\" \"abc\" \" 3.70000007152557\" \" 0\"\\n'\n '\" 2\" \"xy \" \" 6.69999971389771\" \" 1\"\\n'\n )\n cdstr = (\n 'c1 1J I11 \"\" \"\"'\n ' -2147483647 \"\" \"\" \\n'\n 'c2 3A A3 \"\" \"\"'\n ' \"\" \"\" \"\" \\n'\n 'c3 1E G15.7 \"\" \"\"'\n ' \"\" 3 0.4 \\n'\n 'c4 1L L6 \"\" \"\"'\n ' \"\" \"\" \"\" \\n'\n )\n # copy fits file to the temp directory\n self.copy_file(\"tb.fits\")\n\n # test without datafile\n fits.tabledump(self.temp(\"tb.fits\"))\n assert os.path.isfile(self.temp(\"tb_1.txt\"))\n\n # test with datafile\n fits.tabledump(self.temp(\"tb.fits\"), 
datafile=self.temp(\"test_tb.txt\"))\n assert os.path.isfile(self.temp(\"test_tb.txt\"))\n\n # test with datafile and cdfile\n datafile = self.temp(\"data.txt\")\n cdfile = self.temp(\"coldefs.txt\")\n fits.tabledump(self.temp(\"tb.fits\"), datafile, cdfile)\n assert os.path.isfile(datafile)\n with open(datafile) as data:\n assert data.read() == datastr\n with open(cdfile) as coldefs:\n assert coldefs.read() == cdstr" ]
[ "0.5618182", "0.52968454", "0.5151692", "0.50999147", "0.50382054", "0.50097686", "0.48782483", "0.48392582", "0.4833646", "0.4833251", "0.4806635", "0.4752081", "0.4741569", "0.47281152", "0.47145468", "0.4710047", "0.46879074", "0.4660579", "0.46327552", "0.46313748", "0.46248564", "0.46247238", "0.4616887", "0.46113282", "0.45881757", "0.4560285", "0.455148", "0.45455873", "0.45272785", "0.45262933" ]
0.5936862
0
Returns the total nonnegative information theory entropy, given the number of observations for each different group
def entropy(group_counts):
    total = sum(group_counts)
    entro = 0
    for item_count in group_counts:
        entro += item_entropy(item_count, total)
    return entro
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent", "def entropy(self):\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_", "def calc_entropy(data_set): #calculates total entropy of the dataset\r\n republicans = 0\r\n democrats = 0\r\n total = 0\r\n for data_point in data_set:\r\n party = data_point.dat_party\r\n if party == \"R\":\r\n republicans+=1\r\n elif party == \"D\":\r\n democrats+=1\r\n total+=1\r\n\r\n if total == 0: return 0\r\n prob_dem = democrats/total\r\n prob_rep = republicans/total\r\n if prob_dem == 0: return -(prob_rep * math.log(prob_rep, 2))\r\n if prob_rep == 0: return -(prob_dem * math.log(prob_dem, 2))\r\n\r\n entropy = (-prob_dem * math.log(prob_dem, 2)) -(prob_rep * math.log(prob_rep, 2))\r\n return entropy", "def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))", "def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count", "def cal_globalEntropy(self):\n group_score = []\n pop_total = np.sum(self.pop_sum)\n prop = np.asarray(np.sum(self.pop, axis=0))[0]\n\n # loop at sum of each population groups\n for group in prop:\n group_idx = group / pop_total * np.log(1 / (group / pop_total))\n group_score.append(group_idx)\n entropy = np.sum(group_score)\n\n return entropy", "def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en", "def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy", "def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')", "def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())", "def nats(self) -> float:\n return self.entropy()", "def _entropy(self, feature, node):\n entropy = 0\n categories = np.unique(feature)\n num_point = len(feature)\n for category in categories:\n # for each category in that feature\n num_category = len(feature[feature == category])\n for c in self.num_class:\n # count the number of each class\n num_category_class = len(feature[np.logical_and(feature == category, node.y == c)])\n if num_category_class == 0:\n continue\n # compute entropy/information gain or 
classification error\n entropy += num_category / num_point * (\n -num_category_class / num_category * log2(num_category_class / num_category))\n return entropy", "def entropy(self):\n raise NotImplementedError", "def entropy(self):\r\n return 1/2 * (self.dim * (_LOG_2PI + 1) + self._log_det_cov)", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def entropy(self):\n return -np.sum(self.log_likelihoods * np.exp(self.log_likelihoods))", "def entropy(self,classData):\n\n\t\t###### your implementation below ######\n\t\ttotalEntropy = 0\n\t\ttempSet = {}\n\t\tfor i in range(len(classData)):\n\t\t\ttempSet[classData[i]] = (classData.count(classData[i]))\n\t\ttotal = sum(tempSet.values())\n\t\tfor x in tempSet:\n\t\t\ttotalEntropy += -(tempSet[x]/total * math.log(tempSet[x]/total, 2))\n\t\treturn totalEntropy", "def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )", "def calculate_entropy(dataset) :\n\n num_entries = len(dataset)\n label_counts = {}\n for vector in dataset :\n # the label is at the last index of the data set\n current_label = vector[-1]\n if current_label not in label_counts :\n label_counts[current_label] = 0\n label_counts[current_label] += 1\n # Calculate the entropy\n entropy = 0.0\n for label in label_counts :\n # Calculate probability of each label within the dataset\n prob_of_label = label_counts[label]/num_entries\n # Since the entropy is the negative of the sum of all probability,\n # simply subtract it\n entropy -= prob_of_label * log(prob_of_label, 2)\n return entropy", "def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res", "def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation", "def get_data_entropy(local_data, decision_attribute):\n\tvalues_array = []\n\tresult = 0\n\tdata_set_size = len(local_data)\n\tvalues = get_unique_values(local_data, decision_attribute)\n\tfor value in values:\n\t\tnumber = count_values_for_attribute(local_data, decision_attribute, value)\n\t\tvalues_array.append({value: number})\n\n\tfor i in range(len(values_array)):\n\t\tfor key in (values_array[i]).keys():\n\t\t\tv = values_array[i][key]\n\t\t\tval = int(v)\n\t\t\tresult += -1 * (val / data_set_size) * math.log2(val / data_set_size)\n\treturn result", "def calc_entropy(data_set):\n size = len(data_set)\n label_counts = {}\n for feat_vector in data_set:\n label = feat_vector[-1]\n label_counts.setdefault(label, 0)\n label_counts[label] += 1\n\n entropy = 0.0\n for key, count in label_counts.iteritems():\n prob = float(count) / size\n entropy -= prob * log(prob, 2)\n\n return entropy", "def entropy(self, dataset, target_attr):\n freq = {} #A dictionary to counts how many samples for each target classification \n data_entropy = 0.0\n samplenumbers = len(dataset) #Total number of samplers in data set\n \n #Calculate the frequency of each of the values in the target attribute\n for record in dataset:\n if (record[target_attr] in freq):\n freq[record[target_attr]] += 1.0\n else:\n freq[record[target_attr]] = 1.0\n \n # Calculate the entropy of the data for the target attribute\n for freq in list(freq.values()):\n data_entropy += 
(-freq/samplenumbers) * math.log(freq/samplenumbers, 2) \n \n return data_entropy", "def get_entropy(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return sum([-(float(label_count[label]) /\n total_count) * np.log2(float(label_count[label]) / total_count)\n for label in label_count.keys()])", "def entropy(distribution, unit=2):\n frequencies = distribution.frequencies(normalised=True)\n # check to see if it is a deterministic case (all but one are zero)\n zeros_size = frequencies[frequencies == 0].size\n if zeros_size + 1 == frequencies.size:\n return 0\n else:\n return np.sum(-frequencies * np.log2(frequencies) / np.log2(unit))", "def entropy(data, idxList):\n df = data.loc[idxList]\n counts = df.value_counts().to_numpy()\n counts = counts.reshape(1, -1).astype(np.float32)\n counts /= np.sum(counts)\n log_sum = counts @ np.log2(counts.T)\n return -log_sum[0, 0]", "def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])" ]
[ "0.70600384", "0.68918896", "0.68384033", "0.67932045", "0.67821366", "0.6753284", "0.673469", "0.67291254", "0.66924876", "0.6660583", "0.6636825", "0.6634891", "0.662265", "0.65900844", "0.65794265", "0.6562032", "0.65568435", "0.6515372", "0.650739", "0.64776325", "0.6471337", "0.64699733", "0.64495814", "0.64266753", "0.6425339", "0.6376697", "0.6364972", "0.6357612", "0.6324415", "0.6320084" ]
0.777499
0
Returns the nonnegative information theory entropy for a single item, given the number of observations of this item and the total number of observations.
def item_entropy(item_count, total_count):
    # Two cases where the entropy is 0
    if item_count == total_count or item_count == 0:
        return 0
    item_prob = 1.0 * item_count / total_count
    return -item_prob * math.log(item_prob)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entropy(self):\n raise NotImplementedError", "def entropy(self):\n return self._normal.entropy()", "def entropy(group_counts):\n total = sum(group_counts)\n entro = 0\n for item_count in group_counts:\n entro += item_entropy(item_count, total)\n return entro", "def nats(self) -> float:\n return self.entropy()", "def entropy(self):\r\n return 1/2 * (self.dim * (_LOG_2PI + 1) + self._log_det_cov)", "def entropy(self):\n ent = 0.0\n for f in self.byte_freq:\n if f > 0:\n freq = float(f) / self.byte_total\n ent = ent + freq * math.log(freq, 2)\n return -ent", "def _entropy(self, feature, node):\n entropy = 0\n categories = np.unique(feature)\n num_point = len(feature)\n for category in categories:\n # for each category in that feature\n num_category = len(feature[feature == category])\n for c in self.num_class:\n # count the number of each class\n num_category_class = len(feature[np.logical_and(feature == category, node.y == c)])\n if num_category_class == 0:\n continue\n # compute entropy/information gain or classification error\n entropy += num_category / num_point * (\n -num_category_class / num_category * log2(num_category_class / num_category))\n return entropy", "def entropy(self):\n return -np.sum(self.log_likelihoods * np.exp(self.log_likelihoods))", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def div(self):\n freqList = [i / sum(self.has.values()) for i in self.has.values()]\n entropies = [i * math.log(i, 2) for i in freqList]\n entropy = -sum(entropies)\n return entropy", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count", "def entropy(self):\n return self._entropy_func", "def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])", "def _entropy(self):\n return self.rv.entropy(*self._pymc_dists_to_value(self.args), **self.kwds)", "def entropy(self):\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_", "def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res", "def entropy(self) -> float:\n probabilities = np.array([len(players) for players in self.answers.values()])\n probabilities = probabilities / sum(probabilities)\n return sc.stats.entropy(probabilities)", "def entropy(self,classData):\n\n\t\t###### your implementation below ######\n\t\ttotalEntropy = 0\n\t\ttempSet = {}\n\t\tfor i in range(len(classData)):\n\t\t\ttempSet[classData[i]] = (classData.count(classData[i]))\n\t\ttotal = sum(tempSet.values())\n\t\tfor x in tempSet:\n\t\t\ttotalEntropy += -(tempSet[x]/total * math.log(tempSet[x]/total, 2))\n\t\treturn totalEntropy", "def entropy( freq ):\n N = 0.0\n entropy = 0.0\n for x, v in freq.items( ):\n N += v\n entropy -= v * math.log( v, 2 )\n return (N * math.log( N, 2 ) + entropy) / N", "def entropy(message):\n n = 
len(message)\n message = letter_freq(message)\n h = 0\n for n_i in message.values():\n p_i = n_i/n\n h += -p_i*(log2(p_i))\n return h", "def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H", "def entropy(self, policy_params):\n return self.head.entropy(policy_params)", "def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())", "def entropy(message):\n message = letter_freq(message)\n n = sum(message.values())\n h = 0\n for n_i in message.values():\n p_i = n_i / n\n h += -p_i * log2(p_i)\n return h", "def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))", "def entropy_root(self):\n\t\tif self.size() == 0:\n\t\t\treturn 0\n\n\t\tcounter = collections.Counter(self.y)\n\t\treturn entropy([x[1] for x in counter.items()], self.size())", "def computeEntropy(self, img):\n hist, bins = np.histogram(img.ravel(), bins=256, density=True)\n return scipy.stats.entropy(hist)", "def entropy(distribution, unit=2):\n frequencies = distribution.frequencies(normalised=True)\n # check to see if it is a deterministic case (all but one are zero)\n zeros_size = frequencies[frequencies == 0].size\n if zeros_size + 1 == frequencies.size:\n return 0\n else:\n return np.sum(-frequencies * np.log2(frequencies) / np.log2(unit))", "def informationGain2(data, attribute):\n \n split_data = splitBy(data, attribute) \n weighted_entropies = 0\n \n for set in split_data:\n weighted_entropies += len(set) / len(data) * entropy2(set) \n \n columnIG = entropy2(data) - weighted_entropies\n \n return columnIG" ]
[ "0.685057", "0.672762", "0.6655158", "0.6543229", "0.64860266", "0.6456368", "0.63764286", "0.63019365", "0.6282704", "0.62645656", "0.6252446", "0.62362313", "0.62137896", "0.6163142", "0.61288327", "0.61129856", "0.60930246", "0.6091678", "0.60855293", "0.6058805", "0.60244054", "0.59921485", "0.5988065", "0.59859693", "0.59819305", "0.5981668", "0.5979606", "0.5961241", "0.59576994", "0.5859685" ]
0.70089054
0
Make Dataset with separate questions after cleaning them
def makedata():
    # train
    print('Clean Train Dataset and separate questions')
    df = pd.read_csv(TRAIN_DATASET).replace(np.nan, ' ')
    t = df.shape[0] * 2
    print t
    df['question1'] = cleanText(df['question1'])
    df['question2'] = cleanText(df['question2'])
    df.to_csv(os.path.join(rootpathdata_cleaned, 'train.csx'), index=False)
    overallquestions = df['question1'].tolist() + df['question2'].tolist()
    tpm = pd.DataFrame()
    tpm['question'] = overallquestions
    tpm.to_csv(os.path.join(rootpathdata_cleaned, 'train_allquestions.csx'), index=False)
    # test
    print('Clean Test Dataset and separate questions')
    df = pd.read_csv(TEST_DATASET).fillna(' ')
    t1 = df.shape[0] * 2
    df['question1'] = cleanText(df['question1'])
    df['question2'] = cleanText(df['question2'])
    df.to_csv(os.path.join(rootpathdata_cleaned, 'test.csx'), index=False)
    overallquestions += df['question1'].tolist() + df['question2'].tolist()
    tpm = pd.DataFrame()
    tpm['question'] = overallquestions
    tpm.to_csv(os.path.join(rootpathdata_cleaned, 'test_allquestions.csx'), index=False)
    print len(overallquestions), t1 + t
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_questions(self, number_of_questions):", "def _filter_unanswerable_samples(self):\n a = []\n q = []\n annotations = []\n for i in range(len(self.answers)):\n if len(self.answers[i].nonzero()) > 0:\n a.append(self.answers[i])\n q.append(self.questions[i])\n\n annotations.append(self.annotations[i])\n self.answers = a\n self.questions = q\n self.annotations = annotations", "def CosmosQADataset(cosmos_qa_type):\n assert(isinstance(cosmos_qa_type, CosmosQAType))\n download_dataset(Collection.COSMOS_QA, check_shallow_integrity)\n\n def extract_answers(entry):\n for i in range(0, 4):\n key = \"answer{}\".format(i)\n answer = entry[key]\n assert(isinstance(answer, string_types))\n yield answer\n del entry[key]\n\n all_ids = set()\n all_data = []\n with open(type_to_data_file(cosmos_qa_type), \"rt\") as f:\n for line in f:\n entry = json.loads(line)\n assert(isinstance(entry, dict))\n if cosmos_qa_type != CosmosQAType.TEST:\n assert(len(entry) == 8)\n else:\n assert(len(entry) == 7)\n\n # Extract data.\n question_id = entry[\"id\"]\n question = entry[\"question\"]\n context = entry[\"context\"]\n answers = list(extract_answers(entry))\n label = entry.get(\"label\", None)\n if label is not None:\n label = chr(ord('A') + int(label))\n\n # Validate data.\n assert(isinstance(question_id, string_types))\n assert(isinstance(question, string_types))\n assert(isinstance(context, string_types))\n assert(isinstance(answers, list) and len(answers) == 4)\n if cosmos_qa_type == CosmosQAType.TEST:\n assert(label is None)\n else:\n assert(label in [\"A\", \"B\", \"C\", \"D\"])\n\n assert(question_id not in all_ids)\n all_ids.add(question_id)\n all_data.append({\n \"id\": question_id,\n \"question\": question,\n \"context\": context,\n \"answers\": answers,\n \"correct\": label,\n })\n assert(len(all_data) == len(all_ids))\n df = pd.DataFrame(all_data)\n return df", "def clean_questions(self, q_dot=False):\n for q in self.data['questions']:\n temp = re.sub('[^ a-zA-Z0-9]', '', q['body'])\n temp = temp.strip().lower()\n # temp = self.stemming(temp).strip()\n self.cleaned_questions_q_dot += temp + '?\\n'\n self.cleaned_questions += temp + '\\n'\n self.types_q += q['type'].strip() + '\\n'", "def normalize_dataset(self):", "def dataset_preparation():\r\n with open('../data/patterns_num.txt', 'r') as f:\r\n data = f.readlines()\r\n X, Y = [], []\r\n for line in data:\r\n x, y = line.split('\\t')\r\n if len(x) > 5 and x not in X: # better results are achieved excluding short query patterns\r\n X.append(x.replace(\"X\", \"\").replace(\"Y\", \"\").lower())\r\n Y.append(int(y.replace('\\n', '')))\r\n test_size = 0.2\r\n # print('Test size:', test_size, '\\nWrong classifications:\\n')\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42, stratify=Y)\r\n return X_train, y_train, X_test, y_test", "def reset_questions(questions):\n random.shuffle(questions)", "def quora_data_prep(data_path, save_path=None, split_perc=0.3, line_text_file=False):\n data_path = Path(data_path)\n if save_path is None:\n save_path = Path(data_path)\n else:\n save_path = Path(save_path)\n\n quora_df = pd.read_csv(data_path / \"quora_duplicate_questions.tsv\", sep=\"\\t\")\n print(f\"Original Quora dataset has {len(quora_df)} entries.\")\n\n if line_text_file:\n filename = save_path / \"quora_full_line_text.txt\"\n quora_full_df = pd.concat([quora_df[\"question1\"], quora_df[\"question2\"]])\n print(\n f\"Quora full line text ({len(quora_full_df)} entries) is saved to '{filename}'\"\n )\n 
quora_full_df.to_csv(filename, index=False, header=False)\n else:\n quora_df = quora_df[quora_df[\"is_duplicate\"] == 1]\n print(\n f\"After non-duplicate dropped, Quora dataset has {len(quora_df)} entries.\"\n )\n quora_df = quora_df.drop([\"id\", \"qid1\", \"qid2\", \"is_duplicate\"], axis=1)\n quora_df = quora_df.rename(columns={\"question1\": \"src\", \"question2\": \"tgt\"})\n quora = quora_df.to_dict(\"records\")\n\n random.shuffle(quora)\n train_data = quora[: int(len(quora) * (1 - split_perc))]\n val_data = quora[int(len(quora) * (1 - split_perc)) :]\n\n train_filename = save_path / \"quora_train.pkl\"\n print(\n f\"Quora train split ({len(train_data)} entries) is saved to '{train_filename}'\"\n )\n dump(train_data, train_filename)\n val_filename = save_path / \"quora_val.pkl\"\n print(f\"Quora val split ({len(val_data)} entries) is saved to '{val_filename}'\")\n dump(val_data, val_filename)", "def preprocess_dataset(dataset, tokenizer):\n eos = torch.tensor([tokenizer.eos_token_id], dtype=torch.long)\n q_start = torch.tensor(tokenizer.encode('question:'), dtype=torch.long)\n q_end = torch.tensor(tokenizer.encode(':question'), dtype=torch.long)\n\n tensors = [[] for i in range(3)]\n for i in trange(len(dataset)):\n example = dataset[i]\n\n context_start_idx = (example[2] == 1).nonzero()[0].item()\n if example[1][-1] == 1:\n context_end_idx = len(example[1]) - 1\n else:\n context_end_idx = (example[1] == 0).nonzero()[0].item()\n ans_start = example[3] - context_start_idx\n ans_end = example[4] - context_start_idx\n\n context = example[0][context_start_idx: context_end_idx]\n question = example[0][: context_start_idx]\n answer = example[0][example[3]: example[4] + 1]\n\n input_ids = torch.cat([\n context,\n eos,\n answer,\n eos,\n q_start,\n question,\n q_end,\n eos\n ])\n\n attention_mask = torch.ones_like(input_ids, dtype=torch.long)\n token_type_ids = torch.cat([\n torch.zeros(len(context) + 1, dtype=torch.long),\n torch.ones(len(answer) + 1, dtype=torch.long),\n 2 * torch.ones(len(question) + 3, dtype=torch.long)\n ])\n token_type_ids[ans_start: ans_end + 1] = 1\n\n tensors[0].append(input_ids)\n tensors[1].append(attention_mask)\n tensors[2].append(token_type_ids)\n\n tensors_padded = []\n for i, sequences in enumerate(tqdm(tensors)):\n padded = pad_sequence(sequences, batch_first=True)\n tensors_padded.append(padded)\n\n new_dataset = TensorDataset(*tensors_padded)\n return new_dataset", "def get_questions():\n fields_dt = ['name', 'category', 'key', 'text']\n questions = frappe.db.get_list('Big Five Factor Model',\n fields=fields_dt)\n\n # Ordenamiendo random: se aplica sobre el objeto original\n suffle_data = random.shuffle(questions)\n\n return questions", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def prepare_dataset(self, data_raw):\n\n self._logger.debug(f'Preparing dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n #self._logger.debug(f'Line {line_count}/{len(data_raw)}')\n\n try:\n # TODO Call prepare_sample() here?\n sample = {}\n\n sample['text'] = line['text']\n sample['text_tokenized'] = None # set by add_tokens()\n 
sample['text_attention_mask'] = None # set by add_tokens()\n sample['item_name'] = line['string']\n self.add_tokens(sample)\n sample['text_mention_mask'] = None # set by add_mention_mask()\n self.add_mention_mask(sample)\n\n # Once for correct Wikidata item\n sample['item_id'] = line['correct_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['correct_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = True\n data.append(sample)\n sample_count += 1\n\n # Once for wrong Wikidata item\n sample['item_id'] = line['wrong_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['wrong_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = False\n data.append(sample)\n sample_count += 1\n\n except ValueError as e: # skip sample when there is no embedding found\n self._logger.info(str(e))\n sample_count_failed += 1\n continue\n\n self._logger.debug(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data", "def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = {\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)", "def prepare_datasets(target_filename='data'):\n data_cornell = np.array(datasets.readCornellData('__data__/cornell/', max_len=1000000))\n data_opensubs = np.array(datasets.readOpensubsData('__data__/opensubs/', max_len=1000000))\n\n data = np.concatenate([data_cornell, data_opensubs], axis=0)\n del data_cornell, data_opensubs\n\n pd.DataFrame(data, columns=('question', 'answer')).to_feather('__data__/'+target_filename+'.feather')", "def generate_corpus():\n data = load_data()\n questions = [s.split(' ', 1)[1].lower() for s in data]\n return questions", "def _create_examples(self, lines, kb_data, set_type):\n examples = []\n for idx, line in enumerate(lines):\n item = json.loads(line.strip())\n question_id = \"%s-%s\" % (set_type, idx)\n \n context_a_list = kb_data[idx]['answerA']\n context_b_list = kb_data[idx]['answerB']\n context_c_list = kb_data[idx]['answerC']\n\n context_a = \"\"\n for l in context_a_list[:1]:\n context_a += l.replace(\"\\n\",\". \")\n context_a = context_a[:-1]\n\n context_b = \"\"\n for l in context_b_list[:1]:\n context_b += l.replace(\"\\n\",\". \")\n context_b = context_b[:-1]\n\n context_c = \"\"\n for l in context_c_list[:1]:\n context_c += l.replace(\"\\n\",\". 
\")\n context_c = context_c[:-1]\n \n \n question = item[\"context\"] + item[\"question\"]\n endings = [item[\"answerA\"],item[\"answerB\"],item[\"answerC\"] ]\n label = item[\"correct\"]\n #race_id = \"%s-%s\" % (set_type, data_raw[\"race_id\"])\n #article = data_raw[\"article\"]\n #for i in range(len(data_raw[\"answers\"])):\n #truth = str(ord(data_raw[\"answers\"][i]) - ord(\"A\"))\n #question = data_raw[\"questions\"][i]\n #options = data_raw[\"options\"][i]\n\n examples.append(\n InputExample(\n example_id=question_id,\n question=question,\n contexts=[context_a,context_b,context_c],\n endings=[endings[0], endings[1], endings[2]],#, options[3]\n label=label,\n )\n )\n return examples", "def CreateValidationDataset(all_arrays):\n validation_dataset = Dataset()\n validation_dataset._addData(all_arrays[2])\n validation_dataset._addData(all_arrays[7])\n return validation_dataset", "def create_dataset():\n x_old, y_old = clean_scores_version1()\n\n # delete duplicates\n x_old = np.unique(x_old, axis=0)\n\n file = open('/Users/kira/Desktop/uni/Connect4/agents/agent_supervised_ml/unlabeled2.txt', \"a\")\n\n for row in x_old:\n string = ''\n move_seq = row[row != 0]\n for move in move_seq:\n string = string + str(move)\n for i in range(1, 8):\n file.write(string + str(i) + '\\n')\n\n file.close()", "def _clean_data(self, dataset):\n dataset.dropna(inplace=True)\n # Problem: handle missing data (in a different way), noisy data, inconsistent data", "def cleanup_dataset(euctr_cond):\n euctr_cond['date_of_the_global_end_of_the_trial'] = pd.to_datetime(euctr_cond['date_of_the_global_end_of_the_trial'])\n euctr_cond['trial_is_part_of_a_paediatric_investigation_plan'] = (euctr_cond['trial_is_part_of_a_paediatric_investigation_plan'] == True).astype(int)\n euctr_cond['trial_human_pharmacology_phase_i'] = (euctr_cond['trial_human_pharmacology_phase_i']== True).astype(int)\n euctr_cond['trial_therapeutic_exploratory_phase_ii'] = (euctr_cond['trial_therapeutic_exploratory_phase_ii']== True).astype(int)\n euctr_cond['trial_therapeutic_confirmatory_phase_iii'] = (euctr_cond['trial_therapeutic_confirmatory_phase_iii']== True).astype(int)\n euctr_cond['trial_therapeutic_use_phase_iv'] = (euctr_cond['trial_therapeutic_use_phase_iv']== True).astype(int)\n euctr_cond['not_bioequivalence_study'] = (euctr_cond['trial_bioequivalence_study']== False).astype(int)\n euctr_cond['trial_bioequivalence_study'] = (euctr_cond['trial_bioequivalence_study']== True).astype(int)\n euctr_cond['rare_disease_blank'] = (euctr_cond['trial_condition_being_studied_is_a_rare_disease'] == 'Information not present in EudraCT').astype(int)\n euctr_cond['not_rare_disease'] = (euctr_cond['trial_condition_being_studied_is_a_rare_disease'] == 'No').astype(int)\n euctr_cond['trial_condition_being_studied_is_a_rare_disease'] = (euctr_cond['trial_condition_being_studied_is_a_rare_disease'] == 'Yes').astype(int)\n euctr_cond['not_single_blind'] = (euctr_cond['trial_single_blind']== False).astype(int)\n euctr_cond['trial_single_blind'] = (euctr_cond['trial_single_blind']== True).astype(int)\n euctr_cond['not_healthy_volunteers'] = (euctr_cond['subject_healthy_volunteers']== False).astype(int)\n euctr_cond['subject_healthy_volunteers'] = (euctr_cond['subject_healthy_volunteers']== True).astype(int)\n\n # Nick's notebook used pandas.notna, we reimplement a simplified version\n # here for compatibility with pandas 0.19\n def euctr_notna(x):\n return not (x is None or x is np.nan)\n euctr_cond['trial_results'] = 
(euctr_cond['trial_results'].apply(euctr_notna)).astype(int)\n\n euctr_cond.rename(columns={'full_title_of_the_trial':'full_title', 'name_or_abbreviated_title_of_the_trial_where_available': 'abbreviated_title'}, inplace=True)\n euctr_cond['non_eu'] = euctr_cond.eudract_number_with_country.str.contains('-3rd').astype(int)", "def transform(self, dataset, labels):\n print(f\"Dropping {len(self.deficient)} deficient features...\")\n dataset.drop(columns=self.deficient, inplace=True)\n print(f\"Scanning {len(dataset)} samples for duplicates...\")\n duplicates = dataset.duplicated()\n print(f\"Dropping {sum(duplicates)} duplicate samples...\")\n dataset.drop(index=dataset.index[duplicates], inplace=True)\n dataset.reset_index(drop=True, inplace=True)\n labels.drop(labels=labels.index[duplicates], inplace=True)\n labels.reset_index(drop=True, inplace=True)\n return dataset, labels", "def remove_data():\n # Removing the existing data\n col_answer_given.remove()\n col_answer_not_given.remove()\n col_q_not_given.remove()\n col_to_summarize.remove()", "def get_question():\n\n fi = open('nlpcc-iccpol-2016.kbqa.training-data','r',encoding='utf8')\n fii = open('nlpcc-iccpol-2016.kbqa.testing-data','r',encoding='utf8')\n\n q=''\n\n train = []\n countChar = {}\n m_word = 0\n i = 0\n for line in fi:\n# print(f'line: {line}')\n if line[1] == 'q':\n# print(f'line Q: {line}')\n q = line[line.index('\\t') + 1:].strip()\n if len(q) > m_word:\n m_word = len(q)\n train.append(q)\n# print(f'filtered Q: {q}')\n for char in q:\n if char not in countChar:\n countChar[char] = 1\n else:\n countChar[char] += 1\n# elif line[1] == 't':\n# print(f'line P: {line}')\n# sub = line[line.index('\\t') + 1:line.index(' |||')].strip()\n# qNSub = line[line.index(' ||| ') + 5:]\n# pre = qNSub[:qNSub.index(' |||')]\n# print(f'sub:{sub}')\n# print(f'qNSub:{qNSub}')\n# print(f'pre:{pre}')\n\n test = []\n for line in fii:\n# print(f'line: {line}')\n if line[1] == 'q':\n# print(f'line Q: {line}')\n q = line[line.index('\\t') + 1:].strip()\n if len(q) > m_word:\n m_word = len(q)\n test.append(q)\n# print(f'filtered Q: {q}')\n for char in q:\n if char not in countChar:\n countChar[char] = 1\n else:\n countChar[char] += 1\n \n \n with open('train.txt', 'w', encoding='utf-8') as f:\n f.write('\\n'.join(train))\n with open('test.txt', 'w', encoding='utf-8') as f:\n f.write('\\n'.join(test))\n \n # Save\n np.save('words.npy', countChar)\n \n print(m_word)\n\n# # Load npy dict\n# read_dictionary = np.load('my_file.npy').item()\n# print(read_dictionary['hello']) # displays \"world\"\n \n \n return m_word", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def clean(args):\n with_dataset(args, Dataset._clean)", "def shuffle_dataset(self):\n # TODO explain approached used for selecting training and test data\n labels = self.dataset.label.unique()\n good_jobs = self.dataset[self.dataset.label == \"Good\"]\n bad_jobs = self.dataset[self.dataset.label == \"Bad\"]\n\n # TODO n>2 probablly won't work the way it's supposed to currently\n if len(labels) == 2:\n # oversample\n resize = max(len(good_jobs.label),len(bad_jobs.label))\n # undersample\n resize = min(len(good_jobs.label), len(bad_jobs.label))\n good_jobs_re = good_jobs.sample(resize)\n bad_jobs_re = bad_jobs.sample(resize)\n dataset = pd.concat([good_jobs_re, bad_jobs_re])\n elif 
len(labels) == 3:\n neutral_jobs = self.dataset[self.dataset.label == \"Neutral\"]\n # oversample\n resize = max(len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label))\n # undersample\n resize = min(len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label))\n\n good_jobs_re = good_jobs.sample(resize, replace=True)\n bad_jobs_re = bad_jobs.sample(resize, replace=True)\n neutral_jobs_re = bad_jobs.sample(resize, replace=True)\n dataset = pd.concat([good_jobs_re, bad_jobs_re,neutral_jobs_re])\n elif len(labels) == 4:\n neutral_jobs = self.dataset[self.dataset.label == \"Neutral\"]\n ideal_jobs = self.dataset[self.dataset.label == \"Ideal\"]\n\n # middle of the road approach\n resize = int(mean([len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label),len(ideal_jobs.label)]))\n good_jobs_re = good_jobs.sample(resize, replace=True)\n bad_jobs_re = bad_jobs.sample(resize, replace=True)\n neutral_jobs_re = bad_jobs.sample(resize, replace=True)\n ideal_jobs_re = ideal_jobs.sample(resize,replace=True)\n dataset = pd.concat([good_jobs_re, bad_jobs_re,neutral_jobs_re,ideal_jobs_re])\n\n train,test = train_test_split(dataset,test_size=0.25,stratify = dataset.label,shuffle=True)\n #test = self.dataset[~self.dataset.isin(train)].dropna()\n #test = self.dataset[(~dataset.label.isin(self.dataset.label))&(~dataset.description.isin(self.dataset.description))]\n #0tr_hashes = [hash(tuple(d)) for d in train.description]\n #ytest = [val for iter,val in self.dataset.iterrows() if hash(tuple(val.description)) not in tr_hashes]\n\n self.y_train,self.y_test = train.label.values,test.label.values\n self.X_train,self.X_test = train.description.values,test.description.values", "def prepare_for_supervised(embeddings):\n \n X, y = [], []\n for problem in embeddings:\n pairs = [np.concatenate((problem['question'], problem[i])) \n for i in ('choice_0', 'choice_1', 'choice_2', 'choice_3')]\n correct_answer = problem['correct_answer']\n \n labels = [0, 0, 0, 0]\n labels[correct_answer] = 1\n X.extend(pairs)\n y.extend(labels)\n return X, y", "def OBQADataset(obqa_type, with_retrieved_facts=False):\n assert(isinstance(obqa_type, OBQAType))\n download_dataset(Collection.ALLEN_AI_OBQA, check_shallow_integrity)\n all_data = []\n all_ids = set()\n retrieved_facts = None\n if with_retrieved_facts:\n retrieved_facts = json.load(open(os.path.join(OBQA_CACHE_DIR, \"extracted_facts.json\"))) # noqa: E501\n with open(type_to_data_file(obqa_type), \"rt\") as f:\n for line in f:\n entry = json.loads(line)\n assert(len(entry) == 3)\n\n question_id = entry[\"id\"]\n correct_answer = entry[\"answerKey\"]\n assert(isinstance(question_id, string_types))\n assert(correct_answer in [\"A\", \"B\", \"C\", \"D\"])\n\n entry = entry[\"question\"]\n assert(len(entry) == 2)\n question = entry[\"stem\"]\n answers = [\n choice[\"text\"]\n for choice in sorted(\n entry[\"choices\"],\n key=lambda x: x[\"label\"])\n ]\n assert(isinstance(question, string_types))\n assert(len(answers) == 4)\n for answer in answers:\n assert(isinstance(answer, string_types))\n assert(question_id not in all_ids)\n all_ids.add(question_id)\n new_row = {\n \"id\": question_id,\n \"question\": question,\n \"answers\": answers,\n \"correct\": correct_answer\n }\n if with_retrieved_facts:\n queries = [question + \" \" + answer for answer in answers]\n facts = [retrieved_facts[query] for query in queries]\n assert(len(facts) == 4)\n for fact in facts:\n assert(len(fact) == 3)\n assert(isinstance(fact, dict))\n assert(\"context\" in fact)\n 
assert(\"token_based\" in fact)\n assert(\"vector_based\" in fact)\n new_row[\"retrieved_facts\"] = facts\n all_data.append(new_row)\n assert(len(all_data) == len(all_ids))\n df = pd.DataFrame(all_data)\n return df", "def clean_data():\n pd.set_option('display.max_columns', None)\n try:\n df = pd.read_csv('test1/movie.csv')\n except FileNotFoundError:\n df = pd.read_csv('movie.csv')\n\n df.drop(labels=[\"actor_3_facebook_likes\", \"actor_2_name\",\n \"actor_1_facebook_likes\", \"actor_1_name\",\n \"num_voted_users\",\n \"cast_total_facebook_likes\", \"actor_3_name\",\n \"facenumber_in_poster\", \"movie_imdb_link\",\n \"num_user_for_reviews\", \"actor_2_facebook_likes\",\n \"aspect_ratio\", \"color\", \"num_critic_for_reviews\",\n \"director_facebook_likes\"], axis=1, inplace=True)\n df.dropna(subset=[\"gross\"], axis=0, inplace=True)\n return df", "def set_up_data():\r\n \r\n X, Y = pretreatment.import_dataset()\r\n \r\n print('Applying cleansing...')\r\n X = pretreatment.pretreatment(X)\r\n Y = pretreatment.pretreatment(Y)\r\n \r\n indice = [i for i in range(len(X)) if (len(X[i]) > SENTENCE_LENGTH-2 and len(X[i]) < SENTENCE_LENGTH+1 and len(Y[i]) > SENTENCE_LENGTH-2 and len(Y[i]) < SENTENCE_LENGTH+1)]#(len(X[i]) > SENTENCE_LENGTH and len(X[i]) < 2 * SENTENCE_LENGTH and len(Y[i]) > SENTENCE_LENGTH and len(Y[i]) < 2 * SENTENCE_LENGTH)]\r\n X = [X[i] for i in indice]\r\n Y = [Y[i] for i in indice]\r\n \r\n X = pretreatment.standardize_sentence_length(X)\r\n Y = pretreatment.standardize_sentence_length(Y)\r\n \r\n print('Computing the corpus sizes...')\r\n compute_T(X, 'english')\r\n compute_T(Y, 'french')\r\n compute_S(X, 'english')\r\n compute_S(Y, 'french')\r\n compute_N(X, 'french')\r\n compute_N(Y, 'english')\r\n \r\n print('English corpus: %d tokens' % T_ENGLISH)\r\n print('French corpus: %d tokens' % T_FRENCH)\r\n print('English sentence length: %d' % S_ENGLISH)\r\n print('French sentence length: %d' % S_FRENCH)\r\n print('Number of sentences (both english and french): %d / %d' % (N_ENGLISH, N_FRENCH))\r\n \r\n print('Converting in one hot vectors')\r\n global CORPUS_ENGLISH, CORPUS_FRENCH\r\n params_ENGLISH = (N_ENGLISH, S_ENGLISH, T_ENGLISH)\r\n params_FRENCH = (N_FRENCH, S_FRENCH, T_FRENCH)\r\n X, CORPUS_ENGLISH= treatment.convert_to_one_hot(X, params_ENGLISH)\r\n Y, CORPUS_FRENCH= treatment.convert_to_one_hot(Y, params_FRENCH)\r\n \r\n return (X, Y)" ]
[ "0.6497021", "0.6374592", "0.63526404", "0.63425726", "0.60416114", "0.6011495", "0.59849167", "0.5970139", "0.59679186", "0.59492254", "0.59333664", "0.5916948", "0.5895329", "0.5882728", "0.5866233", "0.5852494", "0.578167", "0.57745177", "0.57384783", "0.57150567", "0.5704251", "0.56994456", "0.56967425", "0.5696371", "0.5687815", "0.5684607", "0.5642552", "0.5631884", "0.56273764", "0.5616116" ]
0.72315687
0
Helper function to get the dates out of the beautifulsoup response object
def get_dates(soup): return [res.get_text() for res in soup.find_all('a', attrs={'href':re.compile('/events/')})]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_listing_date(soup, dates):\r\n pdate = soup.find_all(name = 'div', class_='pdate')\r\n for i in pdate:\r\n # input(i)\r\n text = i.get_text()\r\n date = text.split(':')\r\n date = date[2:3]\r\n\r\n dates.append(date)\r\n\r\n return dates", "def parse_dates(page, logger=None):\n\n result = []\n\n soap = BeautifulSoup(page, 'lxml')\n spans = soap.findAll('span', class_='vc-focusable')\n for span in spans:\n if 'vc-text-gray-400' not in str(span):\n if logger:\n logger.debug('Available date was found')\n label = span.get('aria-label')\n if logger:\n logger.debug(f'aria-label: {label}')\n date = arrow.get(label, 'dddd, MMMM D, YYYY')\n date = date.format('DD-MM-YYYY (dddd)')\n if logger:\n logger.debug(f'date: {date}')\n result.append(date)\n if logger:\n logger.debug(f\"'{date}' date was added\")\n return result", "def parse(self, day, response): # pylint: disable=no-self-use\n soup = BeautifulSoup(response.text, 'html.parser')\n uls = soup.find_all('ul')\n\n # The page for February 29 has a slightly different structure.\n if utils.is_leap_day(day):\n events = uls[3]\n else:\n events = uls[1]\n\n return events", "def date(self, response):\n\t\tx = response.xpath(\"//div[@class='wrapper appendbottom-10']/div/p/text()\")[-1].re('(\\w+)')\n\t\treturn x", "def _get_case_dates(self):\n path = \"//path/to/text/text()\"\n return [\n convert_date_string(date_string)\n for date_string in self.html.xpath(path)\n ]", "def extract_datetime(self, response):\n query = self.extract_datetime_query\n extracted = response.css(query).extract()[1]\n return parser.parse(extracted)", "def extract_datetime(self):\n\n # LegutolsΓ³ frissΓ­tΓ©s dΓ‘tuma: 2020.03.24. 11:15\n doc = html.document_fromstring(self.req.text)\n el = doc.xpath('.//div[contains(@class, \"view-diagrams\")]')\n if el:\n text = \"\".join(\n el[0].xpath('.//text()')\n )\n # <p>LegutolsΓ³ frissΓ­tΓ©s dΓ‘tuma: 2020.03.24. 
11:15 </p>\n re_dt = re.compile(r\"LegutolsΓ³ frissΓ­tΓ©s dΓ‘tuma: (.*?)\\n\")\n dt_from_re = re_dt.findall(text)\n\n if not dt_from_re:\n raise Exception(\"Did not find datetime from webpage\")\n\n dt_from_re = dt_from_re[0]\n dt_from_re = dateutil.parser.parse(dt_from_re)\n self.dt = dt_from_re", "def parse():\n G.go(SITE_URL)\n articles = []\n for article in G.doc.select(\"//li[@class='regularitem']\"):\n header = article.select('h4').text()\n text = article.select('div').text()\n url = article.select('h4/a/@href').text()\n dt_string = article.select('h5').text()\n # for date format \"1 Nov 2019 00:00:00\" or \"01 Nov 2019 00:00:00\"\n article_dt = re.search(r'\\d{1,2} [a-zA-Z]+ \\d{4} \\d{2}:\\d{2}:\\d{2}', dt_string)\n if article_dt is None:\n logging.exception('Datestring format is unknown: %s', dt_string)\n continue\n article_dt = article_dt.group(0)\n article_dt = datetime.datetime.strptime(article_dt, '%d %b %Y %H:%M:%S').strftime(\"%Y-%m-%d %H:%M:%S\")\n articles.append({'header': header, 'url': url, 'text': text, 'dt': article_dt})\n return articles", "def _get_dates():\n remote = os.path.join(BASE_URL, RSS_FEED)\n local = os.path.join(TMP, RSS_FEED)\n u..(remote, local)\n\n with open(local) as f:\n return PUB_DATE.findall(f.read())", "def get_info(url):\r\n soup = make_request(url)\r\n\r\n #get press release title\r\n title_text = soup.find(\"h2\", \"con-title\").text.strip()\r\n title = title_text.partition('\\n')[0]\r\n\r\n #get press release content and date\r\n div = soup.find_all(\"div\") #find div tags\r\n for ele in div:\r\n for div2 in ele(\"div\",\"text-right\"):\r\n if \"η™Όδ½ˆζ—₯期\" in div2.text:\r\n text = ele.text\r\n date = re.findall(\"\\d\\d\\d\\d-\\d\\d-\\d\\d\", div2.text)[0]\r\n break #prevents reiterating upwards to all div parents\r\n return date, title, text", "def parseDate(date):\r\n\tyearNode = date.getElementsByTagName('year').item(0)\r\n\tmonthNode = date.getElementsByTagName('month').item(0)\r\n\tdayNode = date.getElementsByTagName('day').item(0)\r\n\r\n\tyear = yearNode.firstChild.data\r\n\tmonth = monthNode.firstChild.data\r\n\tday = dayNode.firstChild.data\r\n\r\n\treturn year, month, day", "def _parse_upcoming(self, response):\n year_title = response.css('.text-area-full h2.text-align-center *::text').extract_first()\n upcoming_year = re.search(r'^\\d{4}', year_title).group(0)\n date_list = []\n # Get list of month names to check in regular expression\n months = [datetime(int(upcoming_year), i, 1).strftime('%B') for i in range(1, 13)]\n for item in response.css('.text-area-full table.text-align-center td *::text'):\n item_text = item.extract()\n # See if text matches date regex, if so add to list\n date_match = re.search(r'({}) \\d{{1,2}}'.format('|'.join(months)), item.extract())\n if date_match:\n date_str = '{} {}'.format(date_match.group(), upcoming_year)\n date_dict = {\n 'start': {\n 'date': datetime.strptime(date_str, '%B %d %Y').date()\n },\n 'sources': [{\n 'url': response.url,\n 'note': ''\n }],\n }\n date_dict['status'] = self._generate_status(date_dict, text=item_text)\n date_list.append(date_dict)\n return date_list", "def open_hotel_date(self, soup):\n logging.info('Getting hotel booking registration date.')\n if soup.select_one('span.hp-desc-highlighted') is None:\n logging.error('Cant get hotel date.')\n return ''\n else:\n open_date_text = soup.select_one('span.hp-desc-highlighted').text.strip()\n if \" с \" in open_date_text:\n index = soup.select_one('span.hp-desc-highlighted').text.strip().find(\" с \")\n date = 
open_date_text[index+3:].replace('.', '')\n try:\n day, month, year = date.split(' ')\n month = RU_MONTH_VALUES[month[0:3]]\n date = '/'.join([day, month, year])\n except Exception:\n logging.error('Cant get hotel date.')\n return ''\n return date\n else:\n logging.error('Cant get hotel date.')\n return ''", "def dateReview(soup: str, nb:int):\n dateR = []\n for span in soup.findAll('article', attrs={'itemprop': 'review'}):\n dat = str(recovTextBetweenTags(str(span.findAll('time', attrs={\n 'itemprop': 'datePublished'})), ',')).replace(\"['[\", '').replace(\"]']\", '')\n dat = (format_date(dat))\n\n if (dat) > (datetime.now() - timedelta(nb)):\n top = span.findAll('time', attrs={'itemprop': 'datePublished'})\n dateR.append(recovTextBetweenTags(str(top), ','))\n\n return dateR", "def _parse_link_date_map(self, response):\n link_date_map = defaultdict(list)\n for link in response.css(\n \".vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16\"\n )[:1].css(\"a\"):\n link_str = link.xpath(\"./text()\").extract_first()\n link_start = self._parse_start(link_str)\n if link_start:\n link_date_map[link_start.date()].append(\n {\n \"title\": re.sub(r\"\\s+\", \" \", link_str.split(\" – \")[-1]).strip(),\n \"href\": link.attrib[\"href\"],\n }\n )\n for section in response.css(\n \".vc_col-sm-4.column_container:nth-child(1) .vc_tta-panel\"\n ):\n year_str = section.css(\".vc_tta-title-text::text\").extract_first().strip()\n for section_link in section.css(\"p > a\"):\n link_str = section_link.xpath(\"./text()\").extract_first()\n link_dt = self._parse_start(link_str, year=year_str)\n if link_dt:\n link_date_map[link_dt.date()].append(\n {\n \"title\": re.sub(\n r\"\\s+\", \" \", link_str.split(\" – \")[-1]\n ).strip(),\n \"href\": section_link.xpath(\"@href\").extract_first(),\n }\n )\n return link_date_map", "def extract_dates(data):\n dates = []\n \n for line in data.splitlines():\n if line[6:8] == \"20\":\n dates.append(datetime.strptime(line[6:16], '%Y-%m-%d').date())\n \n return list(set(dates))\n pass", "def get_dates_for_papers(tree):\n\tns = 'http://exslt.org/regular-expressions'\n\tpath = '//a[re:match(@href, \"http://eprints.gla.ac.uk/[0-9]+/\")]/preceding-sibling::text()'\n\t# gets a list of strings, each containing a date of a paper (amongst other irrelevant characters)\n\ttext_containing_dates = tree.xpath(path, namespaces={'re':ns})\n\t# join the list into a single string so that regular expression can be used on it\n\ttext_as_string = \" \".join(text_containing_dates)\n\t# extract the dates from the string into a list\n\tdates = re.findall(\"[0-9]+\", text_as_string)\n\treturn dates", "def scrape_dates(self, main_content):\n container = main_content.find('div', {'class': 'field field-name-field-datetime field-type-datestamp '\n 'field-label-above'})\n\n date_spans = container.find_all('span', {'class': 'date-display-single'})\n\n dates_list = []\n\n for date_span in date_spans:\n is_all_day = False\n match = self.all_day_regex.match(date_span.text)\n if match:\n is_all_day = True\n dates_list.append((date_span['content'], is_all_day))\n\n return dates_list", "def get_dates(season, info):\n url = 'http://www.basketball-reference.com/leagues/NBA_{0}_games.html'.format(season.split('-')[-1])\n rv = requests.get(url)\n soup = BeautifulSoup(rv.text)\n seasons = soup.find_all('table', {'class': 'sortable stats_table'})\n if len(seasons) == 2:\n reg_season, post_season = seasons\n else:\n reg_season, post_season = seasons[0], None\n dates = set()\n for table in [reg_season, 
post_season]:\n if table:\n rows = table.tbody.find_all('tr')\n for row in rows:\n match = row.find('a', href=True, text='Box Score')\n if match:\n match_code = match['href'].split('/')[2].split('.')[0]\n date = match_code[:-4]\n if info == 'money_lines':\n date = \"-\".join([date[:4], date[4:6], date[6:]])\n dates.add(date)\n return sorted(list(dates))", "def get_date(logs_feed, date_loc=3):\n\n line = logs_feed[date_loc]\n date = re.findall('\\d+-\\d+-\\d+', line)\n return date", "def _get_dates(self, board_div: HtmlElement):\n # I expect there to be two uls. The first with the board dates and the\n # second with the general membership dates\n uls = board_div.xpath(\"ul\")\n if len(uls) != 3:\n raise PageChangedException()\n board_ul, general_ul, advisory_ul = uls\n lac_uls = board_div.xpath(\"./div/div/ul\")\n if len(lac_uls) != 1:\n raise PageChangedException()\n lac_ul = lac_uls[0]\n\n board_lis = self._lis_from_ul(board_ul)\n general_lis = self._lis_from_ul(general_ul)\n advisory_lis = self._lis_from_ul(advisory_ul)\n lac_lis = self._lis_from_ul(lac_ul)\n\n board_dates = self._date_from_lis(board_lis)\n general_dates = self._date_from_lis(general_lis)\n advisory_dates = self._date_from_lis(advisory_lis)\n lac_dates = self._date_from_lis(lac_lis)\n return (board_dates, general_dates, advisory_dates, lac_dates)", "def get_dates(txt):\n txt = re.sub(r'[^\\w\\s]', '', txt)\n txt_token = txt.split()\n return get_dates_from_token_list(txt_token)", "def date_parser(dates):\n\n #splitting the dates(containing datetime data) list and returning only the datetime\n return([item.split()[0] for item in dates])\n pass", "async def _parse_source_response_date_time(self, response: Response) -> datetime:\n datetime_parts = [\n int(part) for part in (await self._soup(response)).find(id=\"start_of_the_test\").string.split(\".\")\n ]\n return datetime(*datetime_parts) # type: ignore", "def date_parser(dates):\n # extract the date only from dates: Olwethu\n date_list = []\n for i in dates:\n i = i.split(' ')\n # append each date to a new list: Olwethu\n date_list.append(i[0])\n \n return date_list", "def get_date(data):\r\n data = json.loads(data)\r\n dates = data.get(\"ReceiptData\", {\"orderDate\": []})\r\n \r\n # Make sure we get all products in the cart.\r\n return dates['orderDate']", "def scrape(self):\n #Get page\n soup, _ = getPage(self.url)\n\n #Check if page available\n if soup is None:\n #Not available, skip iteration\n self.na = True\n return\n\n #Get Price\n self.price = soup.find(class_=\"user-ad-price__price\").get_text()\n #Get list of attr names and values\n adAttrVals = soup.find_all(class_=\"vip-ad-attributes__value\")\n adAttrName = soup.find_all(class_=\"vip-ad-attributes__name\")\n #Search attrs for date listed\n for i in range(0,len(adAttrName)):\n if adAttrVals[i].contents[0] == \"Date Listed\":\n self.listDate = adAttrName[i].contents[0]\n break", "def get_cur_date(self):\n tmp = self.soup.find('small', text=re.compile('market', re.IGNORECASE)).text.split('Market')[0].strip()\n\n # assign year\n self.year = Settings.year.search(tmp).group(0)\n\n # assign day\n self.day = Settings.day.search(tmp).group(0)\n\n months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n\n # iterate over months and flag if match found\n for ii, mo in enumerate(months, 1):\n more = re.compile(mo, re.IGNORECASE)\n if more.search(tmp):\n self.month = ii\n break", "def Dates(self):\n data = self.DictData()\n dates = [ row[ \"Date\"] for row in data ]\n return dates", 
"def parse_date(article):\n pub_date = article.find(\"publicationstmt\")\n year = pub_date.find(\"date\")\n year = year.attrs.get(\"when\") if year is not None else \"\"\n return year" ]
[ "0.7227621", "0.71890837", "0.69590205", "0.68518174", "0.6651197", "0.66487515", "0.6525932", "0.6520851", "0.6460153", "0.64458275", "0.63115686", "0.62843174", "0.6270317", "0.62502563", "0.62217456", "0.621551", "0.60811263", "0.6070497", "0.60091525", "0.5976291", "0.5970385", "0.59682894", "0.59363645", "0.5933858", "0.5931512", "0.59208876", "0.58800143", "0.58754027", "0.58720803", "0.5863278" ]
0.75097996
0
Helper function to impute the dates from the proboxingodds website into the tables that contain the fights.
def impute_dates(tables, dates):
    new_fights = []
    for idx, date in enumerate(dates):
        if date == 'FUTURE EVENTS':
            break
        tables[idx]['Date'] = date
    for table in tables[:-1]:
        fights = [table[x:x+2] for x in range(0, len(table), 2)]
        for idxf, fight in enumerate(fights):
            fight.reset_index(drop=True, inplace=True)
            fight['Time'] = fight['Time'][0]
            new_fights.append(fight)
    return new_fights
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_proboxingodds(soup):\n tables = []\n site = pd.read_html(str(soup))\n for idx, table in enumerate(site):\n if idx == 0:\n pass\n elif idx % 2 != 0:\n sliced = table[(table['Unnamed: 0'].str.contains(':')) | (table['Unnamed: 0'] == 'UTC')]\n sliced = sliced.rename({'Unnamed: 0':'Time', 'Unnamed: 1':'Fighter'}, axis=1)\n sliced['Fivedimes'] = sliced['5Dimes']\n sliced['WilliamH'] = sliced['William\\xa0H.']\n sliced['SportsInt'] = sliced['SportsInt.']\n sliced.drop(columns=['Props', 'Props.1', 'Props.2', '5Dimes', 'William\\xa0H.', 'SportsInt.'], inplace=True)\n sliced['last_updated'] = datetime.datetime.now()\n for i in sliced.columns.to_list()[2:-1]:\n sliced[i] = sliced[i].apply(lambda x: remove_arrows(x))\n sliced[i] = sliced[i].apply(lambda x: amer_to_dec(x))\n tables.append(sliced)\n dates = get_dates(soup)\n tables = impute_dates(tables, dates)\n tables = [impute_fightID(table) for table in tables]\n tables = [db_handler.check_db_for_models(table) for table in tables]\n return tables", "def dayInterpA(table, date):\n iDate = date.strftime('%Y-%m-%d')\n # age 0 to 99\n popi = table[table.date1 == iDate]\n\n # Remove the columns for age 100 and the date1\n popi = popi.iloc[:,0:100]\n\n # store the popi results into an array for the interpolation\n #popi = (np.asarray(popi)).tolist()\n popi = np.asarray(popi)\n\n # Interpolate the age in Days\n iuspl2 = InterpolatedUnivariateSpline(AGE3, popi/365)\n iuspl2_pred = iuspl2(AGEOUT)\n\n # the output\n merged = pd.DataFrame(index = range(0,len(AGEOUT)), columns = ['AGE','POP'])\n merged['AGE'] = AGEOUT\n merged['POP'] = iuspl2_pred\n return merged", "def preprocess(table):\n # drop the column Zeitraum\n table = table.drop('Zeitraum', axis=1)\n # drop the rows containing the true results of the elections\n Idx = np.where(table.Befragte=='Bundestagswahl')[0]\n Idx = np.append(Idx, np.where(table['CDU/CSU'].str.contains('Umfrage'))[0])\n table = table.drop(Idx)\n table.index = np.arange(table.shape[0])\n # replace the strings %,-\n table = table.replace('%', '', regex=True)\n table = table.replace(',', '.', regex=True)\n table = table.replace('[–?]', '', regex=True)\n # fix the column Befragte !!!!!!!!!!!!!!\n table.Befragte = table.Befragte.replace('[T β€’ ?β‰ˆO β€’ .]', '', regex=True)\n # replace all empty entries with NaN\n table = table.replace('', 'NaN', regex=True)\n\n # if the colomn Sonstige contains entries with more than one number\n try: \n table.Sonstige = table.Sonstige.astype(float)\n except ValueError:\n for i, n in enumerate(table.Sonstige):\n if len(n) > 2:\n digits = np.array([digit for digit in np.arange(10).astype(str) if digit in n])\n table.Sonstige[i] = digits.astype(int).sum()\n table.Sonstige = table.Sonstige.astype(float)\n\n # convert all numbers to float\n table[table.keys()[1:]] = table[table.keys()[1:]].astype(float)\n # convert the date to type date\n table.Datum = pd.to_datetime(table.Datum, format='%d.%m.%Y').dt.date\n return table", "def populate(table_name, date):\n\tlog_msg3(\"Populando \" + table_name)\n\n\twsq_to_txt(table_name, date)\n\n\t# si es un nuevo aΓ±o se crea una nueva tabla\n\tif(is_new_year(table_name) and not new_tables_created):\n\t\tcreate_tables()\n\n\ttxt_to_table(table_name)\n\n\tlog_msg_ok3()", "def get_data(table_name, end, num, start=None):\n if start == None:\n if table_name == \"days\": start = end - timedelta(days=num-1) \n if table_name == \"weeks\": start = end - timedelta(weeks=num-1) \n if table_name == \"months\": start = end - relativedelta(months=+num-1) \n if 
table_name == \"years\": start = end - relativedelta(years=+num-1) \n else: \n start = days.get_entry(table_name, start).date\n \n dates = []\n data = []\n weather = []\n density = []\n \n while start <= end:\n entry = days.get_entry(table_name, start)\n data.append(entry.sentiment)\n \n if table_name == \"days\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(days=1)\n if table_name == \"weeks\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(weeks=1) \n if table_name == \"months\": \n dates.append(entry.date.strftime(\"%B %Y\"))\n start = start + relativedelta(months=+1) \n if table_name == \"years\": \n dates.append(entry.date.strftime(\"%Y\"))\n start = start + relativedelta(years=+1) \n\n # 7/15/15 is the last entry in the current weather dictionary\n num_days = (min(start, date(2015,7,15)) - entry.date).days\n temp = {entry.date + timedelta(days=i): weather_dict[entry.date + timedelta(days=i)] for i in range(num_days)}\n weather.append(float(sum(temp.values()))/float(len(temp)))\n\n if density_dict != None:\n d = max(entry.date, date(2014,7,1))\n num_days = (min(start, date(2015,7,28)) - d).days\n rho = {d + timedelta(days=i): density_dict[d + timedelta(days=i)] for i in range(num_days)}\n density.append(float(sum(rho.values()))/float(len(rho)))\n\n return dates, data, weather, density", "def weekly(evictiondata):\r\n evictions_per_week = {}\r\n for index, row in evictiondata.iterrows():\r\n if row['week_date'] not in evictions_per_week.keys():\r\n evictions_per_week[row['week_date']] = row['filings_2020']\r\n else:\r\n evictions_per_week[row['week_date']] += row['filings_2020']\r\n return evictions_per_week", "def get_date_pred():\r\n \r\n date_now = dt.datetime.now()\r\n date_pred = [date_now - dt.timedelta(days=1)+dt.timedelta(days=i) for i in range(8)]\r\n month_pred = [item.month for item in date_pred]\r\n day_pred = [item.day for item in date_pred]\r\n \r\n return date_pred,month_pred,day_pred", "def tbl_restrns_date(self,*expect_restrns):\n\n for count,restrn in enumerate(self.final_dataframe.keys()):\n \n if 'No Key Found' not in expect_restrns:\n \n if count in (2,3,4,5) and restrn in expect_restrns:\n \n \"\"\"\n 2 == FTNT, 3 == FARERULE, 4 == ALTRULE, 5 == GENRULE\n \"\"\"\n \n if len(self.final_dataframe[restrn]) != 0 and (self.final_dataframe[restrn].NOAPPL.isnull().any()):\n \"\"\"\n if table restrictions are not empty and NOAPLL is null for e.g. 
FR has records with NOAPP is null,\n then call to restrn_date function to caputre restriction dates\n \"\"\"\n \n self.restrn_date(restrn)\n \n elif count == 6:\n pass\n \n elif 'No Key Found' in expect_restrns:\n \n if count in (2,3,4,5) and restrn in expect_restrns and len(self.final_dataframe[restrn]) != 0 and (self.final_dataframe[restrn].NOAPPL.notnull().any()):\n \n self.restrn_date(restrn)\n \n elif count == 6:\n pass", "def get_di_date(d):\r\n data_area = driver.find_element_by_xpath('//*[@id=\"Data\"]') # find date input area\r\n data_area.clear()\r\n data_area.send_keys(d) # cleans date field and inserts our date\r\n okbutton = driver.find_elements_by_xpath('//*[@id=\"divContainerIframeBmf\"]/form/div/div/div[1]/div[2]/div/div[2]') #get the ok button\r\n okbutton[0].click() # click on the ok button\r\n w_url = requests.get(str(driver.current_url)).text \r\n soup = BeautifulSoup(w_url, 'lxml') # use beautiful soup to deal with the html of the current url we loaded at our date\r\n infoslist = []\r\n for tr in soup.findAll(\"table\"):\r\n for td in tr.find_all(\"td\"):\r\n if not td.attrs.get('style'):\r\n infoslist.append(td.text) # get the elements of the table we need\r\n infoslist_p = parsing(infoslist) # use parsing function to reorganize\r\n di = pd.DataFrame(infoslist_p) # create the data frame with the information\r\n di.drop(2, axis = 1, inplace = True)\r\n rename_cols = {0: 'Vertices', 1: 'DIxPRE 252'} \r\n di.rename(columns = rename_cols, inplace = True) #renaming the columns accordingly\r\n di = di.apply(lambda x: x.str.replace(',','.'))\r\n di['Vertices'] = pd.to_numeric(di['Vertices'], errors = 'coerce')\r\n di['DIxPRE 252'] = pd.to_numeric(di['DIxPRE 252'], errors = 'coerce')\r\n return di", "def fill_testing_dates(self):\r\n \r\n now = datetime.now()\r\n month = now.strftime('%m')\r\n year = now.year \r\n most_recent_date = '{}-{}-01'.format(year, month)\r\n self.testing_dates[1] = {'cv_start': '1972-01-01', \r\n 'cv_end': '1975-12-01', \r\n 'pred_start': '1976-01-01',\r\n 'pred_end': '1981-07-01'}\r\n self.testing_dates[2] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1981-07-01', \r\n 'pred_start': '1981-08-01',\r\n 'pred_end': '1983-07-01'}\r\n self.testing_dates[3] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1983-07-01', \r\n 'pred_start': '1983-08-01',\r\n 'pred_end': '1992-12-01'}\r\n self.testing_dates[4] = {'cv_start': '1983-08-01', \r\n 'cv_end': '1992-12-01', \r\n 'pred_start': '1993-01-01',\r\n 'pred_end': '2003-07-01'}\r\n self.testing_dates[5] = {'cv_start': '1993-01-01', \r\n 'cv_end': '2003-07-01', \r\n 'pred_start': '2003-08-01',\r\n 'pred_end': '2010-09-01'}\r\n self.testing_dates[6] = {'cv_start': '2003-08-01', \r\n 'cv_end': '2010-09-01', \r\n 'pred_start': '2010-10-01',\r\n 'pred_end': '2021-07-01'}\r\n self.testing_dates[7] = {'cv_start': '2010-10-01', \r\n 'cv_end': '2021-07-01', \r\n 'pred_start': '2021-08-01',\r\n 'pred_end': most_recent_date}", "def date_col_imputation(self, columns):\n for column in columns:\n if column in self.col_with_nulls:\n if not self._pandas_flag:\n mode = self.data_frame.select(column).toPandas().mode().values[0][0]\n self.data_frame = self.data_frame.fillna({ column:mode })\n else:\n self.data_frame[column] = self.mode_impute(self.data_frame[column])\n self.data_change_dict['ModeImputeCols'].append(column)", "def date_prediction(config):\n if config['functionality'] == 'best_flights':\n departure_flight_date = date(config['departure_flight']['departure_date'][0],\n config['departure_flight']['departure_date'][1],\n 
config['departure_flight']['departure_date'][2])\n return_flight_date = date(config['return_flight']['departure_date'][0],\n config['return_flight']['departure_date'][1],\n config['return_flight']['departure_date'][2])\n div = config['prediction_period_days'] / 7\n dates_search = []\n for x in range(0, div + 1):\n dates_search.append(\n [(departure_flight_date + datetime.timedelta(days=x * 7)),\n (return_flight_date + datetime.timedelta(days=x * 7))])\n for i in dates_search:\n i[0] = str(i[0])\n year, month, day = i[0].split(\"-\")\n i[0] = \"%s/%s/%s\" % (day, month, year)\n i[1] = str(i[1])\n year, month, day = i[1].split(\"-\")\n i[1] = \"%s/%s/%s\" % (day, month, year)\n return dates_search\n elif config['functionality'] == 'flight_trends':\n departure_flight_date = date(\n config['departure_flight']['departure_date'][0],\n config['departure_flight']['departure_date'][1],\n config['departure_flight']['departure_date'][2])\n return_flight_date = date(config['return_flight']['departure_date'][0],\n config['return_flight']['departure_date'][1],\n config['return_flight']['departure_date'][2])\n dates_search = []\n for x in range(0, config['prediction_period_days']):\n dates_search.append(\n [(departure_flight_date + datetime.timedelta(days=x)),\n (return_flight_date + datetime.timedelta(days=x))])\n for i in dates_search:\n i[0] = str(i[0])\n year, month, day = i[0].split(\"-\")\n i[0] = \"%s/%s/%s\" % (day, month, year)\n i[1] = str(i[1])\n year, month, day = i[1].split(\"-\")\n i[1] = \"%s/%s/%s\" % (day, month, year)\n return dates_search", "def parse_html(self, html):\n day = [\"MONDAY\", \"TUESDAY\", \"WEDNESDAY\", \"THURSDAY\", \"FRIDAY\", \"SATUARDAY\", \"SUNDAY\"]\n rem = [\"Time\", \"Captain\", \"Crew\", \"Aircraft\", \"Module\", \"Exercise\", \"Description\", \"Fly Type\"]\n #schedules_dictionary= {\"DATE\": \"\", \"CAPITAN\": \"\", \"CREW\": \"\", \"AIRCRAFT\": \"\", \"MODULE\": \"\", \"EXCERCISE\": \"\", \"DESCRIPTION\": \"\", \"FLY_TYPE\": \"\"}\n schedules_dictionary = {}\n\n \n table_data = []\n table_data_without_header = []\n day_index = []\n \n if len(html) == 0:\n return \"ERROR: parse_html - len(html) = 0\"\n \n soup = BeautifulSoup(html, 'lxml')\n table_rows = soup.find(\"table\").find_all(\"tr\")\n\n # Extract table data from table \n for tr in table_rows:\n td = tr.find_all(\"td\")\n for i in td:\n table_data.append(i.text)\n \n # Remove table headers\n for i in table_data:\n if i in rem or i == \"\":\n continue\n else:\n table_data_without_header.append(i)\n\n # Extract index of dates\n for td in table_data_without_header:\n for d in day:\n if d in str(td):\n day_index.append((table_data_without_header.index(td)))\n \n # Populate schedules_dictionary \n final_list = []\n\n for a in day_index:\n temporary_dict = {}\n date_time = table_data_without_header[a].split(\"-\", 2)[1].replace(\" \", \"\")\n date_time = date_time + \" \" + table_data_without_header[a + 1]\n temporary_dict[\"DATE\"] = str(self.format_date(date_time, \"iso\"))\n\n if \":\" in table_data_without_header[(a + 1)]:\n temporary_dict[\"CAPITAN\"] = table_data_without_header[(a + 2)]\n temporary_dict[\"CREW\"] = table_data_without_header[(a + 3)]\n temporary_dict[\"AIRCRAFT\"] = table_data_without_header[(a + 4)]\n temporary_dict[\"MODULE\"] = table_data_without_header[(a + 5)]\n temporary_dict[\"EXCERCISE\"] = table_data_without_header[(a + 6)]\n temporary_dict[\"DESCRIPTION\"] = table_data_without_header[(a + 7)]\n temporary_dict[\"FLY_TYPE\"] = table_data_without_header[(a + 8)]\n \n 
final_list.append(temporary_dict)\n schedules_dictionary[\"return\"] = final_list\n\n return schedules_dictionary", "def date_setup(date, page_offset, url,c):\r\n\r\n if date <= 10:\r\n page_offset = 0\r\n url = \"http://data.terapeak.com/?id=0&search=1&view=item_browse&query=iphone+5s&date=2015-02-1&date_range=1&buyer_country_id=1&condition=rollup_3&type%5Bfixed%5D=1&from_start_price=100&to_start_price=800&from_end_price=100&to_end_price=800&seller_country_id=1&txn_site_id=0&numPages=12&siteID=0&offset={0}\".format(page_offset)\r\n u = list(url)\r\n new = str(date)\r\n u[86] = new #this will update the date from date=2014-09-1 to date=2014-09-2\r\n date_ed_url = \"\".join(u)\r\n #print(edited)\r\n page_offset_update(date, page_offset, date_ed_url, c) # the date has now been updated and the page_offset has been reset to 0\r\n else:\r\n with open(\"5s_Feb_2015_.csv\", \"w\", newline='', encoding='UTF-8') as f:\r\n writer = csv.writer(f)\r\n writer.writerows(listof_listof_lists)\r\n print(\"done\")\r\n quit", "def import_precip_data(counties):\n for index, row in counties.iterrows():\n station = row[2]\n url = f'https://wrcc.dri.edu/WRCCWrappers.py?sodxtrmts+0{station}+por+por+pcpn+none+msum+5+01+F'\n result = requests.get(url)\n soup = BeautifulSoup(result.text, 'html.parser')\n table = soup.find('table')\n data = pd.read_html(str(table))\n df = data[0]\n df.columns = df.iloc[0]\n df = df.drop([0])\n df = df.iloc[-65:-8, :]\n df = df.rename(columns={'YEAR(S)': 'Year'})\n df['Year'] = pd.to_datetime(df['Year'], format='%Y')\n df = df.set_index('Year')\n df = df.dropna(axis=1)\n df = df.replace(to_replace='-----', value=np.nan)\n df = df.astype('float64')\n df = df.fillna(df.mean().round(2))\n df = df.add_suffix('_p')\n name = row[0]\n df['County'] = name\n df.to_csv(f'{name}_precip.csv')\n print(f'Precipitation data from {name} saved')\n time.sleep(3.14)\n print('Done')", "def return_weekly_figure():\n today = datetime.datetime.now()\n\n while 1:\n try:\n today_str = str(today.day) + \"/\" + \"{:02d}\".format(today.month) + \"/\" + str(today.year)\n match = covid_table.find(date=today_str)\n match.next()\n running_total = 0\n for i in range(7):\n running_total += return_daily_figure(today)\n today = today - datetime.timedelta(days=1)\n average_dose_per_day = round(running_total/7)\n return running_total, average_dose_per_day \n except:\n today = today - datetime.timedelta(days=1)", "def test_fill_data_with_one_date(self):\n # date = pd.to_datetime('2015-06-30')\n date = pd.to_datetime('2011-05-09')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n\n # df_date = self.full_iv.df_all.query('date == %r' % date)\n # df_date = df_date[['date', 'dte', 'mark', 'strike', 'impl_vol']]\n # print df_date.sort_values(['dte', 'strike']).to_string(line_width=1000)\n\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n\n self.assertTrue(len(df_iv))", "def game(hometeam, awayteam, week, year):\n\twith urlopen(spread_url(hometeam, awayteam, week, year)) as connection:\n\t\tspreads_page = connection.read()\n\t# Note that infer_types is deprecated and won't work starting in Pandas 0.14\n\tLOG.debug('Getting game %s', (hometeam, awayteam, week, year))\n\tsp = read_html(io=spreads_page.decode('utf-8'),\n\t\t\t\t\t match=\"History\", attrs={'id': 'table-000'},\n\t\t\t\t\t infer_types=False, header=0,\n\t\t\t\t\t skiprows=[1, 2, 3])\n\tif len(sp) != 1:\n\t\traise CantFindTheRightTable\n\tsp = sp.pop()\n\n\t# Get the over-under 
page\n\tou = read_html(io=over_under_url(hometeam, awayteam, week, year),\n\t\t\t\t match=\"History\", attrs={'cellspacing': 0},\n\t\t\t\t infer_types=False, header=0,\n\t\t\t\t skiprows=[1, 2, 3])\n\tif len(ou) != 1:\n\t\traise CantFindTheRightTable\n\tou = ou.pop()\n\n\t# Cleaning.\n\tfor t, name, date_col in (sp, 'spread', 'Unnamed: 0'), (ou, 'over_under', '\\xa0'):\n\t\tdatetime = pd.to_datetime(\n\t\t\tt[date_col]\n\t\t\t.replace(r'(\\d\\d?/\\d\\d?)', r'\\1/%d' % year, regex=True)\n\t\t\t.replace(r'(01|02)/(\\d\\d?)/\\d{4}', r'\\1/\\2/%d' % (year + 1),\n\t\t\t\t\t regex=True))\n\t\tdel t[date_col]\n\n\t\t# Replace all the '--' as missing so we can convert numbers to floats.\n\t\tfor column in t.keys():\n\t\t\tt[column] = (t[column]\n\t\t\t\t\t\t .replace('--', 'nan')\n\t\t\t\t\t\t .replace('(Pick)', 0)\n\t\t\t\t\t\t .apply(float))\n\n\t\t# Add datetime back in after the str-to-float conversion so we don't do\n\t\t# it for the datetime.\n\t\tt['datetime'] = datetime\n\n\t\t# Lowercase column names for ease of programming later\n\t\tt.columns = [h.lower() for h in t.columns]\n\n\t\t# Give spreads/over-under their suffixes\n\t\tfor col in 'pinnacle', 'betonline', 'bookmaker':\n\t\t\tt[col + '_' + name] = t[col]\n\t\t\tdel t[col]\n\n\tdata = sp.merge(ou, on=['datetime'], how='outer')\n\tassert set(data.datetime) == (set(sp.datetime) | set(ou.datetime))\n\n\t# Add this function's arguments to the table.\n\tdata['hometeam'] = hometeam\n\tdata['awayteam'] = awayteam\n\tdata['week'] = week\n\n\t# Get favored team from the big \"Odds: Washington by 4,\" that shows up at the\n\t# top of the page.\n\tsoup = BeautifulSoup(spreads_page)\n\tsubheader = soup.find('p', attrs={'class': 'h1-sub'}).find('strong')\n\tm = _FAVORED_RE.search(subheader.contents[0])\n\tif m is None or not m.group('city'):\n\t\traise ValueError(\"Couldn't figure out who was favored: %r\" %\n\t\t\t\t\t\t (subheader.contents))\n\tcity = m.group('city').replace(' ', '-').replace('.', '').lower()\n\t# city will be something like 'san-francisco' after the transformations\n\t# above. 
Find what team that is by looking for the links to the teams that\n\t# are also in that subheader.\n\tfor link in subheader.findAll('a'):\n\t\tlink = link['href']\n\t\tif city in link:\n\t\t\tdata['favored'] = link.split('-')[-1]\n\t\t\tbreak\n\telse:\n\t\traise ValueError(\"couldn't figure out who %s is\" % city)\n\n\treturn data", "def get_daily_goals(self, surface, dates):\n iterator = DjuDay.objects.filter(day__in=dates).order_by('day')\n return [\n [x.day, x.average * DJU_TO_KWH * KWH_TO_EUROS * surface] for x in iterator\n ]", "def impute_ferc714_hourly_demand_matrix(df: pd.DataFrame) -> pd.DataFrame:\n results = []\n for year, gdf in df.groupby(df.index.year):\n logger.info(f\"Imputing year {year}\")\n keep = df.columns[~gdf.isnull().all()]\n tsi = pudl.analysis.timeseries_cleaning.Timeseries(gdf[keep])\n result = tsi.to_dataframe(tsi.impute(method=\"tnn\"), copy=False)\n results.append(result)\n return pd.concat(results)", "def run_daily_hygienist(self):\n self.remove_priorities_from_all_not_due_today()", "def test_fill_data_with_days_in_dtes(self):\n date = pd.to_datetime('2009-01-15')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n self.assertTrue(len(df_iv))", "def date_cleaner(dataset):\n dataset['document_last_edition'] = dataset['meta_lastEdition']\n dataset = dataset.drop(['meta_lastEdition'], axis=1)\n \n \n \"\"\"\n Get column to correct date format\n \"\"\"\n dataset['document_last_edition'] = dataset['document_last_edition'].apply(lambda x: str(unify_date_format(x))[:10]) \n \n \n \"\"\"\n meta_lastPublication renaming\n \"\"\"\n dataset['document_last_publication'] = dataset['meta_lastPublication']\n dataset = dataset.drop(['meta_lastPublication'], axis=1)\n\n # DROP HOURS/M/S\n dataset['document_last_publication'] = dataset['document_last_publication'].apply(lambda x: str(unify_date_format(x))[:10]) \n \n \n # META CREATED DATE\n dataset['meta_created_date'] = dataset['meta_created_date'].str.replace('_', '-')\n dataset['meta_created_date'] = dataset['meta_created_date'].apply(lambda x: str(unify_date_format(x))[:10])\n dataset['document_created_at'] = dataset['meta_created_date']\n dataset = dataset.drop(['meta_created_date'], axis=1)\n\n # META_REVISED_MODIFIED\n dataset['document_revised_modified'] = dataset['meta_revised_modified']\n dataset = dataset.drop(['meta_revised_modified'], axis=1) \n \n \n date_column_list = ['document_created_at','document_last_edition', 'document_last_publication', 'document_revised_modified']\n \n \"\"\"\n \n THE PLAN IS TO FIRST REPLACE EMPTY SPOTS IN META_CREATED_DATE WITH CREATED_AT\n THEN WE DROP CREATED_AT\n THEN WE REPLACE EMPTY SPOTS IN OTHER COLUMNS WITH document_created_at\n \"\"\" \n \n dataset[date_column_list] = dataset[date_column_list].replace('Not Specified', np.nan)\n dataset[date_column_list] = dataset[date_column_list].replace('Not Specif', np.nan)\n dataset[date_column_list] = dataset[date_column_list].replace('nan', np.nan) \n dataset['document_created_at'].fillna(dataset['created_at'], inplace=True) \n dataset = dataset.drop(['created_at'], axis=1)\n \n dataset['document_last_edition'].fillna(dataset['document_created_at'], inplace=True)\n dataset['document_last_publication'].fillna(dataset['document_created_at'], inplace=True)\n dataset['document_revised_modified'].fillna(dataset['document_created_at'], inplace=True)\n \n \n\n \n \"\"\"\n FIXING NON-EXISTING DATES IN 
DATASET\n \"\"\"\n \n dataset = dataset.replace(['2020-1-29'], ['2020-01-29'])\n \n \n \n created_at_unique = list(dataset['document_created_at'].unique())\n last_edition_unique = list(dataset['document_last_edition'].unique())\n last_publication_unique = list(dataset['document_last_publication'].unique())\n revised_modified_unique = list(dataset['document_revised_modified'].unique())\n \n \n # IF LIST NEED TO GET UPDATED\n invalid_created_at = is_valid_date(created_at_unique)\n invalid_last_edition_unique = is_valid_date(last_edition_unique)\n invalid_last_publication_unique = is_valid_date(last_publication_unique)\n invalid_revised_modified_unique = is_valid_date(revised_modified_unique) \n invalid_dates = list(set(itertools.chain(invalid_created_at, invalid_last_edition_unique, invalid_last_publication_unique, invalid_revised_modified_unique)))\n \n \n \n \n # Non-existing dates from the list\n dataset = dataset.replace(['2019-04-31', '2016-11-31', '2019-09-31', '2015-02-31', '2017-04-31', '2015-11-31', '2015-09-31', '2017-02-29', '2018-09-31', '2017-06-31', '2018-04-31', '2015-04-31', '2018-11-31', '2017-09-31', '2015-02-29', '2019-02-29', '2019-06-31', '2018-02-29', '2016-02-30', '2016-06-31', '2016-09-31', '2018-06-31', '2019-18-03', '2020-02-31', '9999-12-31'], \n ['2019-04-30', '2016-11-30', '2019-09-30', '2015-02-28', '2017-04-30', '2015-11-30', '2015-09-30', '2017-02-28', '2018-09-30', '2017-06-30', '2018-04-30', '2015-04-30', '2018-11-30', '2017-09-30', '2015-02-28', '2019-02-28', '2019-06-30', '2018-02-28', '2016-02-28', '2016-06-30', '2016-09-30', '2018-06-30', '2019-03-18', '2020-02-28', '1999-12-31'])\n\n\n \n \n\n\n return dataset", "def get_all_data_from_main_table(soup_list):\n year_growth_list_all_pages = []\n\n for i in soup_list:\n year_growth_list_all_pages.append(get_data_from_main_table(i))\n return year_growth_list_all_pages", "def user_story_01(self):\n td=datetime.today()\n for person in self.individuals.values():\n pb=person.birthday\n pd=person.death\n if pb !=\"NA\" and pb>td:\n print(f'US01 - {person.name} birthday after today on line {person._birthday_line}')\n if pd !=\"NA\" and pd>td:\n print(f'US01 - {person.name} death after today on line {person._death_line}')\n for family in self.families.values():\n fm=family.married \n fd=family.divorced\n if fm !=\"NA\" and fm>td:\n print(f'US01 - {self.individuals[family.wife_id].name} marriage after today on line {family._married_line}')\n if fd !=\"NA\" and fd>td:\n print(f'US01 - {self.individuals[family.husb_id].name} divorce after today on line {family._divorced_line}')", "def data_preprocessing(dataframe):\n\n dataframe = dataframe.loc[:, 0:]\n dataframe.columns = [\"url\", \"date\", \"personurl\", \"name\"]\n dataframe[\"date_new\"] = [i.replace(\"DΓΌzenlendi\", \"\").replace(\"β€’\", \"\").strip() for i in dataframe[\"date\"]]\n dataframe[\"date_new\"] = [str(int(i.replace(\"yΔ±l\", \"\").strip()) * 48) if \"yΔ±l\" in i else i for i in\n dataframe[\"date_new\"]]\n dataframe[\"date_new\"] = [str(int(i.replace(\"ay\", \"\").strip()) * 4) if \"ay\" in i else i for i in\n dataframe[\"date_new\"]]\n dataframe[\"date_new\"] = [i.replace(\"hafta\", \"\").strip() if \"hafta\" in i else i for i in dataframe[\"date_new\"]]\n\n return dataframe", "def machine(date, page_offset, date_ed_url, c):\r\n\r\n print(\"me machine\")\r\n request = c.get(date_ed_url)\r\n r = request.text\r\n \r\n \r\n #print(r)\r\n if not request.ok:\r\n print (\"error\")\r\n # Something went wrong\r\n\r\n soup = BeautifulSoup(r)\r\n \r\n 
linkss=soup.find_all(\"tr\",{\"class\":\"row-dark\"}) \r\n tup_list = []\r\n unit_listt = []\r\n \r\n\r\n for i in linkss[0:-1]: #there is an empty list at the end for some reason...still needs to be checked\r\n unit = []\r\n i = str(i)\r\n\r\n #D soup is for description, a is the anchor\r\n\r\n D_soup=BeautifulSoup(i)\r\n x = D_soup.find_all(\"a\")[0]\r\n #d[x.get_text()] = tuple()\r\n title_key = x.get_text()\r\n unit.append(title_key)\r\n #print(title_key)\r\n\r\n #items is grabbing the prices\r\n\r\n items=D_soup.find_all(\"td\",{\"class\":\"sorted\"})\r\n if items != []:\r\n item_1 = items[0]\r\n xx = item_1.get_text()\r\n unit.append(xx)\r\n #print(\"\")\r\n #print(xx[1:])\r\n \r\n #dates is grabbing the END date for the listing\r\n \r\n dates=D_soup.find_all(\"td\",{\"class\":\"last-child\"})\r\n if items != []:\r\n date_1 = dates[0]\r\n xxx = date_1.get_text()\r\n unit.append(xxx)\r\n #print(xxx)\r\n \r\n unit_listt.append(unit)\r\n listof_listof_lists.append(unit)\r\n tupp_ = (xx, xxx)\r\n tup_list.append(tupp_)\r\n #print('')\r\n\r\n #no longer using a dict, so its commented out below\r\n #title_key = x.get_text()\r\n cnt = len(tup_list)\r\n for j in range(cnt):\r\n z[title_key] = tup_list[j]\r\n\r\n #page_offset += 25\r\n print(\"round complete\")\r\n print()\r\n print()\r\n print(len(unit_listt))\r\n print(unit_listt) #list of each individual page listings \r\n \r\n #the difference between unit_list and listof_listof_lists is that unit_list is a list of the individual session and\r\n #listof_listof_lists is a list of every session or \"page\". So if page_offset is on 75, at this location of the code, unit_list\r\n # is equal to 25 listings and listof_listof_lists is equal to 75 listings. Because each page has 25 listings, if unit_list is ever less than\r\n #25 it means we have reached the last page of the url (so the date now needs to be updated)\r\n \r\n## with open(\"clayton_writing_.csv\", \"w\", newline='', encoding='UTF-8') as f:\r\n## writer = csv.writer(f)\r\n## writer.writerows(listof_listof_lists)\r\n\r\n if len(unit_listt) < 5:\r\n print(\"here, update below\")\r\n print()\r\n page_offset += 378\r\n page_offset_update(date, page_offset, date_ed_url, c)\r\n\r\n else:\r\n print(\"not yet\")\r\n page_offset += 25\r\n page_offset_update(date, page_offset, date_ed_url, c)", "def get_dates_from_table(table):\n ths = table.find_all('th', attrs={'class': 'tide-table__day'})\n return [\n (date.fromisoformat(th.attrs['data-date']), int(th.attrs.get('colspan', 0)))\n for th in ths\n ]", "def generate_daily_matrix(full_df, feat_days):\n pred_ticker = full_df.ticker.unique()[0]\n feature_tickers = [i for i in full_df.ticker.unique() if i != pred_ticker]\n dfml = full_df[full_df.ticker == pred_ticker].drop('ticker', axis=1)\n dfml.rename({'percent_change_pred': f'{pred_ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n for ticker in feature_tickers:\n help_df = full_df[full_df.ticker == ticker][['past_date', 'current_date', 'prediction_date', 'percent_change_pred']]\n help_df.rename({'percent_change_pred': f'{ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n dfml = pd.merge(dfml, help_df,\n left_on=['past_date', 'current_date', 'prediction_date'],\n right_on=['past_date', 'current_date', 'prediction_date'],\n how='left')\n return dfml.drop('percent_change_feat', axis=1)", "def fill_matchup_table_with_games(self, week, year):\n url = f'https://www.pro-football-reference.com/years/{year}/games.htm'\n schedule_html = requests.get(url)\n pro_soup = 
BeautifulSoup(schedule_html.content, 'html.parser')\n\n matchup_table = html_parsing.week_html_parsing(pro_soup)[0][1]\n matchup_table = matchup_table[matchup_table['Time'] != '']\n matchup_table = matchup_table.dropna()\n\n matchup_table = self.format_profootball_dates(matchup_table, year)\n\n week_matchups = matchup_table[matchup_table['Week'] == float(week)]\n sql_queries = []\n for i, row in week_matchups.iterrows():\n sql_queries.append(\"INSERT INTO \\\"2017_matchups\\\" (hometeam, awayteam, week, date) \"\n \"VALUES ({}, {}, {}, {});\".format(\n row.Home.upper(), row.Visitor.upper(), row.Week, row.datetime))\n self.set_db_data(sql_queries)\n print('Table filled successfully.')" ]
[ "0.65812904", "0.5443871", "0.536314", "0.5343846", "0.52965736", "0.52789164", "0.52286977", "0.5214561", "0.5166764", "0.5133594", "0.5110339", "0.5098538", "0.50696176", "0.50583833", "0.5039751", "0.5029864", "0.5022725", "0.5017969", "0.49726832", "0.4966863", "0.49282935", "0.49218598", "0.49186918", "0.4914737", "0.4913162", "0.491102", "0.49052715", "0.48907927", "0.48862267", "0.48744008" ]
0.74339116
0
Creates a unique fight ID from combining the names of both fighters and the dates, without spaces or special characters. This should work with rematches well. A potential edge case, but one we probably won't run into, is two fights where fighters of the same name fight on the same date.
def impute_fightID(fight):
    red = fight.at[0, 'Fighter']
    blue = fight.at[1, 'Fighter']
    date = fight['Date'][0]
    fight_id = red + blue + date
    fight_id = fight_id.strip('-,').replace(' ', '')
    fight.at[0, 'fight_id'] = fight_id
    fight.at[1, 'fight_id'] = fight_id
    return fight
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_random_fightID():\n pass", "def create_id(uid, begintime, endtime):\n allowed_chars = string.ascii_lowercase[:22] + string.digits\n temp = re.sub('[^{}]'.format(allowed_chars), '', uid.lower())\n return re.sub('[^{}]'.format(allowed_chars), '', uid.lower()) + str(arrow.get(begintime).timestamp) + str(arrow.get(endtime).timestamp)", "def double_name_female():\n while True:\n # We generate two random female names\n first_name = female_name()\n second_name = female_name()\n\n # The names are not allowed to be the same, therefore we need to\n # make sure that they aren't.\n if first_name == second_name:\n continue\n\n # If they are different, we connect them with a -\n else:\n new_name = first_name + \"-\" + second_name\n return new_name", "def make_title(dawn: str | None, dusk: str | None, /) -> str:\n logger.debug('Making title')\n if not dawn or not dusk:\n logger.error('Cannot find start/end date\\n')\n sys.exit(1)\n api_dfm, msg_dfm = '%Y-%m-%dT%H:%M:%SZ', '%d %B %Y'\n try:\n start_date = datetime.strptime(dawn, api_dfm).strftime(msg_dfm)\n end_date = datetime.strptime(dusk, api_dfm).strftime(msg_dfm)\n except ValueError as err:\n logger.error(f'{err}\\n')\n sys.exit(1)\n\n logger.debug('Title was made\\n')\n return f'From: {start_date} - To: {end_date}'", "def make_unique_id(venue_list):\r\n md5_hash = md5()\r\n for name in venue_list:\r\n md5_hash.update(name)\r\n hash_hex = md5_hash.hexdigest()\r\n return hash_hex[-8:]", "def generate_director_name():\n return movie_director_surnames[random.randint(0, len(movie_director_surnames) - 1)] + \" \" + movie_director_lastnames[random.randint(0, len(movie_director_lastnames) - 1)]", "def _make_uuid():\n parts = [Record._hex_string(k) for k in Record.UUID_PARTS]\n return \"-\".join(parts)", "def generate_id():\n\treturn \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def generate_name():\n\n first_name = \"\"\n\n for letter in range(5):\n if letter % 2 == 0:\n first_name += CONSONANTS[randint(0,20)]\n else: # The letter is even\n first_name += VOWELS[randint(0,4)]\n\n last_name = \"\"\n for letter in range(5):\n if letter == 1 or letter == 3:\n last_name += CONSONANTS[randint(0, 20)]\n elif letter == 4:\n last_name += VOWELS[randint(0, 4)]\n else:\n last_name += VOWELS[randint(0, 4)] * 2\n\n last_name = last_name[0].upper() + last_name[1:]\n first_name = first_name[0].upper() + first_name[1:]\n username = first_name + last_name\n\n return username", "def generate_id():\n return \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def __generate_id(self):\n ids = [int(fd.get('id')) for fd in self.favorite_drinks]\n return str(max(ids)+1)", "def get_unique_id(name: str) -> str:\n name = get_data_source(name)\n suffixes = \".\".join(sfx for sfx in get_format_suffixes(name) if sfx)\n return re.sub(rf\"[.]{suffixes}$\", \"\", name)", "def generate_trackerid():\r\n\r\n trackerid = None\r\n while trackerid is None or \\\r\n Profile.objects.filter(trackerid=trackerid).exists():\r\n trackerid = uuid.uuid4().hex\r\n return trackerid", "def generate_trackerid():\n\n trackerid = None\n while trackerid is None or \\\n Profile.objects.filter(trackerid=trackerid).exists():\n trackerid = uuid.uuid4().hex\n return trackerid", "def fight(self):\n\n returned_string = \"You killed {defeated_char_name}\" \\\n \"with {Weapon} and lost {health_lost} health. 
\" \\\n \"Your current health is {health_curr}\"\n if self.location.character is None:\n return \"There is no one to fight here.\"\n if isinstance(self.location.character, Friend):\n return self.location.characer.fight('friend')\n health_lost = self.location.character.fight(self.strength)\n if health_lost >= self.health:\n return f\"{self.location.character.name} crushes you, puny adventurer\\n\"\n else:\n self.health -= health_lost\n defeated_char_name = self.location.character.name\n if self.location.character.loot.name:\n returned_string += f\"You looted {self.location.character.loot.name} from {self.location.character.name}\"\n self.backpack[self.location.character.loot.name] = self.location.character.loot\n self.location.character.loot = None\n self.location.character = None\n if self.equipped['Weapon'] is None:\n return returned_string.format(defeated_char_name=defeated_char_name,\n weapon=\"Unarmed\",\n health_lost=health_lost,\n health_curr=self.health)\n return returned_string.format(defeated_char_name=defeated_char_name,\n weapon=self.equipped['Weapon'].name,\n health_lost=health_lost,\n health_curr=self.health)", "def full_name():\n gender = dice.randint(1, 100) # Over 50 is male, under 50 is female\n double_first = dice.randint(1, 100) # Over 10 no\n double_last = dice.randint(1, 100) # Only between 40 and 55\n doctor = dice.randint(1, 1000) # Different for men and women\n # Gender distribution is 50/50 (with only 2 genders),\n # 10% have a double first name,\n # 15 % have a double last name and\n # 1% are doctors.\n name = \"\"\n prefix = \"\"\n\n # We use the prefix to get a clear identifier in case the name can\n # be used for both genders\n if gender <= 50 and double_first <= 10:\n name = double_name(\"male\")\n if name.split(\"-\")[0] in names.woman:\n prefix = \"Herr \"\n elif gender <= 50:\n name = male_name()\n if name in names.woman:\n prefix = \"Herr \"\n elif gender > 50 and double_first <= 10:\n name = double_name(\"female\")\n if name.split(\"-\")[0] in names.man:\n prefix = \"Frau \"\n elif gender > 50:\n name = female_name()\n if name in names.man:\n prefix = \"Frau \"\n\n # Now we add a last name or even a double last name\n if 40 <= double_last < 55:\n name += \" \" + double_name(\"family\")\n else:\n name += \" \" + last_name()\n\n # Last but not least we check if the person is a doctor\n if gender <= 50 and doctor <= 11:\n name = \"Dr. \" + name\n elif gender > 50 and doctor <= 9:\n name = \"Dr. \" + name\n\n # If the prefix isn't empty, we add it to the name\n if prefix:\n name = prefix + name\n return name", "def create_uuid_from_form_name(name):\n prefix = 'buendia-form-'\n max_uuid_len = 38\n max_name_len = max_uuid_len - len(prefix)\n if len(name) > max_name_len:\n warnings.warn(\n \"The form name '%s' has been clipped to create a unique form ID. \"\n \"Note that if you have another form that starts with the same %d \"\n \"characters as this form, the form IDs will collide and only one \"\n \"form will be displayed. 
To fix this, ensure that the first %d \"\n \"characters of each form name are unique.\"\n % (name, max_name_len, max_name_len) )\n return prefix + normalize_name(name[:max_name_len])", "def random_name_maker():\n new_out = ''\n for i in range(10):\n random_letter_or_number = random.randint(1, 2)\n if random_letter_or_number is 1:\n new_out += random_letter(letters)\n if random_letter_or_number is 2:\n new_out += str(random.randint(0, 9))\n if new_out not in names_generated: # it's unique\n names_generated.append(new_out)\n return new_out", "def unique_token_name(self, other: SkupperSite) -> str:\n return hashlib.sha256(f\"{other}-{self}\".encode(\"UTF-8\")).hexdigest()", "def generate_reader_name():\n return reader_surnames[random.randint(0, len(reader_surnames) - 1)] + \" \" + reader_lastnames[random.randint(0, len(reader_lastnames) - 1)]", "def double_name_male():\n while True:\n # We generate two random male names\n first_name = male_name()\n second_name = male_name()\n\n # The names are not allowed to be the same, therefore we need to\n # make sure that they aren't.\n if first_name == second_name:\n continue\n\n # If they are different, we connect them with a -\n else:\n new_name = first_name + \"-\" + second_name\n return new_name", "def name_generator():\n firsts = [\"Albrecht\", \"Lysa\", \"Yvette\", \"JΓ©sus\", \"Amanitus\"]\n lasts = [\"Andersson\", \"Natt och Dag\", \"av Pommern\", \"Krusmynta\"]\n\n random.seed()\n first = firsts[random.randint(0, len(firsts)-1)]\n last = lasts[random.randint(0, len(lasts)-1)]\n\n name = first + \" \" + last\n return name", "def _generate_name(name):\n return 'test-%s-%s-%s' % (time.strftime('%Y%m%d%H%M%S'),\n random.randint(0, 999), name)", "def unique_id(self) -> str:\n return \"_\".join([self._name, \"climate\"])", "def name_generator():\n prefix_list = [\n \"admiring\", \"adoring\", \"affectionate\", \"agitated\", \"amazing\", \"angry\",\n \"awesome\", \"beautiful\", \"blissful\", \"bold\", \"boring\", \"brave\", \"busy\",\n \"charming\", \"clever\", \"cool\", \"compassionate\", \"competent\",\n \"confident\", \"crazy\", \"dazzling\", \"determined\", \"distracted\",\n \"ecstatic\", \"elegant\", \"eloquent\", \"epic\", \"exciting\", \"festive\",\n \"flamboyant\", \"focused\", \"friendly\", \"frosty\", \"funny\", \"gifted\",\n \"goofy\", \"gracious\", \"great\", \"stoic\", \"strange\", \"suspicious\",\n \"tender\", \"thirsty\", \"trusting\", \"unruffled\", \"upbeat\",\t\"vibrant\",\n \"vigilant\", \"vigorous\", \"wizardly\", \"wonderful\", \"youthful\", \"zealous\",\n \"dreamy\", \"eager\", \"sweet\", \"zen\"\n ]\n\n postfix_list = [\n \"deepak\", \"vrushal\", \"abhishek\", \"kausthubh\", \"sandeep\", \"praveen\",\n \"shailesh\", \"sachine\", \"chetan\", \"smita\", \"digvijay\", \"kedar\",\n \"prayas\", \"mandar\", \"ashish\", \"arvind\"\n ]\n\n name = prefix_list[randint(0, len(prefix_list)-1)]+\"-\"+postfix_list[randint(0, len(postfix_list)-1)]+\"-\"+str(randint(100, 999))\n return name", "def name_to_id(player_name):\n # This is fairly unsophisticated, just does a CRC32 on the name. 
Can be\n # optimized both for compute requirements and collision frequency using\n # another hashing algorithm.\n return binascii.crc32(player_name) & 0xFFFFFFFF", "def uniquify(s):\n curr_time = datetime.datetime.now().strftime(\"%m_%d_%Y_%H_%M_%S\")\n return s+'_%s'%curr_time", "def unique_name():\n return \"unique-{0}\".format(uuid.uuid4())", "def fight(fighters):\n return {}", "def _create_unique_turtle_name(self):\n\n\t\tself._id_counter += 1\n\t\tnew_name = \"turtle{}\".format(self._id_counter)\n\n\t\tif self._has_turtle(new_name):\n\t\t\treturn self._create_unique_turtle_name()\n\n\t\treturn new_name" ]
[ "0.538992", "0.51855546", "0.51556164", "0.5067941", "0.5064117", "0.5042174", "0.49878252", "0.49513406", "0.49481347", "0.49373364", "0.4870385", "0.4864954", "0.4862721", "0.48584658", "0.48499587", "0.48478696", "0.48476604", "0.48139304", "0.4779611", "0.4775464", "0.47721863", "0.47718304", "0.4748957", "0.47361", "0.47339094", "0.47208902", "0.47049832", "0.46986878", "0.46830317", "0.46708456" ]
0.6535847
0
Returns the index used for the unknown token.
def unk_index(self) -> int:
    return self._unk_index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unk_token(self):\r\n if self._unk_token is None:\r\n logger.error(\"Using unk_token, but it is not set yet.\")\r\n return self._unk_token", "def _value_token_index(self):\n # TODO: memoize this value\n for i, token in enumerate(self.tokens):\n if not token.type.is_metadata:\n return i\n raise RuntimeError('could not find a value token')", "def find_special_token_index(identified_concepts: IdentifiedConcepts, special_token: str):\n for i in range(len(identified_concepts.ordered_concepts)):\n concept = identified_concepts.ordered_concepts[i]\n if concept.name == special_token:\n return i\n return -1", "def _index_lookup(self, key: int) -> str:\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token", "def _index(self) -> int:\n return -1", "def unkid(self):\r\n return self.word2idx.get(UNK, 0)", "def index(self, word):\n return self.tokens.index(word)", "def get_illegal_index(s, tokens=ALL_ESC_TOKENS):\n backslash = \"\\\\\" # this is a single backslash!\n i = 0 # char counter\n while i < len(s):\n c = s[i]\n if c != backslash:\n i += 1\n continue\n if c == backslash:\n if i + 1 < len(s) and s[i + 1] == \" \":\n # ignoe backslash followed by space\n i += 2\n continue\n else:\n is_legal = False\n for token in tokens:\n try:\n ss = s[i + 1 : i + 1 + len(token)]\n except IndexError:\n continue\n if ss == token:\n is_legal = True\n i += len(token)\n break\n else:\n continue\n if not is_legal:\n return i + 1\n return None", "def token_to_idx(self) -> Dict[Hashable, int]:\n return self._token_to_idx", "def unk(self):\n return self.UNK", "def itos(self, i):\n token = self._itos.get(i)\n return token if token else 'UNK'", "def idx(self):\n return int(self.__ph.get('idx', 0))", "def _word_lookup(self, key: str) -> int:\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx", "def get_index(self, item: _T) -> int:\n if item not in self.item_to_index and self.unknown is not None:\n return self.item_to_index[self.unknown]\n return self.item_to_index[item]", "def index_in_tag(self):\n if hasattr(self, '_m_index_in_tag'):\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None\n\n self._m_index_in_tag = (self.tag - 35)\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None", "def get_ident():\n return -1", "def get_oldest_index(self):\n for k in self.go_forward(start=self.write_pos):\n if self.log[k] != None:\n break\n return k", "def _tokens_to_index(self,tokens):\n wids = []\n for tk in tokens:\n if tk in self.wtoi.keys():\n wids.append(self.wtoi[tk])\n else:\n wids.append(1) # <UNK>\n for _ in range(self.sentence_max_length - len(wids)):\n wids.append(0)\n if len(wids) > self.sentence_max_length:\n wids = wids[:self.sentence_max_length]\n return wids", "def num_tokens(self, index):\r\n raise NotImplementedError", "def getindex(self):\n for index in range(1, len(self.quotes_list)):\n if str(index) not in self.quotes_list.keys():\n return index\n return len(self.quotes_list) + 1", "def state_index_for_symbol(self, symbol):\n for idx, state in enumerate(self):\n if state.symbol == symbol:\n return idx\n if value in self.symbol_synonyms:\n return self.index(self.symbol_synonyms[value])\n raise Exception(\"State with symbol of '%s' not defined\" % symbol)", "def getWordIdx(token, word2Idx): \n if token in word2Idx:\n return word2Idx[token]\n elif token.lower() in word2Idx:\n return word2Idx[token.lower()]\n \n return word2Idx[\"UNKNOWN_TOKEN\"]", "def get_list_index(self):\r\n s = 
self.query('LIST:MAN?')\r\n if s == None: return None\r\n return int(s)-1", "def frame_idx(self) -> int:\n pass", "def index(self) -> int:\r\n return self._index", "def index(self) -> int:", "def getWordIdx(token, word2Idx): \n if token in word2Idx:\n return word2Idx[token]\n elif token.lower() in word2Idx:\n return word2Idx[token.lower()]\n \n return word2Idx[\"UNKNOWN\"]", "def _get_index(self, character):\n OFFSET = 65 # ascii value of 'A' since the first element should be 'A'\n index = ord(character) - OFFSET\n return index", "def getIndices(self, tokens):\n tokenTxt, posTxt = attachTokens(tokens)\n if tokenTxt in self.tokenIndices:\n tokenIdx = self.tokenIndices[tokenTxt]\n else:\n tokenIdx = self.tokenIndices[unk]\n if posTxt in self.posIndices:\n posIdx = self.posIndices[posTxt]\n else:\n posIdx = self.posIndices[unk]\n return tokenIdx, posIdx", "def character(index):\n # Default: act as a dummy.\n return index" ]
[ "0.72003436", "0.6942262", "0.68594867", "0.67493963", "0.6612994", "0.6493423", "0.6222837", "0.619791", "0.61259353", "0.6096506", "0.6081844", "0.60786164", "0.605654", "0.60505426", "0.59887683", "0.5978605", "0.59053624", "0.5880216", "0.5852015", "0.5846026", "0.58355534", "0.58186805", "0.5761412", "0.57565", "0.57531905", "0.5750771", "0.5740293", "0.5717514", "0.5716755", "0.5694823" ]
0.76617646
0
Returns the index used for the endofsentence token.
def eos_index(self) -> int:
    return self._eos_index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end_of_text_token(self) -> str:\n return self._end_of_text_token", "def get_last_sentence_end(text: str) -> int:\n end = -1\n for e in sentence_ends:\n p_end = [m.end() for m in re.finditer(e, text)]\n end = max(end, p_end[-1] if len(p_end) > 0 else -1)\n return end", "def eosid(self):\r\n return self.word2idx.get(EOS, 0)", "def eos_token(self):\r\n if self._eos_token is None:\r\n logger.error(\"Using eos_token, but it is not set yet.\")\r\n return self._eos_token", "def index(self, word):\n return self.tokens.index(word)", "def GetIfIndex(self):\n return self.indentindex[-1]", "def getLastOpTokenIndex(tokens):\n currentTokenIndex = tokens.index\n currentToken = tokens.get(currentTokenIndex)\n # operator β†’ dot-operator-headΒ­ dot-operator-characters\n if currentToken.type == DOT and tokens.get(currentTokenIndex + 1).type == DOT:\n # dot-operator\n currentTokenIndex += 2 # point at token after \"..\"\n currentToken = tokens.get(currentTokenIndex)\n\n # dot-operator-character β†’ .Β­ | operator-characterΒ­\n while (currentToken.type == DOT or\n SwiftSupport.isOperatorCharacter(currentToken)):\n currentTokenIndex += 1\n currentToken = tokens.get(currentTokenIndex)\n return currentTokenIndex - 1\n\n # operator β†’ operator-headΒ­ operator-charactersΒ­?\n if SwiftSupport.isOperatorHead(currentToken):\n tokens.getText()\n currentToken = tokens.get(currentTokenIndex)\n while SwiftSupport.isOperatorCharacter(currentToken):\n currentTokenIndex += 1\n currentToken = tokens.get(currentTokenIndex)\n return currentTokenIndex - 1\n else:\n return -1", "def last_sequence_ind(self,):\n return self.last_sequence_ind_", "def num_tokens(self, index):\r\n raise NotImplementedError", "def end(self) -> pos.Pos:\n return self.__end", "def get_next_word(self, index, orignal=False):\n try:\n if orignal:\n return self.df.iloc[index + 1][1]\n return self.label_encoder.transform([self.df.iloc[index + 1][1]])[0]\n except IndexError:\n if orignal:\n return \"<END>\"\n return self.label_encoder.transform([\"<END>\"])[0]\n except ValueError:\n # Returning -1 for unseen words\n return -1", "def end_index(self):\n if self.value == self._query.num_pages:\n # Special case for the last page\n # because there can be orphans.\n return self._query.count\n else:\n return self.value * self._query.page_size.value", "def get_ends(self): \n return self.last_words", "def last_index(self) -> int:\n return self._last_index", "def num_tokens(self, index):\n return max(self.d1.num_tokens(index), self.d2.num_tokens(index))", "def getSentence(self):\n return self.tokens[0].sentence", "def length(self):\n return len(self.tokens)", "def i (self):\n\n return self.end - 1", "def index_in_tag(self):\n if hasattr(self, '_m_index_in_tag'):\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None\n\n self._m_index_in_tag = (self.tag - 35)\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None", "def search_start_end_index_in_sentence(sent, np):\n\n nps = [x for x in np.split() if x]\n if len(nps) == 0:\n return (-1, -1)\n elif len(nps) == 1:\n indices = search_one_token_reducing_suffix(sent, np)\n if len(indices) > 0:\n return (indices[0], search_next_whitespace(sent, indices[0]))\n else:\n return (-1, -1)\n else:\n # search start:\n start = search_correct_position(sent, nps)\n end = search_correct_position(sent, nps, True)\n if end != -1:\n end = search_next_whitespace(sent, end)\n return (start,end)", "def end_of_sentence(sentence, index, length):\n if (index >= length - 1): return True\n while 
(index < length):\n ch = sentence[index]\n index += 1\n if (is_word_character(ch)): return False\n return True", "def getLength( self, sbjct_token ):\n if not self.mIsLoaded: self.__loadIndex()\n return self.mIndex[sbjct_token][2]", "def last_token(self, text):\n if text is not None:\n text = text.strip()\n if len(text) > 0:\n word = self.safe_split(text)[-1]\n word = word.strip()\n return word\n return ''", "def _value_token_index(self):\n # TODO: memoize this value\n for i, token in enumerate(self.tokens):\n if not token.type.is_metadata:\n return i\n raise RuntimeError('could not find a value token')", "def get_last_index(self):\n return len(self.chain) - 1", "def end(self):\n return self._t0 + self.length", "def is_end_of_sentence(prev_token, current_token):\n is_capital = current_token[0].isupper()\n is_punctuation = prev_token in ('!', '?', '.')\n return is_capital and is_punctuation", "def end_tag_or_none(self, token):\n if self.patterns['end_tag'].match(token):\n return token[2:-4].upper()", "def _get_end(self):\n return self._end", "def _get_yacc_lookahead_token(self):\n return self.lexer.last_token" ]
[ "0.69489163", "0.6631694", "0.65316284", "0.62150425", "0.614985", "0.6049897", "0.5994448", "0.5987369", "0.5985675", "0.59603405", "0.59585285", "0.5903485", "0.58980507", "0.58945006", "0.5892147", "0.5859628", "0.5856385", "0.5851119", "0.5822088", "0.58077884", "0.5755482", "0.5741642", "0.56999415", "0.56807566", "0.5665711", "0.5660556", "0.5655792", "0.5632176", "0.5630681", "0.56298465" ]
0.6858568
1
Returns the index used for padding.
def padding_index(self) -> int:
    return self._pad_index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def padid(self):\r\n return self.word2idx.get(PAD, 0)", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def to_index(self):\r\n return (BOARD_HEIGHT - 1 - self.y) * BOARD_HEIGHT + (BOARD_WIDTH - 1 - self.x)", "def idx2off(i):\n return i * 32 - (24 * (i//4))", "def make_positions(tensor, padding_idx):\n mask = tensor.ne(padding_idx).int()\n return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx", "def byteIndex(self):\n return self.offset", "def pad(self):\n return self._pad", "def padding(self):\n pad = self.ntiles - self.windowsize\n return (int((pad - 1)/2.), int((pad + 1)/2.))", "def _get_padding_width(self, column_index: int) -> int:\n _, pad_right, _, pad_left = self.padding\n if self.collapse_padding:\n if column_index > 0:\n pad_left = max(0, pad_left - pad_right)\n return pad_left + pad_right", "def offset_pad(self, offset):\n return (((offset + 3) / 4) * 4)", "def word_ptr(base, index):\n\n return base + 4*index", "def get_idx_from_sent(sent, word_index, max_l, pad):\n x = [0] * pad # left padding\n for word in sent:\n if word in word_index: # FIXME: skips unknown words\n if len(x) < max_l: # truncate long sent\n x.append(word_index[word])\n else:\n break\n # len(x) includes pad\n rpad = [0] * max(0, max_l + 2 * pad - len(x)) # right padding\n return x + rpad", "def getPidx(self):\n return int(bytes(self.keeper.getGbl(b\"pidx\")), 16)", "def north_index(self, index):\n return index - self.size", "def offset(self):\r\n return self.buf[0].unib[9:11]", "def get_output_slice_idx(self, output_index):\r\n ipos = 0\r\n opos = output_index\r\n for otaps in zip(self.mitmot_out_taps()):\r\n if len(otaps) > 0:\r\n return ipos\r\n else:\r\n opos = opos - 1\r\n ipos += len(otaps)\r\n return ipos + opos", "def _index(self) -> int:\n return -1", "def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0", "def pad(self):\n return self.PAD", "def get_pad_info(self, index):\n if index == 0:\n return self.pad_token\n elif index == 1:\n return self.pad_id\n else:\n raise ValueError(\"Wrong index for get pad token information......\")", "def _get_index(self, character):\n OFFSET = 65 # ascii value of 'A' since the first element should be 'A'\n index = ord(character) - OFFSET\n return index", "def padding(sent, sequence_len):\n if len(sent) > sequence_len:\n sent = sent[:sequence_len]\n padding = sequence_len - len(sent)\n sent2idx = sent + [0]*padding\n return sent2idx, len(sent)", "def get_input_offset(self):\n return ELFLING_PADDING + len(self.__data) - 4", "def index(self) -> int:\r\n return self._index", "def ring_idx(self) -> int:\n return self._ring_idx", "def idx(self):\n return int(self.__ph.get('idx', 0))", "def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2", "def InterfaceIndex(self) -> int:" ]
[ "0.72384465", "0.6775465", "0.6775465", "0.6775465", "0.66019714", "0.65446895", "0.6462349", "0.64010626", "0.6396798", "0.63369316", "0.62652266", "0.6255782", "0.6193014", "0.6142206", "0.6112773", "0.61080986", "0.61021554", "0.61016154", "0.61009765", "0.608965", "0.6088242", "0.6085784", "0.60798097", "0.60625124", "0.60614276", "0.6022151", "0.6021396", "0.60171396", "0.6012747", "0.6010871" ]
0.88537806
0
Returns the index used for masking.
def mask_index(self) -> int:
    return self._mask_index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mask(self):\n return self.mask_index", "def masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask,order='C'))[0]", "def non_masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask-1,order='C'))[0]", "def getMask(self,filt):\n indx = [self.mask[i] for i in xrange(len(self._header)) if filt == self._header[i]]\n return indx", "def Mask(self) -> int:", "def mask_id(self) -> int:\n return self.tokenizer.get_command('MASK').Id", "def get_mask_offset(mask):\n # use ctypes to truncate the result to a uint32\n cmask = ctypes.c_uint32(mask).value\n return _bruijn32lookup[ctypes.c_uint32((mask & -mask) * 0x077cb531).value >> 27]", "def get_index(self):\n return self.inverted_index", "def mask_id(self):\n m = 2 * self.mask_full()\n m[0:self.size, 0:self.size] = self.id * self.mask()\n return m.astype(np.int16)", "def _get_target_index(self):\n return (self.index + self.source_window * (not self.overlapping) +\n self.offset)", "def _getindicator(self, index: int) -> int:\n bitmask = 1 << (index + 1)\n return self._get_buffer(0x04) & bitmask", "def mask(self):\n return ((2**(self.width) - 1) << self.lsb)", "def mask_indices(n,mask_func,k=0):\r\n m = ones((n,n),int)\r\n a = mask_func(m,k)\r\n return where(a != 0)", "def mask(self):\n return self._mask", "def mask(self):\n return self._mask", "def mask(self):\n return self._mask", "def mask(self):\n return self._mask", "def _raveled_index(self):\n return np.r_[:self.size]", "def _raveled_index(self):\n return np.r_[:self.size]", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def getAddressOfIndex(self) -> long:\n ...", "def get_current_index(self, index):\n\n if self.method == 1:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]) & \\\n (self.unassigned_data[4,:]==self.unassigned_data_relax[4,index]))\n else:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]))\n\n current_idx = current_idx[0][0]\n\n return current_idx", "def get_sample_mask(self):", "def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi", "def mask(self) -> str:\n return self.tokenizer.get_command('MASK').Id", "def get_5index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==0]", "def get_idxes_from_mask(mask):\n if len(mask) > 1:\n return torch.nonzero(mask.squeeze(), as_tuple=False).reshape(1, -1)[0]\n elif len(mask) == 1:\n return torch.tensor([0], dtype=torch.int64) if mask.sum() == 1 else torch.tensor([], dtype=torch.int64)\n return torch.tensor([], dtype=torch.int64)", "def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0", "def extract_mask(self, i):\n mu = 0\n for j in range(self._N-1, -1, -1):\n mu = mu << 1\n if self._compact_M[j] > i:\n mu = mu | 1\n return mu", "def _index(self) -> int:\n return -1" ]
[ "0.8292295", "0.74047023", "0.73821837", "0.70974326", "0.7022145", "0.6828254", "0.67685896", "0.6646346", "0.6616001", "0.65767497", "0.6516475", "0.6475054", "0.63904303", "0.6369527", "0.6369527", "0.6369527", "0.6369527", "0.6355274", "0.6355274", "0.6354276", "0.6346948", "0.6343789", "0.63090706", "0.62974596", "0.62862307", "0.6270736", "0.6250643", "0.6228419", "0.620324", "0.61933726" ]
0.88710827
0
This function checks whether suggestions has seed keywords.
def checkSeedKeywordExists(self, keyword, meta_keyword):
    keyword_ = re.sub('[^A-Za-z0-9]+', '', keyword)
    split_meta_keyword = meta_keyword.split()
    condition = False
    """
    splitting meta keyword with space and checking each word from meta keyword exists in the keyword
    example :: for keyword "air bbq fryer" and meta keyword "air fryer" we split meta keyword whch is ["air","fryer"]
    and we check each value in the array exists in the keyword "air bbq fryer"
    """
    if len(split_meta_keyword) > 1:
        condition = all(x.lower() in keyword.lower() for x in split_meta_keyword)
    if meta_keyword.lower() in keyword.lower() or meta_keyword.lower() in keyword_.lower() or condition:
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_keyword_present(self,text):\n if self.keywords_re.search(text) and not self.blocked_keywords_re.search(text):\n print self.blocked_keywords_re.search(text)\n return True\n return False", "def IsImportant(self, key):\n\n if any(x.lower() == key for x in self.keywords):\n return True\n return False", "def has_enough_keywords(pkg_id):\n sql = 'select count(*) from keywords where doc_id = ' + str(pkg_id)\n res = util.executeSQL(conn, sql)\n words_count = 0\n for row in res:\n words_count = int(row[0])\n if words_count < 2:\n delete_package(pkg_id)\n delete_keywords(pkg_id)\n return False\n return True", "def test_guided():\n top_n = 5\n seed_keywords = [\"time\", \"night\", \"day\", \"moment\"]\n keywords = model.extract_keywords(doc_one,\n min_df=1,\n top_n=top_n,\n seed_keywords=seed_keywords)\n\n assert isinstance(keywords, list)\n assert isinstance(keywords[0], tuple)\n assert isinstance(keywords[0][0], str)\n assert isinstance(keywords[0][1], float)\n assert len(keywords) == top_n", "def check_words(dictionary_, start_word, stop_word):\n if dictionary_.is_real_word(start_word) is False:\n print(\"Word {} not found in the dictionary\".format(start_word))\n return False\n if dictionary_.is_real_word(stop_word) is False:\n print(\"Word {} not found in the dictionary\".format(stop_word))\n return False\n return True", "def _seed_words(self, seed):\n valid_words = set()\n for i in range(3, 7):\n for _tuple in itertools.permutations(list(seed), i):\n if ''.join(_tuple) in self._word_set:\n valid_words.add(''.join(_tuple))\n return valid_words", "def has_keyword(tweet, keywords):\n temp = tweet.lower()\n for keyword in keywords:\n if bool(re.search(f'\\\\b{keyword}\\\\b', temp)):\n return True\n return False", "def isWordSet(self):\n return len(self.getWord()) != 0", "def is_offensive(drug_name, bad_words):\n\n for bad_word in bad_words:\n if bad_word in drug_name:\n return True\n return False", "def test_only_matches(self):\n completions = flask_server.get_completions(\"zy\", 5)\n self.assertEqual(completions, ['zydeco', 'zygote', 'zygotic', 'zymurgy'])", "def _check_for_typos(all_tokens):\n d = enchant.DictWithPWL(\"en_US\",\"mywords.txt\")\n known_typo_list = enchant.DictWithPWL(\"en_US\", \"add_to_typo_list.txt\")\n unknown_word_list = enchant.DictWithPWL(\"en_US\", \"unknown_words.txt\")\n\n counter = 0\n incorrect_token_list = []\n for token in all_tokens:\n counter += 1\n is_correct = d.check(token)\n known_typo = known_typo_list.check(token)\n unknown_word = unknown_word_list.check(token)\n\n if not is_correct and not known_typo and not unknown_word and len(token) > 4:\n # print counter, \"Token: \", token, \", IC: \", is_correct, \"KT: \", known_typo, \"UW: \", unknown_word, \"LEN: \", len(token)\n print token\n if token not in incorrect_token_list:\n incorrect_token_list.append(token)\n print \"** Num Incorrect: \", len(incorrect_token_list)", "def iskeyword(self, arg: str):\n if arg.upper() in self._keywords:\n return True\n return False", "def test_queryKeywordFlag(self):\n self._keywordFilteringTest(\"keyword\")", "def nonexistent(self, resp):\n return any(a in str(resp).lower() for a in NOT_FOUND_KEYWORDS)", "def keyword_relevant(self):\n return self.metadata.keyword_relevant", "def unverifiable(self, resp):\n return any(a in str(resp).lower() for a in UNVERIFIABLE_KEYWORDS)", "def test_unwanted_words(self) -> None:\n pad_open: bool = False\n for word in self.report.get_words():\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if 
pad_open:\n continue\n for u_word in self.rules.unwanted_words:\n if word.text == u_word[\"word\"]:\n self.add_error(\n f\"Ordet {word.text} Γ€r inte tillΓ₯tet, \"\n f\"anvΓ€nd {u_word['alternative']} istΓ€llet.\",\n word=word,\n )\n break", "def isGoalState(self, state):\n wds = get_words(state)\n # checks if every word in corpus - USELESS/Possible damage\n # for i in range(len(wds)):\n # if (self.bigramCost(wds[i], self.not_word) >= self.threshold):\n # return False\n for i in range(len(wds)):\n if (wds[i] not in self.fills[i]):\n return False\n return True", "def check_words(title, wordlist, verbose=False):\n\tfor word in wordlist:\n\t\tif title.find(word) >= 0:\n\t\t\tif verbose:\n\t\t\t\tprint(\"\\t\\tFOUND '\"+word+\"' IN:\", title)\n\t\t\treturn True\n\treturn False", "def _candidates(self, token):\n token_as_list = [token]\n token_1_edits = SymmetricDeleteCorrector._one_edit_deleted_variations(token)\n token_2_edits = SymmetricDeleteCorrector._two_edits_deleted_variations(token)\n return (\n self._known_in(token_as_list) or\n self._deleted_variation_2_dictionary_words[token] or\n set(\n chain.from_iterable(\n self._deleted_variation_2_dictionary_words[token_1_edit] for token_1_edit in token_1_edits)) or\n set(\n chain.from_iterable(\n self._deleted_variation_2_dictionary_words[token_2_edit] for token_2_edit in token_2_edits)))", "def _is_term_exist(self, term):\n return term in self.postingDict", "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def handle_suggest():\n return 0", "def word_length_check(self):\r\n \r\n for word in self.all_words:\r\n if len(word) == len(self.best_guess):\r\n self.valid_words.add(word)", "def is_unimportant(word):\n return word in ['.', '!', ',', ] or '\\'' in word or word in stop_words", "def text_is_relevant(self, text):\n for word in text:\n if word in self.relevant_words:\n return True\n return False", "def canAnswer(self, layer):\n return wt.hangman in layer[wt.keywords]", "def get_keywords(self):\n\n if str(self.keywords) == \"unset\": return []\n # if self.keywords: return self.keywords\n if len(self.keywords) > 0: return self.keywords\n # retrieve from args and return if exists\n keywords = Settings.get_keywords() or []\n if len(keywords) > 0: return keywords\n if not Settings.prompt(\"keywords\"):\n self.keywords = \"unset\" # used to skip prompting for value in future\n return []\n question = {\n 'type': 'input',\n 'name': 'keywords',\n 'message': 'Keywords:',\n 'validate': ListValidator\n }\n keywords = prompt(question)[\"keywords\"]\n keywords = [n.strip() for n in keywords.split(\",\")]\n # confirm keywords\n if not Settings.confirm(keywords): return self.get_keywords()\n self.keywords = keywords\n return self.keywords", "def all_words_in_model( wordlist, model ):\n for w in wordlist:\n if w not in model:\n return False\n return True" ]
[ "0.67313564", "0.6352555", "0.6231023", "0.6097585", "0.59682447", "0.5963816", "0.5954134", "0.5869083", "0.582597", "0.5774812", "0.5728218", "0.57240635", "0.5721389", "0.5718693", "0.5712878", "0.56806475", "0.5676378", "0.5666818", "0.5658296", "0.56458634", "0.56441575", "0.5607822", "0.5607822", "0.5606814", "0.5596503", "0.55964154", "0.5592114", "0.55734587", "0.55639344", "0.555902" ]
0.64882475
1
return list of suggestion based on the geolocation and language for a seed keyword
def fetchSuggestion(self, keyword, seed_keyword, meta_keyword):
    # user agent is an HTTP browser request header that gives servers information regarding the client device and/or operating system on which the browser is running
    user_agent_list = [
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
    ]
    url = "http://suggestqueries.google.com/complete/search?client=chrome&hl={}&gl={}&callback=?&q={}".format(
        self.language, self.country, keyword)
    user_agent = random.choice(user_agent_list)
    headers = {"user-agent": user_agent, "dataType": "jsonp"}
    response = requests.get(url, headers=headers, verify=True)
    if response.status_code == 200:
        suggestions = json.loads(response.text)
        sugg = []
        index = 0
        relevancies = []
        suggesttypes = []
        suggestsubtypes = []
        verbatimrelevance = ""
        if "google:suggestrelevance" in suggestions[4].keys():
            relevancies = suggestions[4]['google:suggestrelevance']
        if "google:suggesttype" in suggestions[4].keys():
            suggesttypes = suggestions[4]['google:suggesttype']
        if "google:verbatimrelevance" in suggestions[4].keys():
            verbatimrelevance = suggestions[4]['google:verbatimrelevance']
        if "google:suggestsubtypes" in suggestions[4].keys():
            suggestsubtypes = suggestions[4]['google:suggestsubtypes']
        for word in suggestions[1]:
            if self.checkSeedKeywordExists(word, meta_keyword):
                sugg.append({
                    'keyword': word,
                    'relevancy_score': relevancies[index] if len(relevancies) > 0 else None,
                    'suggesttype':suggesttypes[index] if len(suggesttypes) > 0 else None,
                    'verbatimrelevance' : verbatimrelevance,
                    'seed_keyword': seed_keyword,
                    'meta_keyword': meta_keyword,
                    'suggestsubtype' : suggestsubtypes[index] if len(suggestsubtypes) > 0 else None,
                })
            else:
                continue
            index += 1
        return sugg
    # returning false when google blocks an ip for some time
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSuggestions(self, query): # a suggestions method for all the suggestions\r\n\r\n \r\n suggestions =[] # a new list to add all the suggested words\r\n\r\n j = len(query) # find length of the query \r\n list_of_req_strings = [i for i in self._languageSet if len(i) in ( j -1, j, j+ 1)] # a list for required strings with equal lenth of query\r\n # and words with length of query +1 and -1\r\n\r\n \r\n for word in list_of_req_strings: # a for loop to check all the saved strings with query\r\n if query == word: # if query is a legitimate word (equal to word in list)\r\n suggestions.append(word) # add it to the suggestions\r\n n = len(word) # find the length of each word\r\n match_Count = 0 # keep the count of matched letters\r\n for i in range(j): # a for loop with range of length of query\r\n if query[0].isupper(): # check if the first letter is upper case letter\r\n query, word = query.capitalize(), word.capitalize() # capitalize query and each word if query's first letter is a upper case letter\r\n if query[i] in word: # then check each letter of query in word\r\n match_Count += 1 # increase the macth count if matches\r\n else: # if first letter of query is not an upper case letter\r\n query, word = query.lower(), word.lower() # change query and each word to lower case\r\n if query[i] in word: # then check each letter of query in word\r\n match_Count += 1 # increase the macth count if matches\r\n\r\n if match_Count == (max(j, n) - 1): # compare the match count with the max letter string of query and word\r\n suggestions.append(word) # add the word if it is one edit distance\r\n if match_Count == len(word)+1: # if match count is one value more than the word\r\n if not word in suggestions: # if the word not in suggestions\r\n suggestions.append(word) # add the word to the suggestions list\r\n\r\n\r\n for k in range(j - 1): # a for loop with range of query length -1\r\n inverted_char_word = query[:k]+query[k+1]+query[k]+query[(k+2):] # check by changing two letters(neighbering letters) till last letter\r\n if inverted_char_word not in suggestions: # if the changed word not in suggestions\r\n if inverted_char_word in list_of_req_strings: # and in list_req_strings\r\n suggestions.append(inverted_char_word) # add the changed word to the suggestions\r\n\r\n return (sorted(suggestions)) # return the sorted suggestions list\r", "def autocomplete():\n query = '' if request.args.get('query') is None else request.args.get('query')\n\n prefixed_words = []\n close_words = []\n for f in app.preprocessed.words:\n lowered = f.lower()\n if lowered.startswith(query) and lowered != query:\n prefixed_words.append(f)\n elif levenshtein(query, lowered) <= 1:\n close_words.append(f)\n\n result = {\n 'success': True,\n 'data': {\n 'suggestions': prefixed_words + close_words\n }\n }\n return jsonify(result)", "def _search_suggestions():\n now = time.time()\n words_q = Application.objects.values('acronym',\n 'owner', 'owner_org',\n 'nasa_off_name', 'nasa_requester',\n 'manager_app_development', 'manager_project',\n 'dev_name_primary', 'dev_name_alternate').distinct()\n wordset = set()\n for worddict in words_q:\n vals = worddict.values()\n for val in vals:\n wordset.add(val)\n words = [word for word in wordset if word]\n words.sort()\n logging.info(\"search_suggestions len=%d time=%f\" % (len(words), time.time() - now))\n return json.dumps(words)", "def _get_new_location(self, word):\n language = \"\"\n if self.language == \"esp\" or self.language == \"spanish\":\n language = \"-spanish\"\n elif self.language 
== \"fra\" or self.language == \"french\":\n language = \"-french\"\n if word in NAMED_ENTITIES[\"country\" + language]:\n return np.random.choice(NAMED_ENTITIES[\"country\" + language], self.n)\n elif word in NAMED_ENTITIES[\"nationality\" + language]:\n return np.random.choice(NAMED_ENTITIES[\"nationality\" + language], self.n)\n elif word in NAMED_ENTITIES[\"city\"]:\n return np.random.choice(NAMED_ENTITIES[\"city\"], self.n)\n return []", "def suggestions_wikipedia(query, lang=\"fr\"):\n wikipedia.set_lang(\"fr\")\n return wikipedia.search(query, results=10)", "def build_suggesters(DomainName=None):\n pass", "def generate_suggestions(search_string):\n\n root = read_data_model('data/data_model.pkl')\n with ListStream() as x:\n root.search(search_string, '')\n print [s.strip('\\n') for s in x.data if s != '\\n']", "def suggest_completions(cls, text):\n USER_SUGGEST = 'user-suggest'\n es = UserMappingType.search().get_es()\n\n results = es.suggest(index=cls.get_index(), body={\n USER_SUGGEST: {\n 'text': text.lower(),\n 'completion': {\n 'field': 'suggest'\n }\n }\n })\n\n if results[USER_SUGGEST][0]['length'] > 0:\n return results[USER_SUGGEST][0]['options']\n\n return []", "def spelling_suggestions(drug_name):\n if not isinstance(drug_name, str):\n raise TypeError(\"drug_name must be a string.\")\n r = requests.get(f\"https://rxnav.nlm.nih.gov/REST/spellingsuggestions.json?name={drug_name}\")\n response = r.json()\n suggestions = response['suggestionGroup']['suggestionList']['suggestion']\n return suggestions", "def suggestion(self):\n raise NotImplementedError()", "def getSuggestions(self,query):\n if not isinstance(query, str): # Checks if the query is entered as a string.\n raise TypeError('The query must be a string')\n self._possible = [] #List of strings one change away\n self._final = [] #Final list of suggestions\n self._alphabet = list(string.ascii_lowercase) # Produces a list of all lowercase letters.\n self._alphabet.extend(('-',' '))\n self._query = query.lower()\n for i in range((len(query))-1):\n possible = self._query[:i]+self._query[i+1]+self._query[i]+self._query[(i+2):] #Add cases of inverting two letters\n self._possible.append(possible)\n for i in range(len(query)):\n possible = self._query[:i] + self._query[(i+1):] #Add cases of deleting one letter\n self._possible.append(possible)\n for g in range(len(self._alphabet)):\n possible = self._query[:i]+self._alphabet[g]+self._query[(i+1):] #Add cases of inserting one letter\n possibleAlso = self._query[:i]+self._alphabet[g]+self._query[i:] #Add cases of replacing one letter\n self._possible.append(possible)\n self._possible.append(possibleAlso)\n suggestionLength = len(self._possible)\n for i in range(suggestionLength):\n self._possible.append(self._possible[i].capitalize()) #Add all possible strings, capitalized (doubles list length)\n for i in self._possible:\n if i in self._words:\n if i not in self._final: #Removes duplicates from final list\n if i != query: \n self._final.append(i)\n if query.islower() == True:\n for i in self._final:\n if i[0].isupper() == True:\n if i[0] != query[0].upper():\n self._final.remove(i)\n if query.istitle() == True:\n self._final = [i.capitalize() for i in self._final]\n self._final.sort()\n return self._final", "def suggest(self, **kwargs):\n return suggest.suggest(self._host, self._session, **kwargs)", "def find_location_abbreviations(question_tokens, question):\n country_name_abbrevations_US = [\n 'USA', 'US', 'United States', 'United States of America'\n ]\n\n 
country_name_abbrevations_UK = [\n 'UK', 'United Kingdom', 'England'\n ]\n\n location_abbvreviations_US = {\n 'AK': ['Alaska'],\n 'AL': ['Alabama'],\n 'AR': ['Arkansas'],\n 'AZ': ['Arizona'],\n 'CA': ['California'],\n 'CO': ['Colorado'],\n 'CT': ['Connecticut'],\n 'DE': ['Delaware'],\n 'FL': ['Florida'],\n 'GA': ['Georgia'],\n 'HI': ['Hawaii'],\n 'IA': ['Iowa'],\n 'ID': ['Idaho'],\n 'IL': ['Illinois'],\n 'IN': ['Indiana'],\n 'KS': ['Kansas'],\n 'KY': ['Kentucky'],\n 'LA': ['Louisiana', 'Los Angeles'],\n 'MA': ['Massachusetts'],\n 'MD': ['Maryland'],\n 'ME': ['Maine'],\n 'MI': ['Michigan'],\n 'MN': ['Minnesota'],\n 'MO': ['Missouri'],\n 'MS': ['Mississippi'],\n 'MT': ['Montana'],\n 'NC': ['North Carolin'],\n 'ND': ['North Dakota'],\n 'NE': ['Nebraska'],\n 'NH': ['New Hampshire'],\n 'NJ': ['New Jersey'],\n 'NM': ['New Mexico'],\n 'NV': ['Nevada'],\n 'NY': ['New York'],\n 'OH': ['Ohio'],\n 'OK': ['Oklahoma'],\n 'OR': ['Oregon'],\n 'PA': ['Pennsylvania'],\n 'RI': ['Rhode Island'],\n 'SC': ['South Carolin'],\n 'SD': ['South Dakota'],\n 'TN': ['Tennessee'],\n 'TX': ['Texas'],\n 'UT': ['Utah'],\n 'VA': ['Virginia'],\n 'VT': ['Vermont'],\n 'WA': ['Washington'],\n 'WI': ['Wisconsin'],\n 'WV': ['West Virginia'],\n 'WY': ['Wyoming']\n }\n\n location_candidates = []\n\n for key, potential_values in location_abbvreviations_US.items():\n add_me = False\n if key in question_tokens:\n add_me = True\n\n for sub_value in potential_values:\n if sub_value in question_tokens:\n add_me = True\n\n if add_me:\n location_candidates.append(key)\n location_candidates.extend(potential_values)\n\n for abbreviation in country_name_abbrevations_US:\n if abbreviation in question:\n # we don't know how to look for USA - therefore add all options. The database finder should sort them out.\n location_candidates.extend(country_name_abbrevations_US)\n\n for abbreviation in country_name_abbrevations_UK:\n if abbreviation in question:\n # we don't know how to look for United Kingdom - therefore add all options. 
The database finder should sort them out.\n location_candidates.extend(country_name_abbrevations_UK)\n\n return location_candidates", "def suggestion_dictionaries(text):\n tool = language_check.LanguageTool('en-US')\n matches = tool.check(text)\n for i, match in enumerate(matches):\n fromy = match.fromy\n fromx = match.fromx\n ruleId = match.ruleId\n replacements = match.replacements\n matches[i] = {\"fromx\": fromx, \"fromy\": fromy, \"ruleId\": ruleId, \"replacements\": replacements}\n return matches", "def pull_suggestion(self, callback, who, arg):\n\t\t\n random_sug = self.dong.db.get_random_row('suggest')\n res = self.google_suggest(callback, who, random_sug[2], False)\n\t\t\n w = res.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if w[-1:] != '?':\n res = res + '?'\n return res.capitalize()", "def test_search_other_language(self):\n term = \"ol\"\n expected_results = []\n request = RequestFactory().get(\"/ac/gateway-langnames/?q=\" + term)\n response = gateway_languages_autocomplete(request)\n data = json.loads(response.content)\n self.assertListEqual(data.get(\"results\"), expected_results)\n self.assertEqual(data.get(\"term\"), \"ol\")\n self.assertEqual(data.get(\"count\"), 0)", "def get_search_suggestions(Resource=None, SuggestionQuery=None):\n pass", "def lookup(self, term):\n results = []\n lookup_term = term.lower()\n for char, latex, description, user_description in self.entries:\n if (char == term or\n latex.startswith(lookup_term) or\n latex[1:].startswith(lookup_term) or\n lookup_term in description.lower() or\n (user_description and lookup_term in user_description)):\n results.append((char, latex, description, user_description))\n return results", "def lookup(*args):\n lemma, results = etym(*args)\n languages = nest()\n if not results:\n query, _, dictionary = args\n lemma, results = etym(query, None, dictionary)\n for result in results:\n languages[lemma][unicode(result['pos'])] = result['languages']\n return languages", "def get_suggestions(db_company):\n if db_company.archived:\n return {}\n\n names = [\n db_company.name,\n *db_company.trading_names,\n ]\n\n data = [\n *itertools.chain(\n *[name.split(' ') for name in names],\n ),\n *names,\n ]\n\n countries = [\n db_company.registered_address_country_id,\n db_company.address_country_id,\n ]\n\n return {\n 'input': get_unique_values_and_exclude_nulls_from_list(data),\n 'contexts': {\n 'country': get_unique_values_and_exclude_nulls_from_list(countries),\n },\n }", "def suggestions(request):\n # Get login user profile\n u_profile = UserProfile.objects.get(user=request.user)\n # Search by user profile\n sqs = SearchQuerySet().filter(business_area=u_profile.business_area, city=u_profile.city,\n handicap_36__lte=int(u_profile.handicap_36) + 1,\n handicap_36__gte=int(u_profile.handicap_36) - 1,\n handicap_us__lte=int(u_profile.handicap_us) + 1,\n handicap_us__gte=int(u_profile.handicap_us) - 1)\n # Get user list\n queryset = User.objects.all()\n # Create result list\n results_list = []\n # Get User to list by id\n max_loop = sqs.count()\n for x in range(0, max_loop):\n user = get_object_or_404(queryset, pk=sqs[x].object.id)\n results_list.append(user)\n # Convert to serializer\n serializer = UserSerializer(results_list, many=True)\n if serializer.is_valid:\n return Response({'status': '200', 'code': 'OK_SUGGESTION',\n 'detail': serializer.data}, status=200)\n else:\n return Response({'status': '400', 'code': 'E_INVALID_PARAMETER_VALUES',\n 'detail': 
serializer.errors}, status=400)", "def features_search(df, type_, keywords):\n PLACES_KEY = os.environ[\"PLACES_KEY\"]\n output_file = \"json\"\n radius = \"1500\"\n lst = []\n\n for i in range(len(df)):\n coor = df[\"latitude\"][i].astype(str) + \", \" + df[\"longitude\"][i].astype(str)\n url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/\"+ output_file +\"?location=\"+coor +\"&radius=\" +radius+ \"&type=\"+type_+\"&keyword=\"+keywords + \"&key=\"+ PLACES_KEY\n res = requests.get(url)\n data = res.json()\n lst.append(len(data))\n \n return lst", "def test_search_gateway_language(self):\n term = \"gl\"\n expected_results = [dict(pk=self.gl.pk, lc=self.gl.lc, ln=self.gl.ln, lr=self.gl.lr, ang=self.gl.ang)]\n request = RequestFactory().get(\"/ac/gateway-langnames/?q=\" + term)\n response = gateway_languages_autocomplete(request)\n data = json.loads(response.content)\n self.assertListEqual(data.get(\"results\"), expected_results)\n self.assertEqual(data.get(\"term\"), term)\n self.assertEqual(data.get(\"count\"), 1)", "def locFromText(set_Country, textList, filterList):\n loc = []\n print('Start extracting locations from texts')\n for t in textList:\n # print(row)\n text = t[1]\n if len(text) > 0:\n text = re.sub(r'[^\\w]', ' ', text) # remove symbol\n\n places = geograpy.get_place_context(text=text)\n addStr = places.address_strings\n for add in addStr:\n country = add.split(',')[2] # get country name from extracted address_strings\n # print(country)\n if set_Country in country and not any(e in add for e in filterList):\n # print('City:', add)\n loc.append((t[0], add))\n return loc", "def _make_suggestions(self):\n\n #build concordance based on current approved\n concordance = dict()\n for term in self.tree.get_children('approved'):\n words = [word.strip(',.:;*').lower() \\\n for word in str(self.tree.item(term)['values'][0]).split(' ')]\n for word in words:\n# if word == 'ad':\n# messagebox.showwarning(\"word == 'ad'\",\"concordance={}\".format(concordance))\n# pass\n if word not in ['and', 'the', 'a', 'to', 'of'] \\\n and not word.isdigit():\n if word not in concordance:\n concordance[word] = set([term, ])\n else:\n concordance[word].add(term)\n# if word == 'ad':\n# messagebox.showwarning(\"word 'ad' added?\",\"concordance={}\".format(concordance))\n# pass\n \n \n #so concordance now holds a list of words in approved terms along with\\\n #list of index of terms() they occur in\n \n for term in self.tree.get_children('suggestions'):\n self._look_in_concordance(term, concordance)\n\n for term in self.tree.get_children('unknown'):\n self._look_in_concordance(term, concordance)\n\n self._collapse_all()", "def find_abecedarian_words():\n pass", "def get_suggestions(self, query_word: str, max_distance: int = 2) -> List[dict]:\n\n processed_query_word = self._pre_process(query_word)\n\n def search(dictionary_node: Dictionary, previous_row: list):\n \"\"\"Search for the candidates in the given dictionary node's children\n\n Args:\n dictionary_node (Dictionary): The node in the Trie dictionary\n previous_row (list): The previous row in the dynamic-programming approach\n \"\"\"\n\n for current_source_letter in dictionary_node.children:\n current_row = [previous_row[0] + 1]\n\n for i in range(1, len(processed_query_word) + 1):\n value = min(\n previous_row[i] + 1,\n current_row[i - 1] + 1,\n previous_row[i - 1]\n + self._replace(\n current_source_letter, processed_query_word[i - 1]\n ),\n )\n current_row.append(value)\n\n if (\n current_row[-1] <= max_distance\n and 
dictionary_node.children[current_source_letter].words_at_node\n is not None\n ):\n for word in dictionary_node.children[\n current_source_letter\n ].words_at_node:\n suggestions.append({\"word\": word, \"distance\": current_row[-1]})\n\n if min(current_row) <= max_distance:\n search(dictionary_node.children[current_source_letter], current_row)\n\n suggestions = list()\n\n first_row = range(0, len(processed_query_word) + 1)\n search(self._dictionary, first_row)\n\n suggestions = sort_list(suggestions, \"distance\")\n return suggestions", "def suggest_names(request):\n if request.method == \"GET\":\n contains = request.GET[\"q\"]\n artikel_list = Artikel.objects.filter(naziv__icontains=contains)\n if artikel_list:\n if len(artikel_list) > 12:\n artikel_list = artikel_list[:12]\n\n return render_to_response('invoices/lookup_artikel.html',\n {'seznam_artiklov': artikel_list}) #,\n #context_instance=RequestContext(request))", "def add_keywords(self, response: Response) -> list:\n return response.xpath(\"//ul[@class='term']/li/a/text()\").getall()", "def google_qa(question, **kwargs):\n nlp = StanfordCoreNLP('stanford-corenlp-full-2018-10-05', lang='zh')\n answer_scores = defaultdict(int)\n answer_types = [('PERSON',), ('STATE_OR_PROVINCE', 'CITY'), ('DATE', 'TIME')]\n if question.startswith('谁') or question.endswith('谁'):\n answer_type = answer_types[0]\n max_ngram = 1\n elif 'ε“ͺι‡Œ' in question:\n answer_type = answer_types[1]\n max_ngram = 2\n else:\n answer_type = answer_types[2]\n max_ngram = 3\n query_list = rewritten_queries(question)\n for query in query_list:\n for summary in get_summaries(query.query, **kwargs):\n for sentence in sentences(summary, nlp):\n for ngram in candidate_answers(sentence, query.query, answer_type, max_ngram):\n answer_scores[ngram] += ngram_score(\n ngram, query.score)\n ngrams_with_scores = sorted(answer_scores.items(),\n key=lambda x: x[1],\n reverse=True)\n return [(\"\".join(ngram), score)\n for (ngram, score) in ngrams_with_scores]" ]
[ "0.6738755", "0.6267569", "0.6113134", "0.6101123", "0.60902387", "0.5990677", "0.59711504", "0.587386", "0.5791649", "0.57696", "0.56940883", "0.5677929", "0.55937487", "0.5584599", "0.5582483", "0.55783206", "0.5576299", "0.5575877", "0.55594164", "0.5540818", "0.5529013", "0.55224496", "0.5520905", "0.5505097", "0.5502978", "0.5501244", "0.5485209", "0.5482247", "0.54581517", "0.54196036" ]
0.6916142
0
checks json file exists in a path
def doesJsonFileExists(json_filename):
    return os.path.exists(json_filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_file_exists(self):\n self.assertTrue(os.path.exists(\"file.json\"))", "def add_json_file_path(self):\n found = False\n for folder in json_folders:\n try_path = os.path.join(folder, 'part{}.json'.format(self.cbg))\n self.logger.debug(f'Considering path {try_path}.')\n if os.path.exists(try_path):\n found = True\n self.json_file_path = try_path\n break\n # Return True if the file is found.\n if found:\n return True\n else:\n self.logger.warning('cbg {} does not have a corresponding json file.'.format(self.cbg))\n return False", "def check_if_file_exists():\n try:\n if file_name in os.listdir(path_to_txt_file):\n return True\n else:\n with open(os.path.join(path_to_txt_file, file_name), \"a\") as f:\n json.dump({}, f)\n return True\n except:\n return False", "def is_json_path(location):\n if filetype.is_file(location):\n try:\n with open(location) as jsonfile:\n result = simplejson.load(jsonfile)\n if result:\n return True\n except:\n return False\n return False", "def exists(self, path):", "def check_for_json_folder(check_path):\n check_abspath = os.path.abspath(check_path)\n json_folders = [\"_JSON\", \"JSON\"]\n for jf in json_folders:\n if jf in check_abspath:\n print(\"{} folder exists : {}\".format(jf, check_abspath))\n top_path, base_path = check_abspath.split(\"{}/\".format(jf))\n out_path = os.path.dirname(os.path.join(top_path, base_path))\n if os.path.exists(out_path):\n print(\"Path exists : {}\".format(out_path))\n return out_path\n else:\n print(\"Path does not exist : {}\".format(out_path))\n print(\"Please create this folder and try again\")\n exit(1)", "def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def Exists(self, path: str) -> bool:\n ...", "def isjson(filepath):\n return filepath.lower().endswith('.json')", "def exists(self, path: PathLike):", "def _look_for_stat(self, dir):\n\n if os.path.exists('stat.json'):\n self.stat_files.append(os.path.join(dir,'stat.json'))\n return True\n else:\n return False", "def file_exists(cls, path: Path) -> bool:\n return path.exists()", "def exists(path):\n fs.exists(path)", "def file_exists(self, file_name):\n already_exists = False\n for file in os.listdir('saves'):\n if file.endswith('.json'):\n if file[:-5] == file_name:\n return True\n return False", "def file_exists(path):\n return os.path.exists(path)", "def file_exists(path):\n if path.startswith('gs://'):\n return gcsio.GcsIO().exists(path)\n else:\n return os.path.exists(path)", "def has_file(path):\n return os.path.exists(path)", "def is_file_exists(self):\n pass", "def check_if_file_exists(path):\n\n return os.path.exists(path)", "def search_existing_file(path):\n return os.path.isfile(path)", "def exists(path):\n r = requests.head(path)\n # print(r.status_code)\n return r.status_code == requests.codes.ok", "def path_exists(path):\r\n return os.path.exists(path)", "def exists(self, path: str) -> bool:\n pass", "def sniff( self, filename ):\r\n try:\r\n json.load( open(filename) )\r\n return True\r\n except Exception:\r\n return False", "def file_exists(self, path):\n return self._file_model.get_by_name(name=path) != []", "def path_exists(path):\n return os.path.exists(path)", "def 
file_exists(path: str) -> bool:\n\treturn os.path.isfile(path)", "def exists(path: str) -> bool:\n pass", "def check_json_file(file_name: str, excel_file: str, sheet_name: str) -> list:\n try:\n with open(file_name) as json_file:\n data = json.load(json_file)\n return data\n except FileNotFoundError:\n period_index = excel_file.index(\".\")\n json_file_name = excel_file[:period_index] + \".json\"\n write_json_file(json_file_name, convert_excel_file(excel_file, sheet_name))\n return check_json_file(file_name, excel_file, sheet_name)", "def file_exists(file_path):\n\n return Path(file_path).is_file()" ]
[ "0.81270504", "0.76794165", "0.7592552", "0.74362725", "0.7244022", "0.7100581", "0.6954963", "0.6929455", "0.6871901", "0.684659", "0.68414944", "0.6825681", "0.68075573", "0.6801073", "0.67956764", "0.6789363", "0.67673874", "0.676294", "0.6748578", "0.6748486", "0.67347926", "0.66407204", "0.663513", "0.6609426", "0.65770876", "0.65623254", "0.653839", "0.6527741", "0.6527323", "0.6524578" ]
0.83387053
0
Return all URLs available on a webpage when given an URL
def urls_in_url(url):
    global url_to_check_manually
    try:
        """Return all URLs when given an url"""
        html = urlopen(url)
        bsObj = BeautifulSoup(html.read(), "lxml")
        list_url = []
        for link in bsObj.find_all('a'):
            sublink = link.get('href')
            try:
                list_url.append(str(sublink))
            except:
                pass
        return list_url
    except:
        print('Impossible to open URL :', url)
        url_to_check_manually.append(url)
        return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_urls(url):\n try:\n #sock = urllib2.urlopen(url)\n result = urlfetch.fetch(url)\n sock = result.content\n parser = URLParser()\n #print sock.read()\n parser.feed(sock.read())\n sock.close()\n parser.close()\n return parser.urls\n except: # This is to take care of links that are not valid.\n return []", "def scrape_urls(webpage):\n html = requests.get(webpage)\n urls = []\n pattern = r\"\"\"\n http[s]?://(?:[a-zA-Z]|\n [0-9]|[$-_@.&+]|[!*\\(\\),]|\n (?:%[0-9a-fA-F][0-9a-fA-F]))+\"\"\"\n url_regex = re.compile(pattern, re.VERBOSE)\n urls = url_regex.findall(html.text)\n return urls", "def get_links_from_url(url):\n return [get_base(url)]", "def get_all_page(url: str) -> list:\n url_book = get_url_book(url)\n return url_book", "def fetch_urls(self, html):\n urls = []\n all_urls = set()\n dom = lh.fromstring(html)\n for href in dom.xpath('//a/@href'):\n url = urljoin(self.base_url, href)\n path = urlparse(url).path\n ext = os.path.splitext(path)[1]\n if bool(ext) and ext not in ['.html', '.htm']:\n continue\n if url not in self.visited_urls and url.startswith(self.base_url):\n urls.append(url)\n if url not in all_urls and url.startswith(self.base_url):\n all_urls.add(url)\n return urls, all_urls", "def scan_links_from_url(url):\n\n\t#Get the url\n\thtml_io = StringIO.StringIO()\n\n\tcurl = pycurl.Curl()\n\tcurl.setopt(pycurl.URL, str(url))\n\tcurl.setopt(pycurl.WRITEFUNCTION, html_io.write)\n\tcurl.perform()\n\n\thtml = html_io.getvalue()\n\n\thtml_io.close()\n\tcurl.close()\n\n\t#Apply the regex expression and fetch all links from source\n\tregexp = re.compile(\"\"\"http\\:\\/\\/rapidshare\\.(?:com|de)\\/files\\/[\\d]*\\/.*?\\..*?[^\"\\s\\<\\>]*[^.,;'\">\\:\\s\\<\\>\\)\\]\\!]\"\"\")\n\n\treturn regexp.findall(html)", "def getURLs():", "def fetch_all_links(url):\n\n url_list = []\n try:\n r = requests.get(url)\n if r.status_code == 200:\n\n logg('Fetching in page links...')\n #print r.status_code\n content = r.content\n soup = BeautifulSoup(content, \"lxml\")\n\n # scan for all anchor tags\n tags = soup('a')\n\n for a in tags:\n href = a.get(\"href\")\n if href is not None:\n new_url = urlparse.urljoin(url, href)\n if new_url not in url_list:\n url_list.append(make_clean_url(new_url))\n return url_list\n\n elif r.status_code == 403:\n print \"Error: 403 Forbidden url\"\n elif r.status_code == 404:\n print \"Error: 404 URL not found\"\n else:\n print \"Make sure you have everything correct.\"\n\n except requests.exceptions.ConnectionError, e:\n print \"Oops! Connection Error. 
Try again\"", "def getlinks(url):\n page = Linkfetcher(url)\n page.linkfetch()\n for i, url in enumerate(page):\n print(\"%d ==> %s\" % (i, url))", "def get_urls():\r\n return []", "def parse_page(url):\n page_content = download_page(url)\n if page_content:\n link_list = re.findall('src=\"(.*?)\"', page_content)\n if len(link_list) == 0:\n print('get 0 links from page {0}'.format(url))\n logging.info('get 0 links from page {0}'.format(url))\n return set()\n else:\n return set(link_list)\n else:\n return set()", "def getUrls(url):\n f = requests.get(url)\n p = MyParser()\n p.feed(f.text)\n list_of_urls = p.output_list\n #deal with possible strange None values\n list_of_urls = [url for url in list_of_urls if url is not None]\n for url in list_of_urls:\n if 'http' not in url: list_of_urls.remove(url)\n return list_of_urls", "def get_urls(r):\n url_list = find_urls(r)\n url_list += find_tag_urls(r)\n return set(url_list)", "def urls(self) -> list[str]:\r\n ...", "def getSolutionUrls(url: str) -> list:\n \n try:\n response = get(url)\n html_soup = BeautifulSoup(response.text, 'html.parser')\n soln_containers = html_soup.find_all('div', class_ = 'result')\n solutionUrls = [website_url + container.h1.a[\"href\"] for container in soln_containers]\n return solutionUrls\n except:\n print(\"getSolutionUrls: URL error: \" + str(url))\n return None", "def get_urls(root):\n urls = []\n classes = \"|\".join([\"msl_organisation_list\", \"view-uclu-societies-directory\",\n \"atoz-container\", \"listsocieties\", \"block-og-menu\"])\n\n req = requests.get(root, headers) # , cookies=cookies)\n soup = BeautifulSoup(req.content, 'html.parser')\n main = soup.find(['div', 'ul', 'section'], class_=re.compile(classes))\n\n for a in main.find_all('a', href=True):\n url = a['href']\n if url.startswith(\"/\"):\n urls.append(domain + url)\n\n if url.startswith(\"https://society.tedu.edu\"):\n urls.append(url)\n\n urls = list(dict.fromkeys(urls))\n return urls", "def get_urls(url, list_of_urls, main_url):\n if url in list_of_urls:\n return list_of_urls\n\n try:\n content = urlopen(url).read()\n soup = BeautifulSoup(content, \"html.parser\")\n list_of_urls.append(url)\n\n links = [urljoin(url, link.get('href')) for link in soup.find_all('a') if not urljoin(url,\n link.get('href')).endswith(EXTENSIONS)]\n for link in links:\n if link.startswith(main_url) and link not in list_of_urls:\n list_of_urls = get_urls(link, list_of_urls, main_url)\n\n except urllib.error.HTTPError or ValueError:\n pass\n\n return list_of_urls", "def __url_list(self, page):\n url_list = []\n for tag_a in page.find_all('a'):\n href = str(tag_a.get('href'))\n if self.__verify(href):\n url = parse.quote(self.__add_main_site(href), '/:#')\n url_list.append(url)\n return url_list", "def ListUrlEntries(self):\n return [WprUrlEntry(request, self._http_archive[request])\n for request in self._http_archive.get_requests()]", "def google_find_urls(keyword):\n return [ result['url'] for result in google_search(keyword) ]", "def parse_page(url):\n page_content = download_page(url)\n if page_content:\n link_list = re.findall('\"ou\":\"(.*?)\"', page_content)\n if len(link_list) == 0:\n print('get 0 links from page {0}'.format(url))\n logging.info('get 0 links from page {0}'.format(url))\n return set()\n else:\n return set(link_list)\n else:\n return set()", "def retrieve_links (url):\n # Define opener for the URL\n opener = urllib2.build_opener ()\n \n # Catch exceptions related to the URL opening\n try:\n # Read content from URL\n t = opener.open (url).read 
()\n \n # Parse content read\n parser = Soup(t)\n \n # Obtain only links from URL provided\n return [x['href'] for x in parser.findAll('a') if x.has_attr('href')]\n\n # Capture possible message exceptions related to the URL opening\n except urllib2.URLError:\n return []", "def getURLs(modelURL):\n\n #Get model page as soup\n soup, _ = getPage(modelURL)\n\n #Check if page available\n if soup is None:\n #Not available - Break\n print(\"Can't find Model URL\")\n quit()\n \n #Get URLs on first page\n urlList = listingURLs(soup)\n\n #Find last page number if available\n try:\n lastPageURL = soup.find(class_=\"page-number-navigation__link page-number-navigation__link-last link link--base-color-primary link--hover-color-none link--no-underline\")['href']\n lastPage = int(re.search('page-(\\d+)', lastPageURL).group(1))\n except:\n #No Last page button - Only one page of results\n lastPage = None\n\n #Loop for all pages if available\n if lastPage is not None:\n for i in range(2, lastPage + 1):\n #Create Page URL\n urlParts = modelURL.split(\"/\")\n urlParts = urlParts[:-1] + [f\"page-{i}\"] + urlParts[-1:]\n pageURL = \"/\".join(urlParts)\n #Get Page\n soup, _ = getPage(pageURL)\n #Check if page available\n if soup is None:\n #Not available, skip iteration\n continue\n #Get Pages URLs\n urlList += listingURLs(soup)\n\n return urlList", "def getPuzzleUrls(url: str) -> list:\n try:\n response = get(url)\n html_soup = BeautifulSoup(response.text, 'html.parser')\n puzzle_containers = html_soup.find_all('div', class_ = 'result')\n puzzle_urls = [website_url + container.a[\"href\"] for container in puzzle_containers]\n return puzzle_urls\n \n except:\n print(\"getPuzzleUrls: URL error \" + str(url))\n return None", "def getUrlsList(self):\n\t\ttry:\n\t\t\tf = ur.urlopen(self.sitemap_url)\n\t\t\tres = f.readlines()\n\t\t\tfor d in res:\n\t\t\t data = re.findall('<loc>(https?:\\/\\/.+?)<\\/loc>',d)\n\t\t\t for i in data:\n\t\t\t\tself.urls.append(i)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(str(e))\n\t\t\tself.app.printflush(traceback.format_exc())\n\t\tself.fetched_count = len(self.urls)", "def get_urls(base_url):\n res = requests.get(base_url, headers=HEADERS)\n res = BeautifulSoup(res.text, 'html.parser')\n res = res.find_all(href=re.compile('pdf'))\n return res", "def get_page_urls(self, html_page):\n soup = BeautifulSoup(html_page, 'html.parser')\n links = [link.get('href') for link in soup.find_all('a') if link.get('href') != None]\n return(links)", "def find_urls(r):\n http_match = re.findall(r'https:\\/\\/[\\w\\/?=.-]+', r)\n url_list = []\n if http_match:\n for match in http_match:\n if match not in url_list:\n url_list.append(match)\n return url_list", "def fetch_all_urls(session, urls):\n return [fetch_url(session, url) for url in urls]", "def _get_urls(soup: bs4.BeautifulSoup, keyword: str=\"\") -> list:\n valid_urls = []\n tag = soup.find_all('a')\n for text in tag:\n href_text = text.get('href')\n url = href_text[href_text.find('http'):]\n if keyword and keyword not in url:\n pass\n else:\n if \"http\" in url and not any(\n invalid_url in url for invalid_url in FILTER_URLS\n ):\n valid_urls.append(url)\n return valid_urls" ]
[ "0.78919035", "0.77645457", "0.77268726", "0.75712234", "0.7567013", "0.7351647", "0.73447627", "0.73119706", "0.73077196", "0.7221987", "0.72173417", "0.7216986", "0.7213785", "0.719121", "0.7184347", "0.71809256", "0.71319926", "0.70663166", "0.70220536", "0.70175326", "0.7011658", "0.70100003", "0.7009424", "0.69512534", "0.69318014", "0.6908782", "0.69072294", "0.6896645", "0.68804127", "0.6817863" ]
0.7906353
0
Returns a list of slash indices in a string
def get_all_slash_indices(url): list_slash_indices = [] slash_ndx = 0 while slash_ndx != -1: slash_ndx = url.find('/', slash_ndx + 1) if slash_ndx != -1: list_slash_indices.append(slash_ndx) return list_slash_indices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def string_to_index(s):\n s = Unquote(s)\n if s == \".\":\n return ()\n return tuple(s.split(\"/\"))", "def lines(string:str) -> List[int]:\n # find all index with new lines\n return find(string, '\\n')", "def find(string:str, char:str) -> List[int]:\n return [i for i, ltr in enumerate(string) if ltr == char]", "def explode(part):\n if isinstance(part, str):\n ans = []\n while len(part) > 0:\n parts = part.partition(\"/\")\n ans.append(parts[0])\n if parts[1] != \"\":\n ans.append(SLASH)\n part = parts[2]\n return ans\n\n return [part]", "def get_index(s):\n return int(s[s.find(\"[\")+1:s.find(\"]\")])", "def PathNames(xpath: Text) -> List[Text]:\n if not xpath or xpath == '/': # A blank xpath was provided.\n return []\n return re.split(r'''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''',\n xpath.strip('/').strip('/')) # Removes leading/trailing '/'.", "def _path_names(xpath):\n if not xpath or xpath == '/': # A blank xpath was provided at CLI.\n return []\n return xpath.strip().strip('/').split('/') # Remove leading and trailing '/'. For example it turns it into ['interfaces', 'interface[name=Ethernet1]', 'state', 'counters']", "def tokenize_path(path):\n # form a list of tuples that mark the start and end positions of steps\n separators = []\n last_position = 0\n i = -1\n in_string = False\n while i < len(path) - 1:\n i = i + 1\n if path[i] == \"'\":\n in_string = not in_string\n if in_string:\n # slashes within strings are not step separators\n continue\n if path[i] == '/':\n if i > 0:\n separators.append((last_position, i))\n if (path[i+1] == '/'):\n last_position = i\n i = i + 1\n else:\n last_position = i + 1\n separators.append((last_position, len(path)))\n\n steps = []\n for start, end in separators:\n steps.append(path[start:end])\n return steps", "def getIndexes(string1,string2):\n ret = []\n ind = string1.find(string2)\n \n while (ind > -1 and ind < len(string1)):\n ret.append(ind)\n ind = string1.find(string2,ind + 1)\n \n return ret", "def slash_count(l):\r\n l= str(l)\r\n if l.count('/')<5:\r\n return 0\r\n elif l.count('/')>=5 and l.count('/')<=7:\r\n return 2\r\n else:\r\n return 1", "def split_string(string: str, indices: list) -> list:\n return [string[n:m] for n, m in zip(indices[:-1], indices[1:])]", "def find_strings(input):\n\n string_ranges = []\n\n def count_backslashes(input, from_pos):\n num_backslashes = 0\n i = from_pos\n while i >= 0:\n if input[i] == '\\\\':\n num_backslashes += 1\n i -= 1\n else:\n break\n return num_backslashes\n\n for delim in STRING_DELIMITERS:\n start = -1\n for i in infinite():\n first = input.find(delim, start + 1)\n if first == -1: break # to next delim\n start = first + 1\n if count_backslashes(input, first - 1) % 2 != 0: continue # Esacped: to next delim\n next = first\n for i in infinite():\n next = input.find(delim, next + 1)\n if next == -1: break # to next delim\n if count_backslashes(input, next - 1) % 2 == 0: break # Not escaped: stop looking\n\n if next == -1: # ??? 
unmatches quotations\n string_ranges.append((first, len(input)))\n break # to next delim\n start = next\n string_ranges.append((first, next))\n\n return sorted(string_ranges)", "def splitpath(path):\n\n # FIXME perhaps call op.split repetitively would be better.\n #s = string.split( path, '/' ) # we work with fwd slash only inside.\n\n#We have decided to use all kind of separator\n s = []\n while True:\n first, second = op.split(path)\n s.append(second)\n if first == \"\":\n break\n else:\n path = first\n s.reverse()\n if len(s) == 1 and s[0] == \"\":\n s = []\n return s", "def parse_path(path: str) -> list:\n arr = path.split(\"/\")\n if arr[0] == \"m\":\n arr = arr[1:]\n if len(arr) == 0:\n return []\n if arr[-1] == \"\":\n # trailing slash\n arr = arr[:-1]\n for i, e in enumerate(arr):\n if e[-1] == \"h\" or e[-1] == \"'\":\n arr[i] = int(e[:-1]) + 0x80000000\n else:\n arr[i] = int(e)\n return arr", "def jp_split(s):\n if s == '' or s == None:\n return []\n\n def _decode(s):\n s = s.replace('~1', '/')\n return s.replace('~0', '~')\n\n return [_decode(ss) for ss in s.split('/')]", "def locate_char(c, s):\n return [i for i, l in enumerate(s) if l == c]", "def char_iter(string: str) -> Generator:\n for slash, ch in re.findall(r\"(\\\\?)([\\s\\S])\", string):\n yield slash + ch", "def __split_path(path: str) -> List[str]:\n return [part for part in path.split('/') if part] # Splits path at '/', handles extra slashes in the process", "def index(self, path):\n try:\n indices = [int(x) if x.isdigit() else x for x in split(r'[\\/\\[\\]]+', path[1:])]\n return reduce(lambda x, y: x[y], indices, self.document)\n except:\n return None", "def extract(s: str, delimiter: str=DELIMITER) -> []:\n s += DELIMITER\n # create rotations\n rotations = [s[index:] + s[:index] for index in range(len(s))]\n rotations.sort()\n return list(enumerate(rotations))", "def separate_string_pattern_in_notes(pattern):\n output = []\n cont = 0\n for idx in range(len(pattern) - 1):\n if pattern[idx + 1] == \"#\" or pattern[idx + 1] == \"-\":\n output.append(pattern[idx] + pattern[idx + 1])\n elif pattern[idx] != \"#\" and pattern[idx] != \"-\":\n output.append(pattern[idx])\n if pattern[-1] != \"#\" and pattern[-1] != \"-\":\n output.append(pattern[-1])\n return output", "def uri_piece_to_list(uri):\n pieces = uri.split(u'/')\n assert pieces[0] == '['\n assert pieces[-1] == ']'\n chunks = []\n current = []\n depth = 0\n for piece in pieces[1:-1]:\n if piece == u',' and depth == 0:\n chunks.append('/' + '/'.join(current))\n current = []\n else:\n current.append(piece)\n if piece == '[':\n depth += 1\n elif piece == ']':\n depth -= 1\n chunks.append('/' + '/'.join(current))\n return chunks", "def split_path(path):\n\n if type(path) != str:\n return []\n\n # replace multiple occurrences of \"/\" with just one,\n # i.e. 
\"page1//page2///page3\" -> \"page1/page2/page3\"\n path = re.sub('/+', '/', path)\n path = path.split(\"/\") # form a list of path steps\n path = [x.lower() for x in path if x != \"\"] # filter out empty strings, convert to lowercase\n\n return path", "def _regex_split(pattern, string):\n splits = list((m.start(), m.end()) for m in re.finditer(pattern, string))\n starts = [0] + [i[1] for i in splits]\n ends = [i[0] for i in splits] + [len(string)]\n return [string[start:end] for start, end in zip(starts, ends)]", "def read(string):\n\treturn (re.finditer('(?<=\\[)[a-z]+(?=\\])', string), re.finditer('(?<=\\])[a-z]+|[a-z]+(?=\\[)', string))", "def path_elements(path):\n result = []\n (head, tail) = os.path.split(path)\n while tail != \"\":\n result.insert(0, tail)\n (head, tail) = os.path.split(head)\n result.insert(0, head)\n return result", "def word_index_separator(input_str_ws):\r\n\r\n word_index_list = []\r\n\r\n \"\"\" Loops over matches for words in input string, and saves index values to\r\n array. \"\"\"\r\n for match_ws in re.finditer(r'\\b\\w+(?:[@\\'.]\\w|\\w)*', input_str_ws):\r\n temp_word_index_tuple = (match_ws.start(), match_ws.end() - 1)\r\n word_index_list.append(temp_word_index_tuple)\r\n\r\n # Returns the word index list, converted to a tuple\r\n return tuple(word_index_list)", "def _path_parts(path):\n # clean it up. this removes duplicate '/' characters and any that may\n # exist at the front or end of the path.\n return [pp for pp in path.split(\"/\") if pp]", "def split_by_hash_sign(path: str) -> List[str]:\n if \"#\" in path:\n split_path = path.split(\"#\")\n if len(split_path) > 2:\n raise Exception(f\"There should be maximum one '#' in the path {path}\")\n return split_path\n return [path]", "def split_escaped(string, separator):\n\n result = []\n current = ''\n escaped = False\n for char in string:\n if not escaped:\n if char == '\\\\':\n escaped = True\n continue\n elif char == separator:\n result.append(current)\n current = ''\n continue\n escaped = False\n current += char\n result.append(current)\n return result" ]
[ "0.6580785", "0.5898524", "0.58867925", "0.5872233", "0.5859471", "0.5855735", "0.58529186", "0.5785244", "0.5772639", "0.57638735", "0.57608557", "0.575868", "0.5753656", "0.5733558", "0.5691506", "0.5681961", "0.56470704", "0.56438243", "0.5619379", "0.55685306", "0.55645937", "0.55210567", "0.55142283", "0.5486956", "0.54610324", "0.54443663", "0.54388523", "0.54298925", "0.5424106", "0.5405333" ]
0.73646647
0
Yields all non-hypo-isomorphic hypo_indicators, each of which is a list of lists, where val = [ihypos] and ind = ipatt.
def iter_hypo_indicator(nhypo, n_pattern, n_overlap): # for i, x in enumerate(iter_hypo_indicator(2,6,5)): # print(i, x) base_bag = [[]] base_count = 0 additional_bag =[[]] additional_count = 0 for hypo_base in pattern_hypo_product_space(nhypo, n_pattern): if hypo_indicator_filter(hypo_base, nhypo, base_bag): base_bag.append([]) base_count += 1 base_bag[base_count] = hypo_base # print(base_bag) for hypo_overlap in pattern_powerhypo_product_space(nhypo-1, n_pattern): if overlap_filter(hypo_overlap, n_overlap): hypo_overlap = remap_overlap_indicator(hypo_overlap, hypo_base, nhypo) hypo_indicator = concatenate_hypo_indicators(hypo_base, hypo_overlap) if not is_hypobag_isomorphic(additional_bag, hypo_indicator, nhypo): additional_bag.append([]) additional_count += 1 additional_bag[additional_count] = hypo_indicator # print(additional_bag) yield hypo_indicator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_all_hypo_isomorphic(hypo_indicator, nhypo):\n hypo_ind = [i for i in range(nhypo)]\n for permuted in uperm(hypo_ind):\n perm_hypo_indicator = []\n for li in hypo_indicator:\n if len(li) >= 1:\n perm_li = [permuted[v] for v in li]\n perm_hypo_indicator.append(sorted(perm_li))\n elif len(li) == 0:\n perm_hypo_indicator.append(li)\n yield perm_hypo_indicator", "def _get_initial_hypos(self):\n return [CombiStatePartialHypo(self.get_predictor_states())]", "def derive_new_hyps(self, src_sentence, hyp):\n new_hyps = []\n for i in xrange(src_sentence):\n if hyp.covering[i] == '1':\n continue\n cur_phrase = []\n j = 0\n while i+j < len(src_sentence):\n phrase.append(src_sentence[i + j])\n if phrase in self.phrase_table and hyp.covering[i + j] == '0':\n for native_phrase, prob in self.phrase_table[phrase]:\n new_hyp = Hypothesis(native_phrase, hyp.covering[:i] + '1' * (j + 1 - i) + covering[j+1:], i, i+j, hyp, self.heuristic_table, self.language_model, self.phrase_table)\n new_hyps.append(new_hyp)\n j += 1\n else:\n break\n return new_hyps", "def phis ( self ) :\n return tuple ( self.__phis )", "def noisy_cells(self, hists, thresholds):\n return [[[x + 1, z + 1, i + 1] for x in range(h.GetNbinsX()) for z in range(h.GetNbinsY()) if h.GetBinContent(x + 1, z + 1) > threshold] for i, (h, threshold) in enumerate(zip(hists, thresholds))]", "def _computeNoisyPositions(self, state):\n positions = state.getGhostPositions()\n w = self.args.w\n w2 = 2*w+1\n div = float(w2 * w2)\n new_positions = []\n for p in positions:\n (x, y) = p\n dist = util.Counter()\n for i in range(x - w, x + w + 1):\n for j in range(y - w, y + w + 1):\n dist[(i, j)] = 1.0 / div\n dist.normalize()\n new_positions.append(util.chooseFromDistribution(dist))\n return new_positions", "def _computeNoisyPositions(self, state):\n positions = state.getGhostPositions()\n w = self.args.w\n w2 = 2*w+1\n div = float(w2 * w2)\n new_positions = []\n for p in positions:\n (x, y) = p\n dist = util.Counter()\n for i in range(x - w, x + w + 1):\n for j in range(y - w, y + w + 1):\n dist[(i, j)] = 1.0 / div\n dist.normalize()\n new_positions.append(util.chooseFromDistribution(dist))\n return new_positions", "def remove_hydrogens(self) -> None:\n for cid, c in self:\n for rid, r in c:\n for aid, a in r:\n if a.element == 'H':\n print('removing H at %s' % aid)\n r.remove_atom(a)", "def annihilation_list(self,other):\n if not isinstance(other,SlaterDeterminant):\n raise TypeError(\"Parameter other must be a SlaterDeterminant instance.\")\n diff = np.array(other) - np.array(self)\n if np.sum(np.abs(diff)) == 0: \n indices = []\n else:\n indices = np.where(diff==-1)[0].tolist()\n return indices", "def removehypfromcounts(self,ixHyp):\n # hypothesis[-1] is tuple of (ixChar,ixRoom,ixWeap)\n # Use numpy tuple indexing for the scenario mx:\n self.hypCountByScenario[self.hypotheses[ixHyp][-1]] -= 1\n # For 1D numpy arrays, use scalar indexing:\n self.hypCountByCharacter[self.hypotheses[ixHyp][-1][0]] -= 1\n self.hypCountByRoom[self.hypotheses[ixHyp][-1][1]] -= 1\n self.hypCountByWeapon[self.hypotheses[ixHyp][-1][2]] -= 1", "def phenotype(indiv):\n pheno = [[id, problem['weights'][id], problem['values'][id]] for id in range(len(indiv)) if indiv[id] == 1]\n return pheno", "def phrases(instr_pianorolls, phrase_length):\n X_instr_phrases = []\n y_instr_phrases = []\n\n for j, song in enumerate(instr_pianorolls):\n \n phrase_end = phrase_length # initialize the end of a phrase to be 4 bars from first tick\n # print(j)\n for phrase_start in 
range(0,len(song)-phrase_length + 1, phrase_length):\n # print(phrase_start, phrase_end)\n y_phrase = song[phrase_start:phrase_end] # grab a phrase\n \n if(np.any(np.count_nonzero(y_phrase, axis=1)) > 0): # if any string bar is not empty\n X_phrase = instr_pianorolls[j][phrase_start:phrase_end] # grab the input phrase at same song/indices\n \n if(np.any(np.count_nonzero(X_phrase, axis=1)) > 0):# if any melody bar is not empty\n y_instr_phrases.append(y_phrase)\n X_instr_phrases.append(X_phrase)\n # print(\"adding phrases\")\n \n phrase_end += phrase_length\n\n return X_instr_phrases, y_instr_phrases", "def hyponym(self, sense=None):\n s = self._synset(self.text)\n\n if not s:\n return []\n\n hypo = s.hyponyms()\n\n results = list()\n for h in hypo:\n results.append(h.lemma_names())\n\n if not sense:\n return results\n\n # TODO: Exception when not an int\n return results[:sense + 1]", "def prune_hyp(hyp):\r\n if EOS_ID in hyp:\r\n idx = hyp.index(EOS_ID)\r\n return hyp[:idx]\r\n else:\r\n return hyp", "def getNonShapeUncertainties(self, pars = []):\n\t\tif not self.chi2init:\n\t\t\traise RuntimeError(\"Chi2 not initialized, cannot evaluate\")\n\t\tif len(pars) > 0:\n\t\t\tself.setShapeParameters(pars)\n\t\tA,B,C = self.getOwnTheoryABC()\n\t\tunc = []\n\t\tfor i in range(len(A)):\n\t\t\tunc.append(A[i,i]**-.5)\n\t\treturn unc", "def prune_hyp(hyp):\n if constant.EOS_ID in hyp:\n idx = hyp.index(constant.EOS_ID)\n return hyp[:idx]\n else:\n return hyp", "def getNaturalIsotopics(self):\n return\n yield", "def getNaturalIsotopics(self):\n return\n yield", "def __IPAS_helper(self, inh, ass):\r\n list_of_ipas_relation = list()\r\n if inh is None or ass is None:\r\n logger.info(\"There are no IPAS relations\")\r\n else:\r\n for parent_and_child in inh:\r\n parent = parent_and_child.attrib.get(\"ci\")\r\n child = parent_and_child.attrib.get(\"cj\")\r\n for relation in ass:\r\n if parent == relation.attrib.get(\"cj\"):\r\n ipas_tuple = (parent, child, relation.attrib.get(\"ci\"))\r\n list_of_ipas_relation.append(ipas_tuple)\r\n logger.debug(\"Found IPAS: (%s, %s, %s)\" % (ipas_tuple[0], ipas_tuple[1], ipas_tuple[2]))\r\n list_of_ipas_relation = list(dict.fromkeys(list_of_ipas_relation))\r\n return list_of_ipas_relation", "def listNonDegenerate(self):\n return arange(self.nelems())[~self.testDegenerate()]", "def ideal_HNF(I):\n N = I.norm()\n (a, c), (b, d) = [[ZZ(x) for x in row] for row in I.pari_hnf().python()]\n assert a > 0 and d > 0 and N == a * d and d.divides(a) and d.divides(b) and 0 <= c < a\n return [a, c, d]", "def noh(ls, dsets):\n data_set = build_set(ls[1], dsets)\n\n noh_set = set()\n pred = oechem.OEIsHydrogen()\n\n for idx in data_set:\n atom = system.GetAtom(oechem.OEHasAtomIdx(idx))\n if not pred(atom):\n noh_set.add(idx)\n\n return noh_set", "def _generators_for_H(self):\n if self.level() in [1, 2]:\n return []\n return [ZZ(x) for x in IntegerModRing(self.level()).unit_gens()]", "def _filter_unanswerable_samples(self):\n a = []\n q = []\n annotations = []\n for i in range(len(self.answers)):\n if len(self.answers[i].nonzero()) > 0:\n a.append(self.answers[i])\n q.append(self.questions[i])\n\n annotations.append(self.annotations[i])\n self.answers = a\n self.questions = q\n self.annotations = annotations", "def getBondsNoH(self):\n try:\n return self._bondListNoH\n except AttributeError:\n pass\n bondPointers=self._raw_data[\"BONDS_WITHOUT_HYDROGEN\"]\n self._bondListNoH = self._getBonds(bondPointers)\n return self._bondListNoH", "def vir_indices(self):\n indices = []\n for 
index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def Hrep_generator(self):\n for H in self.Hrepresentation():\n yield H", "def iter_unsolved_cells(self) -> Iterable[Cell]:\n\t\treturn (\n\t\t\tcell\n\t\t\tfor cell in self\n\t\t\tif not cell.value()\n\t\t)", "def halo_progenitor_ids(self, index):\n _progenitors = []\n\n def rec(i):\n _progenitor_ids = self.data[self.data[\"descendantHost\"] == i][\n \"hostIndex\"\n ].unique()\n\n logging.debug(\"Progenitors recursion: %d > %d (%d progenitors)\",\n index,\n i,\n len(_progenitor_ids))\n \n if len(_progenitor_ids) == 0:\n return\n \n for _progenitor_id in _progenitor_ids:\n # if _progenitor_id not in _progenitors:\n # TODO: this only eliminates fly-byes\n _progenitors.append(_progenitor_id)\n rec(_progenitor_id)\n\n rec(index)\n\n logging.info(\n \"%d progenitors found for halo %d\", len(_progenitors), index\n )\n return _progenitors", "def specified_unchanging_attributes(self) -> List[int]:\n indices = []\n\n for idx, (cpi, epi) in enumerate(zip(self.condition, self.effect)):\n if isinstance(epi, ProbabilityEnhancedAttribute):\n if cpi != self.cfg.classifier_wildcard and \\\n epi.does_contain(cpi):\n indices.append(idx)\n else:\n if cpi != self.cfg.classifier_wildcard and \\\n epi == self.cfg.classifier_wildcard:\n indices.append(idx)\n\n return indices" ]
[ "0.7172562", "0.57043946", "0.51082957", "0.50979763", "0.5024379", "0.50155944", "0.50155944", "0.5006111", "0.49870583", "0.49818754", "0.49676302", "0.4961464", "0.4960568", "0.4919646", "0.49182004", "0.4864977", "0.4855658", "0.4855658", "0.48522", "0.48485523", "0.48262048", "0.48256078", "0.48210722", "0.48124418", "0.4769759", "0.4761549", "0.47549272", "0.47501564", "0.4747365", "0.4745143" ]
0.67800134
1
Yields all hypo-isomorphic indicators of the input. Can deal with empty values.
def iter_all_hypo_isomorphic(hypo_indicator, nhypo): hypo_ind = [i for i in range(nhypo)] for permuted in uperm(hypo_ind): perm_hypo_indicator = [] for li in hypo_indicator: if len(li) >= 1: perm_li = [permuted[v] for v in li] perm_hypo_indicator.append(sorted(perm_li)) elif len(li) == 0: perm_hypo_indicator.append(li) yield perm_hypo_indicator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_hypo_indicator(nhypo, n_pattern, n_overlap):\n # for i, x in enumerate(iter_hypo_indicator(2,6,5)):\n # print(i, x)\n base_bag = [[]]\n base_count = 0\n additional_bag =[[]]\n additional_count = 0\n for hypo_base in pattern_hypo_product_space(nhypo, n_pattern):\n if hypo_indicator_filter(hypo_base, nhypo, base_bag):\n base_bag.append([])\n base_count += 1\n base_bag[base_count] = hypo_base\n # print(base_bag)\n for hypo_overlap in pattern_powerhypo_product_space(nhypo-1, n_pattern):\n if overlap_filter(hypo_overlap, n_overlap):\n hypo_overlap = remap_overlap_indicator(hypo_overlap, hypo_base, nhypo)\n hypo_indicator = concatenate_hypo_indicators(hypo_base, hypo_overlap)\n if not is_hypobag_isomorphic(additional_bag, hypo_indicator, nhypo):\n additional_bag.append([])\n additional_count += 1\n additional_bag[additional_count] = hypo_indicator\n # print(additional_bag)\n yield hypo_indicator", "def inequality_generator(self):\n for H in self.Hrepresentation():\n if H.is_inequality():\n yield H", "def _get_initial_hypos(self):\n return [CombiStatePartialHypo(self.get_predictor_states())]", "def _generators_for_H(self):\n if self.level() in [1, 2]:\n return []\n return [ZZ(x) for x in IntegerModRing(self.level()).unit_gens()]", "def equation_generator(self):\n for H in self.Hrepresentation():\n if H.is_equation():\n yield H", "def getNaturalIsotopics(self):\n return\n yield", "def getNaturalIsotopics(self):\n return\n yield", "def Hrep_generator(self):\n for H in self.Hrepresentation():\n yield H", "def get_harmonics(self, f):\n for h in range(self.harmonics_arr_size):\n yield f * (h + 1)", "def iter_minterms(self):\n for point in self.iter_ones():\n space = [(v if val else -v) for v, val in point.items()]\n yield And(*space)", "def _embellish(response_data):\n kanji_script = scripts.Script.Kanji\n for pivot, pivot_type, is_correct, timestamp in response_data:\n yield (pivot, pivot_type, is_correct, timestamp)\n if pivot_type == 'w' and scripts.contains_script(kanji_script, pivot):\n for kanji in scripts.unique_kanji(pivot):\n yield kanji, 'k', is_correct, timestamp", "def yield_isosceles_triangles(cls):\n for a in range(1, 101):\n for c in range(a + 1, 2 * a):\n if 2 * (a ** 2) != c ** 2:\n yield a, a, c", "def iter_maxterms(self):\n for point in self.iter_zeros():\n space = [(-v if val else v) for v, val in point.items()]\n yield Or(*space)", "def em(indiv_dict, hyplo_collection):\r\n\thyplo_dict=defaultdict(float)\r\n\tres=[]\r\n\tres_pairs=[]\r\n\tstart_fre= np.random.dirichlet(np.ones(len(hyplo_collection)),size=1)[0]\r\n\ti=0\r\n\tfor x in hyplo_collection:\r\n\t\thyplo_dict[x]=start_fre[i]\r\n\t\ti+=1\r\n\tprev_dict=hyplo_dict\r\n\thyplo_dict=phase_cnt(indiv_dict, hyplo_dict)\r\n\twhile True:\r\n\t\tif check_converge(prev_dict, hyplo_dict)==False:\r\n\t\t\tprev_dict=hyplo_dict\r\n\t\t\thyplo_dict=phase_cnt(indiv_dict, hyplo_dict)\r\n\t\telse:\r\n\t\t\tbreak\r\n\tfor k,v in indiv_dict.iteritems():\r\n\t\tpair=get_best(v, hyplo_dict)\r\n\t\tres+=pair\r\n\t\tres_pairs.append(pair)\r\n\tkey_list=indiv_dict.keys()\r\n\treturn list(set(res)), res_pairs", "def remove_hydrogens(self) -> None:\n for cid, c in self:\n for rid, r in c:\n for aid, a in r:\n if a.element == 'H':\n print('removing H at %s' % aid)\n r.remove_atom(a)", "def perm_vs_hyp():\n\n return [\"P\",\"P\",\"P\",\"P\",\"P\"]", "def harmonics(self) -> List[int]:\n return tones_to_harmonic_segment(self.tones)", "def iter_alpha_helicies(self):\n if self.default_model:\n return self.default_model.iter_alpha_helicies()\n 
return iter(list())", "def items():\n for output in outputs:\n if isinstance(output, boolfunc.Function):\n output = output.unbox()\n if output in (0, \"0\"):\n yield PC_ZERO\n elif output in (1, \"1\"):\n yield PC_ONE\n elif output in \"-xX\":\n yield PC_DC\n else:\n fstr = \"expected output in [01-xX], got {}\"\n raise ValueError(fstr.format(output))", "def yield_right_isosceles_triangles(cls):\n for i in range(1, 142):\n yield 1 * i, 1 * i, sqrt(2) * i", "def yield_equilateral_triangles(cls):\n for i in range(1, 201):\n yield i-.5, i-.5, i-.5\n yield i, i, i", "def h():\r\n x,y = 1,1\r\n while True:\r\n yield x\r\n x = x + (y)*(-1)**y\r\n y = y + 1", "def homothin():\n\n Iab = se2hmt(binary([[0,0,0],\n [0,1,0],\n [1,1,1]]),\n binary([[1,1,1],\n [0,0,0],\n [0,0,0]]))\n return Iab", "def palindromes():\n for n in count(1):\n if str(n) == str(n)[::-1]:\n yield n", "def __emptygen():\n if False:\n yield", "def noh(ls, dsets):\n data_set = build_set(ls[1], dsets)\n\n noh_set = set()\n pred = oechem.OEIsHydrogen()\n\n for idx in data_set:\n atom = system.GetAtom(oechem.OEHasAtomIdx(idx))\n if not pred(atom):\n noh_set.add(idx)\n\n return noh_set", "def inverted(values, input_min=0, input_max=1):\n values = _normalize(values)\n if input_min >= input_max:\n raise ValueError('input_min must be smaller than input_max')\n for v in values:\n yield input_min + input_max - v", "def iter_alpha_helicies(self):\n return iter(self.alpha_helix_list)", "def prune_hyp(hyp):\r\n if EOS_ID in hyp:\r\n idx = hyp.index(EOS_ID)\r\n return hyp[:idx]\r\n else:\r\n return hyp", "def _select_torsions_without_h(self, torsion_list):\n heavy_torsions = []\n for torsion in torsion_list:\n is_h_present = [torsion.a.IsHydrogen(), torsion.b.IsHydrogen(), torsion.c.IsHydrogen(), torsion.d.IsHydrogen()]\n if all(entry == False for entry in is_h_present):\n heavy_torsions.append(torsion)\n else:\n #there is a hydrogen in this torsion, so it is omitted\n pass\n\n return heavy_torsions" ]
[ "0.6780316", "0.59080976", "0.58561957", "0.5720207", "0.56647605", "0.56529135", "0.56529135", "0.54241025", "0.52539307", "0.5200523", "0.51851517", "0.51129967", "0.50811005", "0.5075875", "0.49981025", "0.49817088", "0.49561143", "0.495255", "0.4951932", "0.49485445", "0.49307021", "0.49200365", "0.48768184", "0.48645884", "0.48217374", "0.4809404", "0.4798567", "0.47935298", "0.47851995", "0.4773075" ]
0.7139706
0
Distributed wordcount Flow successfully executes using a Local Dask cluster. The Flow run's state returns word count tuples stored in the state's associated Result object.
def test_mapreduce_wordcount(): state = mapreduce_wordcount.run( url="https://raw.githubusercontent.com/topher-lo/prefect-with-k8/main/src/prefect_kube_demo/data/dream.txt", executor=DaskExecutor(), ) task_ref = mapreduce_wordcount.get_tasks("reducer")[0] result = state.result[task_ref].result # Get top 3 tokens result_top_tokens = sorted(result, key=lambda x: x[1])[-3:] expected_top_tokens = [("will", 17), ("freedom", 13), ("from", 12)] assert state.is_successful() assert result_top_tokens == expected_top_tokens
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_and_get_word_count(self) -> int:\n r = requests.get(self.url)\n if r.status_code != status.HTTP_200_OK:\n raise ScraperException\n soup = BeautifulSoup(r.content, \"html.parser\")\n matches = soup(text=re.compile(f\"{self.word}\"))\n count = 0\n for match in matches:\n words = re.findall(fr\"\\b{self.word}\\b\", match)\n count = count + len(words)\n return count", "def _raw_word_count(self, job):\n return sum(len(sentence.words) for sentence in job)", "def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))", "def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()", "def result(self) -> int:\n return self._count", "def GetNumberOfResultsProcessed(self) -> int:\n return self.i", "def fetch_counts(swarming, start, end, state, tags, parallel):\n\n def process(data):\n return int(data['count'])\n delta = datetime.timedelta(days=1)\n return _fetch_daily_internal(delta, swarming, process, 'tasks/count', start,\n end, state, tags, parallel)", "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def test_wordCount(self):\n words = []\n for line in self.output:\n words.extend(string.split(line))\n wordCount = len(words)\n sampleTextWordCount = len(self.sampleSplitText)\n self.failUnlessEqual(wordCount, sampleTextWordCount)", "def raw_cluster_cmd_result(self, *args, **kwargs):\n kwargs['check_status'] = False\n proc = self.controller.run([os.path.join(BIN_PREFIX, \"ceph\")] + list(args), **kwargs)\n return proc.exitstatus", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def _get_count(results):\n return len(results)", "def word_count(self):\n\n # split words on default word boundaries for words list\n words = self.phrase.split() \n\n # translate removes punctuation only, normalizes to lower case\n normalized_words = [self.normalize_word(w) for w in words]\n\n # removes empty strings after stripping punctuation\n filtered_words = [w for w in normalized_words if w]\n\n # sets up default dictionary, so all entries are 0\n word_counts = collections.defaultdict(int) #{}\n\n # define word counting function for use in reduce\n def count_word(dictionary, word):\n dictionary[word] = dictionary[word] + 1\n return dictionary\n\n # count words into dictionary from word list\n reduce(count_word, filtered_words, word_counts)\n\n return word_counts", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "def word_count(text, word):\n \n #answer\n word_list = text.split(\" \")\n return (word_list.count(word))\n \n #return (text.count(word)) - deoesn't work", "def total_words(target_text):\n\n splited_text = target_text.split()\n nbwords = len(splited_text)\n return nbwords", "def test_py_job_result(self):\n test_app = self._create_py_app()\n class_path = \"example_jobs.word_count.WordCountSparkSessionJob\"\n conf = \"input.strings = 
['a', 'b', 'a', 'b']\"\n job = self._create_job(test_app, class_path, conf,\n ctx=self._get_functional_py_context())\n time.sleep(3)\n self._wait_till_job_is_done(job)\n job = self.client.jobs.get(job.jobId)\n self.assertEqual(\"FINISHED\", job.status)\n self.assertEqual({\"'a'\": 2, \"'b'\": 2}, job.result)", "def index_count(self,wordList,colName):\n\t\twordobj = topicmodels.RawDocs(wordList, \"stopwords.txt\")\n\t\twordobj.token_clean(1)\n\t\twordobj.stopword_remove(\"tokens\")\n\t\twordobj.stem()\n\t\tword_stems = set([s for d in wordobj.stems for s in d])\n\t\tdef count_frequency(doc_text):\n\t\t\tfreqs = pd.Series(collections.Counter(doc_text.split()))\n\t\t\treturn freqs.loc[set(freqs.index.values)&set(word_stems)].sum()\n\t\t#Create vector of frequencies for each paragraph of number of words in word_stems\n\t\tword_freqs = self.dataframe.text.apply(count_frequency)\n\t\t#Create vector of total number of words for each paragraph\n\t\ttotal_words = self.dataframe.text.apply(lambda x: len(x.split()))\n\t\t#Compute compute sentiment weights\n\t\tfreqs = word_freqs/total_words\n\t\tself.dataframe[colName]= freqs", "def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))", "def pcp_process_count(self):\n\n\t\tif self.PCPConnectionStatus() != ConnStateType.OK:\n\t\t\tself.pcp_internal_error('invalid PCP connection')\n\t\t\treturn None\n\t\t\n\t\tself._PCPWrite('N'.encode(), 1)\n\t\twsize = self.int_to_bytes(4)\n\t\tself._PCPWrite(wsize, 4)\n\t\tif self.PCPFlush() < 0:\n\t\t\treturn None\n\t\tif self.Pfdebug:\n\t\t\tself.Pfdebug.write(f'DEBUG: send: tos=\"N\", length={self.bytes_to_int(wsize)}\\n')\n\n\t\treturn self._process_pcp_response('N')", "def wordcount(input_file_path):\n\n # Execute word count command on the input file and obtain the output\n result = subprocess.check_output(['wc', input_file_path], stderr=subprocess.STDOUT)\n result = result.decode('utf-8')\n\n # Split the output string into lines, words, and characters\n (lines, words, characters, _) = result.split()\n\n # Create metadata dictionary\n metadata = {\n 'lines': lines,\n 'words': words,\n 'characters': characters\n }\n\n # Store metadata in result dictionary\n result = {\n 'metadata': metadata\n }\n\n # Return the result dictionary\n return result", "def CountFlowResults(self, client_id, flow_id, with_tag=None, with_type=None):\n return len(\n self.ReadFlowResults(\n client_id,\n flow_id,\n 0,\n sys.maxsize,\n with_tag=with_tag,\n with_type=with_type))", "def word_count(self):\n return Counter(self._normalize(self._raw_phrase_str))", "def local_ssd_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"local_ssd_count\")", "def get_total_distributed(self) -> int:\n return self._total_distributed.get()", "def get_number_of_words(self):\n filename = f'{self.path}/{self.filename}'\n # word_counter = {}\n # w_cnt = 0\n # x = 0\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n # for word in word_list:\n # w_cnt += 1\n # if word not in word_counter:\n # word_counter[word] = 1\n # else:\n # word_counter[word] = word_counter[word] + 1\n\n # for word in word_list:\n # x += 1\n # print(word, word.isalpha(), x)\n\n w_cnt = sum([a[0].isalpha() for a in word_list])\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 
'number_of_words', w_cnt)\n print(datetime.now(), '-', 'number_of_words for', self.filename, 'calculated =', w_cnt)\n return None", "def is_distributed() -> int:\n return collective.is_distributed()", "def train_function(ham_l, spam_l):\n ham_word_data = count_directory_words(ham_l)\n spam_word_data = count_directory_words(spam_l)\n return ham_word_data, spam_word_data", "def get_rank():\n if not torch.distributed.is_available():\n return 0\n if not torch.distributed.is_initialized():\n return 0\n return torch.distributed.get_rank()", "def test_count_reads_per_cluster(self):\n \n bedtool = pybedtools.BedTool(clipper.test_file(\"clip_analysis_test_peak_results.bed\"))\n \n total_reads, reads_per_cluster = count_reads_per_cluster(bedtool, None)\n \n self.assertListEqual([147,52, 239, 85, 47, 119, 58, 588, 92, 59, 196, 36], reads_per_cluster)\n self.assertEqual(sum([147,52, 239, 85, 47, 119, 58, 588, 92, 59, 196, 36]), total_reads)" ]
[ "0.5197405", "0.51779485", "0.51757103", "0.5119365", "0.5105249", "0.5077378", "0.50673574", "0.5065178", "0.50648814", "0.5048469", "0.50481963", "0.5037075", "0.5012519", "0.5001033", "0.49984887", "0.49640208", "0.49514315", "0.49300393", "0.4928559", "0.49280038", "0.48953736", "0.48758057", "0.48687533", "0.4866557", "0.48655432", "0.48269847", "0.4823187", "0.4822146", "0.48167002", "0.48079786" ]
0.6105681
0
Return true if the point (x1, y1) is inside the circle
def isInCircle(self,x1,y1,radius1): if(distance(self.x,x1,self.y,y1) < (self.radius+radius1)): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def incircle(self,xpos,ypos,cellx,celly):\n xcell, ycell = self.getcellcenter(cellx,celly)\n if ((xpos - xcell)**2 + (ypos - ycell)**2) < self.crad2:\n return True\n return False\n\n return cellx, celly", "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "def circles_overlapping(x1, y1, x2, y2, r):\n # print(abs((x2-x1)**2 + (y2-y1)**2))\n # print((2*r)**2)\n if (abs((x2-x1)**2 + (y2-y1)**2) > (2*r)**2):\n return False\n else: return True", "def circles_collide(x1: float, y1: float, r1: float, x2: float, y2: float, r2: float) -> bool:\n return distance_between_sq(x1, y1, x2, y2) <= (r1 + r2)**2", "def is_inside(self, x: int, y: int) -> bool:\n pass", "def is_point_within(self, x, y):\n return abs(x - self._x_position) <= self._x_length / 2 and abs(y - self._y_position) <= self._y_length / 2", "def is_inside(self, points):\n points = np.atleast_2d(points) - self.centroid\n return np.logical_and(\n np.linalg.norm(points, axis=-1) <= self.radius,\n # At present circles are not orientable, so the z position must\n # match exactly.\n np.isclose(points[:, 2], 0),\n )", "def __contains__(self, other):\n x, y = other\n return self.radius >= sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False", "def iscircle(a):\n if isarc(a):\n start=a[1][1] \n end=a[1][2]\n ## these are special, integer values that flag a true full\n ## circle.\n if start==0 and end==360:\n return True\n else:\n return False", "def is_inside(self, mX, mY, point):\n return (math.sqrt((point[0] - mX) * (point[0] - mX)\n + (point[1] - mY) * (point[1] - mY)) <= 2)", "def check_point_in_detector(p, radius=radius, height=height, distance=distance):\r\n if p[0]**2 + p[1]**2 <= radius**2: # Are the x and y coordinates in the circle?\r\n if (p[2] >= distance) and (p[2] <= height+distance): # Is the z coordinate between the distance and the height?\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def isinsidearcXY(c,p):\n\n x = c[0]\n r = c[1][0]\n if dist(x,p) > r:\n return False\n if iscircle(c):\n return True\n start = c[1][1]%360.0\n end = c[1][2]%360.0\n if end < start:\n end+= 360.0\n p2 = sub(p,x)\n ang = (atan2(p2[1],p2[0]) % pi2)*360/pi2\n\n if end <= 360.0:\n return (ang >= start and ang <= end)\n else:\n return ang >= start or ang <= (end-360.0)", "def contains_point(self, x, y): \r\n n = len(self.points)\r\n inside = False\r\n \r\n x1, y1 = self.points[0]\r\n for i in range(n + 1):\r\n x2, y2 = self.points[i % n]\r\n if y > min(y1, y2):\r\n if y <= max(y1, y2):\r\n if x <= max(x1, x2):\r\n if y1 != y2:\r\n xinters = (y - y1) * (x2 - x1) / (y2 - y1) + x1\r\n if x1 == x2 or x <= xinters:\r\n inside = not inside\r\n x1, y1 = x2, y2\r\n \r\n 
return inside", "def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1", "def is_intersects(self, circle):\n if self.distance_to(circle) < self.radius + circle.radius:\n return True\n return False", "def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 <= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False", "def contains_pt(self, pt):\n x, y = pt\n if not self.x - self.radius < x < self.x + self.radius:\n return False\n if not self.y - self.radius < y < self.y + self.radius:\n return False\n return True", "def inside(x, y, primitive):\n\n # You should implement your inside test here for all shapes\n # for now, it only returns a false test\n\n if primitive[\"shape\"] == \"circle\":\n dist_sqr = ((primitive[\"center\"][0] - x) ** 2 +\n (primitive[\"center\"][1] - y) ** 2)\n\n return dist_sqr <= primitive[\"radius\"] ** 2\n else:\n return winding_number(x, y, primitive)\n\n return False", "def isCrossingCircle(self, other):\n vector = Vector.createFromTwoPoints(self.center, other.center)\n return vector.norm < self.radius + other.radius", "def contains ( self, pos ):\n dr2 = (pos[0, :]-self.x)**2 + (pos[1, :]-self.y)**2\n # which points are in the circle?\n if self.include_border:\n inds = (dr2 - self.r**2) < self.abs_tol\n else:\n inds = (dr2 - self.r**2) < -self.abs_tol\n \n \n # if there's no poit inside\n if ~inds.any() and self.default_nearest: \n inds[argmin(dr2)] = True\n \n return inds", "def checkBounds(x,y,z,center,radius):\n r2 = (x-center[0])**2 + (y-center[1])**2# + (z-center[0])**2\n if r2 < radius**2:\n return True\n else:\n return False", "def in_box(point, c1, c2):\n c1x, c1y = c1\n c2x, c2y = c2\n x, y = point\n return min(c1x, c2x) <= x <= max(c1x, c2x) and min(c1y, c2y) <= y <= max(c1y, c2y)", "def near(self,x1,y1,x2,y2):\n if x1 - x2 >= -1 and x1 - x2 <= 1 and\\\n y1 - y2 >= -1 and y1 - y2 <= 1:\n return True\n else:\n return False", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def contains(self, position):\n return np.linalg.norm(position - self._center) < self._radius", "def is_in_collision_point(self, pos):\n x, y = pos\n return sqrt((self.x - x)**2 + (self.y - y)**2) < self.r", "def _circle_intersection(self, circle, point):\n dist = euclidean_distance((circle[0], circle[1]), point) - circle[2]\n vun = vec2d((circle[0] - point[0]), (circle[1] - point[1]))\n v = vun.normalized()\n\n x, y = (point[0] + dist * v.x), (point[0] + dist * v.x)\n\n return dist, (x, y)" ]
[ "0.861041", "0.8243542", "0.79211193", "0.7751971", "0.7741259", "0.7626933", "0.74056125", "0.7375218", "0.7361023", "0.7224813", "0.7188712", "0.7184341", "0.71725786", "0.71543485", "0.7150558", "0.7100634", "0.70758194", "0.7064576", "0.7006105", "0.6994791", "0.69880223", "0.6958462", "0.69326913", "0.6880365", "0.68758607", "0.6867388", "0.68348753", "0.68008894", "0.67928594", "0.6780323" ]
0.8587801
1
Returns a list of pairs [name, template] for the given instrument. Templates are defined as JSON objects, with the name stored in "reduction/dataflow/templates/..json".
def get_templates(instrument=''): import os, json template_path = os.path.dirname(__file__) template_names = [fn for fn in os.listdir(template_path) if fn.endswith(".json") and fn.startswith(instrument)] templates = dict([(tn[len(instrument)+1:-5], json.loads(open(os.path.join(template_path, tn), 'r').read())) for tn in template_names]) return templates
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_template(template, config):\n all_results = {}\n for nodenum, wires in template:\n # Find the modules\n node = template.modules[nodenum]\n module_id = node['module'] #template.modules[node]\n module = lookup_module(module_id)\n inputs = _map_inputs(module, wires)\n \n # substitute values for inputs\n kwargs = dict((k, _lookup_results(all_results, v)) \n for k, v in inputs.items())\n \n # Include configuration information\n kwargs.update(node.get('config', {}))\n kwargs.update(config[nodenum])\n result = module.action(**kwargs)\n# print result\n all_results[nodenum] = result\n \n# return all_results\n# FIXXXXXXXXXXXXXXXXXXXXXX ***********************\n from .offspecular.instruments import convert_to_plottable\n return [convert_to_plottable(value['output']) if 'output' in value else {} for key, value in all_results.items()]", "def get_default_template(env):\n return env.from_string(\n \"\"\"\\\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }}|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.standard_information.accessed|unixtimestampformat }}|{{ record.standard_information.modified|unixtimestampformat }}|{{ record.standard_information.changed|unixtimestampformat }}|{{ record.standard_information.created|unixtimestampformat }}\n{% endif %}\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }} (filename)|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.filename_information.accessed|unixtimestampformat }}|{{ record.filename_information.modified|unixtimestampformat }}|{{ record.filename_information.changed|unixtimestampformat }}|{{ record.filename_information.created|unixtimestampformat }}\n{% endif %}\n{% for e in record.indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n{% for e in record.slack_indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (slack-INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n\"\"\"\n )", "def template_data(self) -> pulumi.Output[Any]:\n return pulumi.get(self, \"template_data\")", "def load_template(\n dataset: DatasetManager, template_dir: str, template_name: str\n) -> NexusTemplate:\n if template_name == \"linear\":\n return LinearNexusTemplate()\n\n fullpath = os.path.join(template_dir, template_name)\n with open(fullpath + \".json\", \"r\") as fdata:\n data = json.load(fdata)\n\n level_doors = []\n other_doors = []\n for eid, door_data in data[\"doors\"].items():\n if door_data[\"level\"] in dataset.levels:\n level_doors.append(eid)\n else:\n other_doors.append(eid)\n\n return NexusTemplate(fullpath, template_name, data, level_doors, other_doors)", "def template(self) -> 'outputs.PipelineTemplateResponse':\n return pulumi.get(self, \"template\")", "def T(request):\n\treturn all_templates[request.param]", "def read_template(pool, sim_tag, source_id, variable_id, fgt, output_file_path):\n\n connection = pool.connection()\n try:\n\n with connection.cursor() as cursor:\n sql_statement = \"SELECT `template` FROM `run_info` WHERE `sim_tag`=%s and `source`=%s and \" \\\n 
\"`variable`=%s and `fgt`=%s\"\n row_count = cursor.execute(sql_statement, (sim_tag, source_id, variable_id, fgt))\n if row_count > 0:\n template_data = cursor.fetchone()['template']\n write_file(data=template_data, filename=output_file_path)\n else:\n return None\n\n return True\n except Exception as exception:\n error_message = \"Retrieving template failed for run info entry with source={}, variable={}, sim_tag={}, fgt={}\" \\\n .format(source_id, variable_id, sim_tag, fgt)\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()", "def template_data(self) -> Any:\n return pulumi.get(self, \"template_data\")", "def ct(template_id = 1):\n print(templates[template_id])", "def get_template(self, template):\n\n\n env = Environment(\n loader=FileSystemLoader('templates')\n )\n return env.get_template(template)", "def load_umi_template(json_template):\n if os.path.isfile(json_template):\n with open(json_template) as f:\n dicts = json.load(f, object_pairs_hook=OrderedDict)\n\n return [{key: json_normalize(value)} for key, value in dicts.items()]\n else:\n raise ValueError(\"File {} does not exist\".format(json_template))", "def generate_template(index_name):\n\n document = _BuildResultsMetaDocument()\n index = Index(name=index_name)\n index.document(document)\n index.settings(refresh_interval=\"30s\", number_of_shards=\"1\", number_of_replicas=\"1\")\n index.aliases(**{index_name: {}})\n index_template = index.as_template(template_name=\"template_\" + index_name, pattern=\"%s-*\" % index_name)\n return index_template.to_dict()", "def calc_template(template_def, config):\n template = Template(**template_def)\n #print \"template_def:\", template_def, \"config:\", config\n try:\n retvals = process_template(template, config, target=(None, None))\n except Exception:\n print(\"==== template ====\"); pprint(template_def)\n print(\"==== config ====\"); pprint(config)\n #traceback.print_exc()\n raise\n output = {}\n for rkey, rv in retvals.items():\n module_id, terminal_id = rkey\n module_key = str(module_id)\n output.setdefault(module_key, {})\n output[module_key][terminal_id] = rv.todict()\n return output", "def _test_template_data(self):\n chars=string.ascii_uppercase + string.digits\n id = ''.join(random.choice(chars) for x in range(6))\n\n return {\n 'test_module': self.test_modulename(),\n 'driver_module': self.driver_modulename(),\n 'driver_dir': self.driver_dir(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def context(template):\n\n return {\n v.key: v.read()\n for v in [Variable(name) for name in extract_variables(template)]\n }", "def return_template_output(base_dir,filename,data_dict):\n templateLoader = jinja2.FileSystemLoader( searchpath=base_dir)\n templateEnv = jinja2.Environment( loader=templateLoader )\n template = templateEnv.get_template(filename)\n output = template.render(data_dict)\n return output", "def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result", "def _driver_template_data(self):\n return {\n 'driver_module': self.driver_modulename(),\n 'file': self.driver_relative_path(),\n 'author': 
self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'driver_path': self.metadata.driver_path,\n 'release_notes': self.metadata.notes,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def getTemplate():\n\n with open('/home/sevudan/Scripts/projects/topogen/template.cfg', 'r') as file:\n data = file.read()\n file.close()\n return Template(data)", "def get_template(self, format):\n for pattern, converter in self._patterns:\n if converter.format == format:\n template = pattern.generate('{name}')\n if template:\n return template\n return '{name}' f'.{format}'", "def load_all_templates(dataset, template_dir: str) -> Dict[str, NexusTemplate]:\n template_set = {\n template_name\n for template_name in os.listdir(template_dir)\n if not template_name.endswith(\".json\")\n }\n template_set.add(\"linear\")\n\n template_ord = []\n for template_name in TEMPLATE_PREFERRED_ORDER:\n try:\n template_set.remove(template_name)\n except KeyError:\n pass\n else:\n template_ord.append(template_name)\n template_ord.extend(sorted(template_set))\n\n return {\n template_name: load_template(dataset, template_dir, template_name)\n for template_name in template_ord\n }", "def _load_template(template_file: str = None, module_name: str = None, stack_name: str = None) -> str:\n if template_file:\n # read the template file\n with open(template_file, 'r') as fh:\n template_body = fh.read()\n else:\n # Import the troposphere module\n stack = _import_tropo_module(stack_name, module_name)\n # Get the yaml template file\n template_body = stack.get_template().to_json()\n return template_body", "def get_sample_template(self) -> Sample:\n new = Sample.parse_file(self.example_sample)\n new.reset_id()\n return new", "def get_template_data(self) -> dict:\n template_data = self._get_template_data()\n\n @dataclass\n class FileEntry:\n \"\"\"Provides an entry into manifest object.\"\"\"\n\n name: str\n size: str\n md5: Optional[str]\n\n template_data[\"resource_files\"] = [\n FileEntry(entry.name, convert_size(entry.size), entry.md5)\n for entry in self.resource.get_manifest().entries.values()\n if not entry.name.startswith(\"statistics\")\n and entry.name != \"index.html\"]\n template_data[\"resource_files\"].append(\n FileEntry(\"statistics/\", \"\", \"\"))\n return template_data", "def template(self) -> str:\n manifest = self._get_manifest()\n\n return manifest[\"template\"]", "def get_template(self, name, args):\n key = name, len(args)\n template = self.templates.get(key)\n if not template:\n raise mio.MIOException('Undefined template \"%s/%d\"' % (name, len(args)))\n return template", "def get_templates(self, template_name, **kwargs):\n text = render_template(\"{template}.txt\".format(template=template_name), **kwargs)\n return text", "def lookup_template(namespace, name):\r\n return LOOKUP[namespace].get_template(name)", "def mast_query(instrument, templates, start_date, end_date, aperture=None, detector=None, filter_name=None,\n pupil=None, grating=None, readpattern=None, lamp=None):\n\n # If a single template name is input as a string, put it in a list\n if isinstance(templates, str):\n templates = [templates]\n\n # Make sure instrument is correct case\n instrument = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()]\n\n # instrument_inventory does not allow list inputs to\n # the added_filters input (or at least if you do provide a list, then\n # it becomes a nested list when it sends the 
query to MAST. The\n # nested list is subsequently ignored by MAST.)\n # So query once for each flat template, and combine outputs into a\n # single list.\n query_results = []\n for template_name in templates:\n\n # Create dictionary of parameters to add\n parameters = {\"date_obs_mjd\": {\"min\": start_date, \"max\": end_date},\n \"exp_type\": template_name}\n\n if detector is not None:\n parameters[\"detector\"] = detector\n if aperture is not None:\n parameters[\"apername\"] = aperture\n if filter_name is not None:\n parameters[\"filter\"] = filter_name\n if pupil is not None:\n parameters[\"pupil\"] = pupil\n if grating is not None:\n parameters[\"grating\"] = grating\n if readpattern is not None:\n parameters[\"readpatt\"] = readpattern\n if lamp is not None:\n parameters[\"lamp\"] = lamp\n\n query = instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS,\n add_filters=parameters, return_data=True, caom=False)\n if len(query['data']) > 0:\n query_results.extend(query['data'])\n\n return query_results", "def template_list(call=None):\n templates = {}\n session = _get_session()\n vms = session.xenapi.VM.get_all()\n for vm in vms:\n record = session.xenapi.VM.get_record(vm)\n if record[\"is_a_template\"]:\n templates[record[\"name_label\"]] = record\n return templates" ]
[ "0.55452025", "0.5474284", "0.54666585", "0.54315674", "0.5427507", "0.5332231", "0.5303145", "0.52793384", "0.5269707", "0.5265125", "0.52575624", "0.52515024", "0.5250721", "0.5226359", "0.5199281", "0.51346874", "0.5112004", "0.5083223", "0.5079634", "0.50753623", "0.5062832", "0.50545514", "0.5051788", "0.50417155", "0.50399834", "0.50381446", "0.5033013", "0.5030086", "0.5018715", "0.5012147" ]
0.7779888
0
test whether rate article without token will fail.
def test_rate_article_without_token(self): response = self.client.post( self.rate_url, self.rate_details, format='json') self.assertIn( 'Authentication credentials were not provided.', str( response.data)) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unsuccessful_rating_of_own_article(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.author_headers)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertIn(self.rate_own_article_error_message,\n response.data['message'])", "def test_rate_article_invalid_rate(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.rate_details[\"user\"][\"rate\"] = 7\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.rate_details[\"user\"][\"rate\"] = 0\n resp = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'invalid rate value should be > 0 or <=5', str(\n response.data))\n self.assertIn(\n 'invalid rate value should be > 0 or <=5', str(\n resp.data))\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.rate_details[\"user\"][\"slug\"] = \"-ss-dd-dd-ff\"\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'Article not found', str(\n response.data))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_get_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = self.client.get(\n self.view_rates_url + str(2) + \"/\",\n format='json')\n self.assertEqual(\n 0,\n response.data[\"rates\"])\n self.assertEqual(204, status.HTTP_204_NO_CONTENT)", "def test_unsuccessful_rating_of_nonexistent_article(self):\n self.slug = 'fake-slug'\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(response.data['message'],\n self.non_existent_article_message)", "def test_escalation_of_a_404_article(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article()\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(resp.data[\"detail\"], self.msg)", "def test_unsuccessful_rating_with_negative_rate_value(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': -4},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_min_value_error_message)", "def test_unsuccessful_rating_with_empty_rate_value(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_empty_value_error_message)", "def test_get_rate_article(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_2)\n self.rate_details[\"user\"]['rate'] = 4\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n response = self.client.get(\n self.view_rates_url + str(1) + \"/\",\n format='json')\n\n 
self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_post_without_token(self):\n client = Client()\n data = {\n 'rating': '1',\n 'title': 'Hated It!',\n 'summary': 'A little text to say that I hated it!',\n 'company': '1'\n }\n response = client.post('/reviews/', data)\n self.assertEqual(response.status_code, 401)", "def test_unused_token_is_valid(self):\n assert self.token.is_valid()", "def test_escalation_of_an_article_twice(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_twice()\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"error\"], self.report_twice)", "def test_escalation_of_an_article_with_author(self):\n token = self.user2.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_successfully()\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_escalation_of_an_article(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_successfully()\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def test_non_logged_in_users_cannot_rate(self):\n\n self.signup('[email protected]', 'user')\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.logout()\n\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 1\n }, csrf_token, expected_status_int=401, expect_errors=True\n )", "def test_getting_of_an_escalated_article_with_users(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.get_article()\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(resp.data[\"error\"], self.get_admin)", "def test_unauthorized_request(self):\n # test false token\n user_id = self.create_user()[0]\n question_id = int(self.create_question(user_id)[0])\n false_token = self.post_data(question_id, headers={\"Authorization\":\"Bearer wrongtoken\"})\n self.assertEqual(false_token.status_code, 401)", "def test_get_without_token(self):\n client = Client()\n response = client.get('/reviews/')\n self.assertEqual(response.status_code, 401)", "def test_successful_article_rate(self):\n ratings_count = ArticleRating.objects.count()\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 4},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ArticleRating.objects.count(), ratings_count+1)", "def test_invalid_tokens(self):\n self.assertTrue(1 + 1)", "def test_post_answer_if_not_autheticated(self):\n response = self.post_answer()\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_falsepositive_no_token_passed(client):\n g.test_authorized_for = []\n res = client.get(\"/v0/falsepositive?fp=splunk_82998ef6bb3db9dff3dsfdsfsdc\")\n assert res.status == \"500 INTERNAL SERVER ERROR\"", "def check_rate(self):\n rate = self.rate_measurer.rate()\n if rate < self.request_rate:\n self._fail(WRequestRateTooLow(rate))\n\n if self.rate_measurer.outstanding() > 
self.max_outstanding:\n self._fail(WRequestOverload())", "def test_invalid_rating_value(self):\n url = reverse('rate-game')\n negative_rating = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id,\n 'rating': -1\n }\n big_rating = negative_rating\n big_rating['rating'] = 6\n\n negative = self.client.post(url, negative_rating, format='json')\n big = self.client.post(url, big_rating, format='json')\n\n self.assertEqual(negative.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(big.status_code, status.HTTP_400_BAD_REQUEST)", "def test_an_unauthenticated_user_cannot_like_article(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n like = self.client.post('/api/articles/{}/like/'.format(slug),\n format='json')\n\n self.assertEqual(\n like.data['detail'], \"Authentication credentials were not provided.\")\n self.assertEqual(like.status_code, 401)", "def test_unsuccessful_rating_with_rate_value_more_than_five(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 6},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_max_value_error_message)", "def test_invalid_amount(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 'cazc', 'date_of_expense': '10-01-2021'})\n self.assertEqual(res.status_code, 400)\n results = json.loads(res.data)\n self.assertEqual(results['message'], 'the amount entered is not a valid number')", "def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")", "def test_error_no_rate(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = []\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_reporting_your_own_article(self):\n \n response = self.client.post('/api/articles/', self.article,\n HTTP_AUTHORIZATION='Token ' + self.token,\n format='json')\n result = json.loads(response.content)\n \n response = self.client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token,\n format='json')\n result = json.loads(response.content)\n \n self.assertEqual(result[\"errors\"], \"You cannot report your own article\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)" ]
[ "0.7470178", "0.7465953", "0.7388795", "0.72328603", "0.6636202", "0.65985996", "0.653104", "0.63997334", "0.6340562", "0.61541426", "0.6136993", "0.6135138", "0.608342", "0.6076124", "0.6066384", "0.60158104", "0.60107666", "0.5995916", "0.59279543", "0.5847617", "0.58281684", "0.58182985", "0.5806917", "0.5793752", "0.5779415", "0.5754999", "0.5748369", "0.57421196", "0.57391614", "0.57355374" ]
0.81620646
0
test whether rate article without article will fail.
def test_rate_article_not_found(self): self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token) self.rate_details["user"]["slug"] = "-ss-dd-dd-ff" response = self.client.post( self.rate_url, self.rate_details, format='json') self.assertIn( 'Article not found', str( response.data)) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unsuccessful_rating_of_own_article(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.author_headers)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertIn(self.rate_own_article_error_message,\n response.data['message'])", "def test_get_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = self.client.get(\n self.view_rates_url + str(2) + \"/\",\n format='json')\n self.assertEqual(\n 0,\n response.data[\"rates\"])\n self.assertEqual(204, status.HTTP_204_NO_CONTENT)", "def test_rate_article_invalid_rate(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.rate_details[\"user\"][\"rate\"] = 7\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.rate_details[\"user\"][\"rate\"] = 0\n resp = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'invalid rate value should be > 0 or <=5', str(\n response.data))\n self.assertIn(\n 'invalid rate value should be > 0 or <=5', str(\n resp.data))\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_rate_article_without_token(self):\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'Authentication credentials were not provided.', str(\n response.data))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_unsuccessful_rating_of_nonexistent_article(self):\n self.slug = 'fake-slug'\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(response.data['message'],\n self.non_existent_article_message)", "def test_escalation_of_a_404_article(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article()\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(resp.data[\"detail\"], self.msg)", "def test_unsuccessful_rating_with_empty_rate_value(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_empty_value_error_message)", "def test_unsuccessful_rating_with_negative_rate_value(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': -4},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_min_value_error_message)", "def test_reporting_your_own_article(self):\n \n response = self.client.post('/api/articles/', self.article,\n HTTP_AUTHORIZATION='Token ' + self.token,\n format='json')\n result = json.loads(response.content)\n \n response = self.client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token,\n format='json')\n result = json.loads(response.content)\n \n self.assertEqual(result[\"errors\"], \"You cannot report your own article\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def 
test_successful_article_rate(self):\n ratings_count = ArticleRating.objects.count()\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 4},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ArticleRating.objects.count(), ratings_count+1)", "def test_get_rate_article(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_2)\n self.rate_details[\"user\"]['rate'] = 4\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n response = self.client.get(\n self.view_rates_url + str(1) + \"/\",\n format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_report_article_more_than_once(self):\n from rest_framework.test import APIClient\n client = APIClient()\n\n response = client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n response = client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result['errors'],'You can only report an article once')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_report_of_article_does_not_exist(self):\n from rest_framework.test import APIClient\n client = APIClient()\n response = client.post('/api/report/spoon/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result[\"error_message\"], \"The article you are reporting does not exist\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_escalation_of_an_article_twice(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_twice()\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"error\"], self.report_twice)", "def validate_article(article):\r\n\tcollection=create_database_connection()\r\n\tinsert_article(collection,article)", "def test_escalation_of_an_article_with_author(self):\n token = self.user2.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_successfully()\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_can_get_comments_of_invalid_article(self):\n token = self.create_user(VALID_USER_DATA)\n response = self.create_article(VALID_ARTICLE, token)\n\n response = self.create_comment(\n token=token,\n parentId=0,\n slug=response.data['article']['slug']\n )\n token = self.create_user(VALID_USER_DATA_2)\n\n get_comment_url = reverse('comments', kwargs={\n 'slug': 'random-non-existent-article-0x3',\n 'id': 0\n })\n response = self.client.get(\n get_comment_url,\n HTTP_AUTHORIZATION=token,\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_404_NOT_FOUND\n )", "def test_escalation_of_an_article(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_successfully()\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def test_cannot_disllike_article_with_non_exitent_slug(self):\n self.create_article()\n\n dislike = 
self.client.post('/api/articles/{}/dislike/'.format(\"abc\"),\n HTTP_AUTHORIZATION='Bearer ' +\n self.token,\n format='json')\n\n dislike = json.loads(dislike.content.decode('utf-8'))\n self.assertEqual(dislike['error'], 'Article with slug abc not found')\n self.assertEqual(dislike['status'], 404)", "def test_contentious_prescription_no_rationale(self):\n url = reverse('admin:prescription_prescription_add')\n data = {\n 'name': 'Test',\n 'planned_season': 1,\n 'planned_year': 2013,\n 'region': 1,\n 'district': 1,\n 'location': 'Test location',\n 'perimeter': 20,\n 'area': 100,\n 'purposes': [1],\n 'remote_sensing_priority': 4,\n 'priority': 2,\n 'contentious': True,\n }\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(Prescription.objects.count(), 0)\n form = response.context['adminform'].form\n self.assertEqual(form.errors, {\n 'contentious_rationale': ['A contentious burn requires a '\n 'contentious rationale.']\n })", "def test_update_non_existing_article(self):\n response = self.update_article(\n self.article_data_2,\n \"this-is-a-non-existing-slug\"\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_getting_of_an_escalated_article_with_users(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.get_article()\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(resp.data[\"error\"], self.get_admin)", "def test_cannot_like_article_with_non_exitent_slug(self):\n self.create_article()\n\n like = self.client.post('/api/articles/{}/like/'.format(\"abc\"),\n HTTP_AUTHORIZATION='Bearer ' +\n self.token,\n format='json')\n\n like = json.loads(like.content.decode('utf-8'))\n self.assertEqual(like['error'], 'Article with slug abc not found')\n self.assertEqual(like['status'], 404)", "def test_error_no_rate(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = []\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_an_unauthenticated_user_cannot_like_article(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n like = self.client.post('/api/articles/{}/like/'.format(slug),\n format='json')\n\n self.assertEqual(\n like.data['detail'], \"Authentication credentials were not provided.\")\n self.assertEqual(like.status_code, 401)", "def test_updating_non_existing_article(self):\n saved = self.create_article()\n token = saved[2]\n url = 'articles/notsaved'\n response = self.test_client.put(url, self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_an_unauthenticated_user_cannot_dislike_article(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n dislike = self.client.post('/api/articles/{}/dislike/'.format(slug),\n format='json')\n\n self.assertEqual(\n dislike.data['detail'], \"Authentication credentials were not provided.\")\n self.assertEqual(dislike.status_code, 401)", "def test_accept_missing_sources_as_tech_debt_expired(self):\n metric = Metric(\n self.DATA_MODEL,\n {\"addition\": \"sum\", \"type\": \"tests\", \"accept_debt\": True, \"debt_end_date\": \"2020-01-01\"},\n METRIC_ID,\n )\n measurement = self.measurement(metric)\n 
self.assertIsNone(measurement.status())", "def test_successful_article_rate_update(self):\n\n self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 4},\n format=\"json\",\n **self.headers)\n\n ratings_count = ArticleRating.objects.count()\n update_response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 5},\n format=\"json\",\n **self.headers)\n self.assertEqual(update_response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ArticleRating.objects.count(), ratings_count)", "def test_invalid_rating_value(self):\n url = reverse('rate-game')\n negative_rating = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id,\n 'rating': -1\n }\n big_rating = negative_rating\n big_rating['rating'] = 6\n\n negative = self.client.post(url, negative_rating, format='json')\n big = self.client.post(url, big_rating, format='json')\n\n self.assertEqual(negative.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(big.status_code, status.HTTP_400_BAD_REQUEST)" ]
[ "0.78261167", "0.7498871", "0.7443131", "0.7402631", "0.7304586", "0.6678314", "0.65706855", "0.6514573", "0.64715177", "0.6429115", "0.639798", "0.6339268", "0.6214938", "0.6198013", "0.6197333", "0.61033016", "0.6053459", "0.5992556", "0.59885514", "0.59804475", "0.5936695", "0.59141463", "0.5907089", "0.58962506", "0.5876855", "0.5815807", "0.5795584", "0.57444054", "0.57315564", "0.57128435" ]
0.79441893
0
test whether rate article with invalid data will fail.
def test_rate_article_invalid_rate(self): self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token) self.rate_details["user"]["rate"] = 7 response = self.client.post( self.rate_url, self.rate_details, format='json') self.rate_details["user"]["rate"] = 0 resp = self.client.post( self.rate_url, self.rate_details, format='json') self.assertIn( 'invalid rate value should be > 0 or <=5', str( response.data)) self.assertIn( 'invalid rate value should be > 0 or <=5', str( resp.data)) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.rate_details[\"user\"][\"slug\"] = \"-ss-dd-dd-ff\"\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'Article not found', str(\n response.data))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_unsuccessful_rating_with_negative_rate_value(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': -4},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_min_value_error_message)", "def test_unsuccessful_rating_of_own_article(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.author_headers)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertIn(self.rate_own_article_error_message,\n response.data['message'])", "def test_get_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = self.client.get(\n self.view_rates_url + str(2) + \"/\",\n format='json')\n self.assertEqual(\n 0,\n response.data[\"rates\"])\n self.assertEqual(204, status.HTTP_204_NO_CONTENT)", "def test_rate_article_without_token(self):\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'Authentication credentials were not provided.', str(\n response.data))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_unsuccessful_rating_with_empty_rate_value(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_empty_value_error_message)", "def test_invalid_rating_value(self):\n url = reverse('rate-game')\n negative_rating = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id,\n 'rating': -1\n }\n big_rating = negative_rating\n big_rating['rating'] = 6\n\n negative = self.client.post(url, negative_rating, format='json')\n big = self.client.post(url, big_rating, format='json')\n\n self.assertEqual(negative.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(big.status_code, status.HTTP_400_BAD_REQUEST)", "def test_unsuccessful_rating_of_nonexistent_article(self):\n self.slug = 'fake-slug'\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(response.data['message'],\n self.non_existent_article_message)", "def test_error_no_rate(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = []\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_error_on_negative_rate(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"][0][\"value\"] = float(round(Decimal(random.random()), 6) * -1)\n\n 
with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_error_on_rate_type(self):\n self.ocp_data[\"rates\"][0].pop(\"tiered_rates\")\n self.ocp_data[\"rates\"][0][\"bad_rates\"] = []\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_unsuccessful_rating_with_rate_value_more_than_five(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 6},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_max_value_error_message)", "def test_update_with_invalid_data(self):\n saved_article = self.create_article()\n url = saved_article[0]\n token = saved_article[2]\n response = self.test_client.put(url, self.article_invalid_data2, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_tag_rates_error_on_negitive_usage_start(self):\n tag_values_kwargs = [{\"usage_start\": -5}]\n self.basic_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_values=tag_values_kwargs)\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.basic_model, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n self.assertFalse(serializer.is_valid(raise_exception=True))\n result_err_msg = serializer.errors[\"rates\"][0][\"tag_values\"][\"usage\"][0]\n expected_err_msg = \"A tag rate usage_start must be positive.\"\n self.assertEqual(result_err_msg, expected_err_msg)", "def test_invalid_year_fail(self):\n ar = awstats_reader.AwstatsReader('/tmp', 'example.com')\n self.assertRaises(KeyError, ar.__getitem__, 9999)", "def test_invalid_amount(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 'cazc', 'date_of_expense': '10-01-2021'})\n self.assertEqual(res.status_code, 400)\n results = json.loads(res.data)\n self.assertEqual(results['message'], 'the amount entered is not a valid number')", "def test_tag_rates_error_on_negitive_usage_end(self):\n tag_values_kwargs = [{\"usage_end\": -5}]\n self.basic_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_values=tag_values_kwargs)\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.basic_model, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n self.assertFalse(serializer.is_valid(raise_exception=True))\n result_err_msg = serializer.errors[\"rates\"][0][\"tag_values\"][\"usage\"][0]\n expected_err_msg = \"A tag rate usage_end must be positive.\"\n self.assertEqual(result_err_msg, expected_err_msg)", "def test_with_invalid_input(self):\n for dataset_type in ['ruler', 'pencil', 'cheese']:\n with self.assertRaises(ValueError) as exc:\n check_dataset_type(dataset_type)\n self.assertEqual(\"Dataset type not 'regular' or 'raw' is %s\" % dataset_type,\n str(exc.exception))", "def 
test_tag_rates_error_on_negitive_tag_value(self):\n tag_values_kwargs = [{\"value\": -0.2}]\n self.basic_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_values=tag_values_kwargs)\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.basic_model, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n self.assertFalse(serializer.is_valid(raise_exception=True))\n result_err_msg = serializer.errors[\"rates\"][0][\"tag_values\"][\"value\"][0]\n expected_err_msg = \"A tag rate value must be nonnegative.\"\n self.assertEqual(result_err_msg, expected_err_msg)", "def check_rate(self):\n rate = self.rate_measurer.rate()\n if rate < self.request_rate:\n self._fail(WRequestRateTooLow(rate))\n\n if self.rate_measurer.outstanding() > self.max_outstanding:\n self._fail(WRequestOverload())", "def test_non_integral_validation(self):", "def test_non_integral_validation(self):", "def test_error_on_invalid_metric(self):\n self.ocp_data.get(\"rates\", [])[0][\"metric\"][\"name\"] = \"invalid_metric\"\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_negative_documents(self):\n\n idf = { 'a': 3, 'b': 1 }\n self.assertRaises(ValueError, TFIDFScorer, idf, -1)", "def test_get_rate_article(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_2)\n self.rate_details[\"user\"]['rate'] = 4\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n response = self.client.get(\n self.view_rates_url + str(1) + \"/\",\n format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_bad_period(self):\n self.period = 'bad'\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 104)", "def test_bayes_updates_bad_data(self):\r\n self.assertRaises(ValueError, bayes_updates, self.bad)", "def test_non_logged_in_users_cannot_rate(self):\n\n self.signup('[email protected]', 'user')\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.logout()\n\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 1\n }, csrf_token, expected_status_int=401, expect_errors=True\n )", "def test_accept_missing_sources_as_tech_debt_expired(self):\n metric = Metric(\n self.DATA_MODEL,\n {\"addition\": \"sum\", \"type\": \"tests\", \"accept_debt\": True, \"debt_end_date\": \"2020-01-01\"},\n METRIC_ID,\n )\n measurement = self.measurement(metric)\n self.assertIsNone(measurement.status())", "def is_valid(self, dataset):\n pass" ]
[ "0.7321892", "0.7151747", "0.71178585", "0.69698894", "0.69361943", "0.6869482", "0.6740184", "0.66798234", "0.6647462", "0.66111946", "0.65852267", "0.6337533", "0.62674284", "0.620593", "0.6205748", "0.6204665", "0.61944914", "0.61677474", "0.6126372", "0.6076587", "0.60725075", "0.60725075", "0.6058899", "0.6057467", "0.60409534", "0.6034508", "0.6030775", "0.60254586", "0.6003327", "0.59737307" ]
0.8335973
0
test whether rate article with token.
def test_get_rate_article(self): self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token) self.client.post( self.rate_url, self.rate_details, format='json') self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_2) self.rate_details["user"]['rate'] = 4 self.client.post( self.rate_url, self.rate_details, format='json') response = self.client.get( self.view_rates_url + str(1) + "/", format='json') self.assertEqual(response.status_code, status.HTTP_200_OK)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rate_article_without_token(self):\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'Authentication credentials were not provided.', str(\n response.data))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def is_token_valid(self,pk,request):\n\n pass", "def verify_token(self, token):\n return False", "def test_rate_article_invalid_rate(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.rate_details[\"user\"][\"rate\"] = 7\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.rate_details[\"user\"][\"rate\"] = 0\n resp = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'invalid rate value should be > 0 or <=5', str(\n response.data))\n self.assertIn(\n 'invalid rate value should be > 0 or <=5', str(\n resp.data))\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_unsuccessful_rating_of_own_article(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.author_headers)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertIn(self.rate_own_article_error_message,\n response.data['message'])", "def test_get_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = self.client.get(\n self.view_rates_url + str(2) + \"/\",\n format='json')\n self.assertEqual(\n 0,\n response.data[\"rates\"])\n self.assertEqual(204, status.HTTP_204_NO_CONTENT)", "def check_token(self, token):\n if not token or not self.verification_token:\n return False\n if not constant_time_compare(token, self.verification_token):\n return False\n if self.is_verified:\n return False\n age = timezone.now() - self.added_date\n if age >= timedelta(days=AssociatedEmail.VERIFICATION_TIMEOUT_DAYS):\n return False\n return True", "def test_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.rate_details[\"user\"][\"slug\"] = \"-ss-dd-dd-ff\"\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'Article not found', str(\n response.data))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def check_token(token):\n token = db.session.query(Token).filter(Token.token==token).first()\n if token == None:\n return False\n #TODO token lifetime\n #if (datetime.datetime.now() - token.date >= datetime.timedelta(day=2)):\n # return False \n return True", "def test_successful_article_rate(self):\n ratings_count = ArticleRating.objects.count()\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 4},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ArticleRating.objects.count(), ratings_count+1)", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def is_blacklisted(token):\n if Revoked.query.filter_by(token=token).first():\n return True\n return False", "def checkToken( self ):\n\n if ( self.token == None ):\n return False\n else :\n d = {\n \"auth_token\" : str(self.token) ,\n \"method\" : 
\"flickr.auth.checkToken\",\n \"format\" : \"json\",\n \"nojsoncallback\" : \"1\"\n }\n sig = self.signCall( d )\n\n url = self.urlGen( api.rest, d, sig )\n try:\n res = self.getResponse( url )\n if ( self.isGood( res ) ):\n self.token = res['auth']['token']['_content']\n self.perms = res['auth']['perms']['_content']\n return True\n else :\n self.reportError( res )\n except:\n print(str(sys.exc_info()))\n return False", "def is_token_revoked(decoded_token):\r\n jti = decoded_token['jti']\r\n try:\r\n token = TokenBlacklist.query.filter_by(jti=jti).one()\r\n return token.revoked\r\n except NoResultFound:\r\n return True", "def is_rate_validated(from_no):\n from_key_time = \"from_time_\"+from_no\n from_key_count = \"from_count_\" + from_no\n\n if not get_cache(from_key_time) or not get_cache(from_key_count):\n set_cache(from_key_time, time.time())\n set_cache(from_key_count, 1)\n return True\n cached_time = get_cache(from_key_time)\n time_diff = time.time() - cached_time\n cached_count = get_cache(from_key_count)\n\n if time_diff < RATE_LIMIT_DURATION and cached_count >= RATE_LIMIT_COUNT:\n return False\n elif time_diff > RATE_LIMIT_DURATION:\n set_cache(from_key_time, cached_time + RATE_LIMIT_DURATION)\n set_cache(from_key_count, 1)\n return True\n else: # cached_count < RATE_LIMIT_COUNT\n # print(\"hit from -%s, count - %s\" % (from_no,cached_count))\n set_cache(from_key_count, cached_count+1)\n return True", "def is_token_revoked(decoded_token):\n jti = decoded_token['jti']\n token = BlacklistedToken.query.filter_by(jti=jti).first()\n return token is not None", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def test_token(authToken):\n url = endpoint('test')\n r = requests.get(url, headers={'authorizationToken': authToken}) \n if r.status_code == 403:\n print(\"403\")\n return False\n response = json.loads( r.content.decode() )\n return response", "def is_revoked(self, token: str) -> bool:\n return token in self.revoked_tokens", "def _check_token_is_revoked(self, jti: str) -> None:\n redis = self._conn_redis()\n entry = redis.get(jti)\n if entry and entry == 'true':\n raise HTTPException(status_code=401,detail=\"Token has been revoked\")", "def check_token(self, user, token):\n\n # Parse the token\n try:\n ts_b36, hash = token.split(\"-\")\n except ValueError:\n return False\n\n try:\n ts = base36_to_int(ts_b36)\n except ValueError:\n return False\n\n # Check that the timestamp/uid has not been tampered with\n recomputed_token = self._make_token_with_timestamp(user, ts)\n\n log.debug(\"Ricalcolo re_token=%s token=%s\" % (recomputed_token, token))\n if not constant_time_compare(recomputed_token, token):\n return False\n\n # Check the timestamp is within limit\n if (self._num_days(self._today()) - ts) > settings.REFERRAL_TOKEN_RESET_TIMEOUT_DAYS:\n return False\n\n return True", "def test_getting_of_an_escalated_article_with_users(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.get_article()\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(resp.data[\"error\"], self.get_admin)", "def correct_token(name, token):\n if not User.created(name):\n return False\n user = User.get_user(name)\n return user.info['token'] == token", "def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n 
return False", "def test_escalation_of_an_article_with_author(self):\n token = self.user2.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_successfully()\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "async def validate_token(self, token):", "def test_escalation_of_an_article_twice(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_twice()\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"error\"], self.report_twice)", "def decoratedCheckToken(*args, **kwargs):\n if \"token\" not in request.headers:\n raise InvalidUsage(\"Must pass a token!\")\n\n # Execute if the token matches\n logger.debug(\"Token: {0}\".format(request.headers[\"token\"]))\n if request.headers[\"token\"] == receiverParameters[\"apiToken\"]:\n return func(*args, **kwargs)\n\n # Note that it is invalid otherwise\n raise InvalidUsage(\"Invalid token!\")", "def test_verification_with_valid_token(self) -> None:\n\n secret_key = str(self.author.secret_key)\n verification_url = reverse('author:verify', kwargs={'secret_key': str(secret_key)})\n\n # Make sure URL's don't change.\n self.assertEqual(verification_url, f'/api/authors/verify/{secret_key}/')\n\n # Make valid request and get response\n response: Response = self.client.get(verification_url)\n\n self.assertEqual(response.status_code, 302)\n\n # Now test if the method \"verify\" was called\n self.assertEqual(Author.objects.get().verified, True)\n # We don't wanna give him too many privileges\n self.assertEqual(self.author.is_staff, False)", "def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True" ]
[ "0.7210318", "0.6049662", "0.59960324", "0.59435606", "0.5923598", "0.5863462", "0.5765565", "0.57086027", "0.570428", "0.5696122", "0.56727237", "0.5531006", "0.5477901", "0.54732865", "0.5456026", "0.54417336", "0.54382604", "0.54163957", "0.54120475", "0.54111814", "0.5409284", "0.53820884", "0.5374469", "0.5367698", "0.5364008", "0.53541934", "0.53501594", "0.5342246", "0.5341496", "0.53359014" ]
0.6411341
1
Converts an RFC2253 DN string into an openssl one.
def __rfc_to_openssl(user_dn): dn_parts = [x.strip() for x in user_dn.split(',')] dn_parts.reverse() return '/%s' % '/'.join(dn_parts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_x509_name(name):\n types = {\n 'country_name': 'C',\n 'state_or_province_name': 'ST',\n 'locality_name': 'L',\n 'organization_name': 'O',\n 'organizational_unit_name': 'OU',\n 'common_name': 'CN',\n 'email_address': 'emailAddress'\n }\n\n return '/'.join(['{}={}'.format(types[attr], name.native[attr]) for attr in name.native])", "def convert_dn(dn):\n if re.match(\"^/.*\", dn):\n return dn\n\n new_dn = \"\"\n attrs = dn.split(\",\")\n for attr in attrs:\n prm_tuple = attr.split(\"=\")\n k = prm_tuple[0].strip()\n v = prm_tuple[1].strip()\n new_dn = new_dn + f'/{k}={v}'\n return new_dn", "def parse_dn(dnstr):\n res = []\n for part in loop_escaped(dnstr, '/'):\n part = part.strip()\n if not part:\n continue\n if '=' not in part:\n raise InvalidCertificate(\"Need k=v in Name string\")\n k, v = part.split('=', 1)\n res.append((k.strip(), v.strip()))\n return res", "def iri2uri(uri): \r\n if isinstance(uri ,unicode):\r\n (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)\r\n authority = authority.encode('idna')\r\n # For each character in 'ucschar' or 'iprivate'\r\n # 1. encode as utf-8\r\n # 2. then %-encode each octet of that utf-8 \r\n uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))\r\n uri = \"\".join([encode(c) for c in uri])\r\n return uri", "def load_cert_der_string(string):\n bio = BIO.MemoryBuffer(string)\n cptr = m2.d2i_x509(bio._ptr())\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)", "def idna_encode(self, domain):\n try:\n if isinstance(domain, str):\n domain = domain.decode('utf-8')\n return domain.encode('idna')\n except UnicodeError:\n return domain", "def _convert_to_idn(url):\n # this function should only be called with a unicode string\n # strategy: if the host cannot be encoded in ascii, then\n # it'll be necessary to encode it in idn form\n parts = list(urllib.parse.urlsplit(url))\n try:\n parts[1].encode('ascii')\n except UnicodeEncodeError:\n # the url needs to be converted to idn notation\n host = parts[1].rsplit(':', 1)\n newhost = []\n port = ''\n if len(host) == 2:\n port = host.pop()\n for h in host[0].split('.'):\n newhost.append(h.encode('idna').decode('utf-8'))\n parts[1] = '.'.join(newhost)\n if port:\n parts[1] += ':' + port\n return urllib.parse.urlunsplit(parts)\n else:\n return url", "def Serialize(cls, dn, separator=\"/\", sort=True):\n\n if separator not in cls.VALID_SEPARATORS:\n raise X509SubjectNameConfigError(\"Invalid field separator %r\" % separator)\n\n # If using '/' then prepend DN with an initial '/' char\n if separator == \"/\":\n s_dn = separator\n else:\n s_dn = \"\"\n\n dn_list = []\n for key, val in dn.items():\n if val:\n if isinstance(val, tuple):\n kv_pairs = [\"%s=%s\" % (key, val_sub) for val_sub in val]\n dn_list += [separator.join(kv_pairs)]\n else:\n dn_list += [\"%s=%s\" % (key, val)]\n\n if sort:\n dn_list.sort()\n\n s_dn += separator.join(dn_list)\n\n return s_dn", "def SID_converter(self, sid_hex):\n\t\t#Original form Source: https://github.com/google/grr/blob/master/grr/parsers/wmi_parser.py\n\t\tif sid_hex is None:\n\t\t\treturn 'None'\n\t\tsid = sid_hex.decode('hex')\n\n\t\tif not sid:\n\t\t\treturn \"\"\n\t\tstr_sid_components = [ sid[0].encode('hex').lstrip('0') ]\n\t\t# Now decode the 48-byte portion\n\t\tsid_str = ''\n\t\tif len(sid) >= 8:\n\t\t\tsubauthority_count = sid[1]\n\t\t\tidentifier_authority = struct.unpack(\">H\", sid[2:4])[0]\n\t\t\tidentifier_authority <<= 32\n\t\t\tidentifier_authority |= struct.unpack(\">L\", 
sid[4:8])[0]\n\t\t\tstr_sid_components.append(identifier_authority)\n\t\t\tstart = 8\n\t\t\t#print subauthority_count.encode('hex')\n\t\t\tfor i in range( int( subauthority_count.encode('hex') , 16 ) ):\n\t\t\t\tauthority = sid[start:start + 4]\n\t\t\t\tif not authority:\n\t\t\t\t\tbreak\n\t\t\t\tif len(authority) < 4:\n\t\t\t\t\traise ValueError(\"In binary SID '%s', component %d has been truncated. \"\n\t\t\t\t\t\t\t\t\t\"Expected 4 bytes, found %d: (%s)\",\n\t\t\t\t\t\t\t\t\t\",\".join([str(ord(c)) for c in sid]), i,\n\t\t\t\t\t\t\t\t\tlen(authority), authority)\n\t\t\t\tstr_sid_components.append(struct.unpack(\"<L\", authority)[0])\n\t\t\t\tstart += 4\n\t\t\t\tsid_str = \"S-%s\" % (\"-\".join([str(x) for x in str_sid_components]))\n\n\t\treturn sid_str", "def decode_email(email):\n return", "def _ssl_dn_extract_info(dn_string):\r\n ss = re.search('/emailAddress=(.*)@([^/]+)', dn_string)\r\n if ss:\r\n user = ss.group(1)\r\n email = \"%s@%s\" % (user, ss.group(2))\r\n else:\r\n return None\r\n ss = re.search('/CN=([^/]+)/', dn_string)\r\n if ss:\r\n fullname = ss.group(1)\r\n else:\r\n return None\r\n return (user, email, fullname)", "def xforwardedforclientcert_issuerdnalias(self) -> str:\n return pulumi.get(self, \"xforwardedforclientcert_issuerdnalias\")", "def parse(cls, dn, separator=None):\n if separator in (\"/\", None):\n parser_re = cls.SLASH_PARSER_RE\n elif separator == \",\":\n parser_re = cls.COMMA_PARSER_RE\n else:\n raise X509SubjectNameConfigError(\"Invalid field separator %r\" % separator)\n\n dn_fields = parser_re.split(dn)\n if len(dn_fields) < 2:\n raise X509SubjectNameConfigError('Error parsing DN string: \"%s\"' % dn)\n\n items = zip(dn_fields[1::2], dn_fields[2::2])\n\n # Strip leading and trailing space chars and convert into a\n # dictionary\n parsed_dn = {}\n for key, val in items:\n key = key.strip()\n if key in parsed_dn:\n if isinstance(parsed_dn[key], tuple):\n parsed_dn[key] = tuple(list(parsed_dn[key]) + [val])\n else:\n parsed_dn[key] = (parsed_dn[key], val)\n else:\n parsed_dn[key] = val\n\n return parsed_dn", "def load_cert_string(string, format=FORMAT_PEM):\n bio = BIO.MemoryBuffer(string)\n return load_cert_bio(bio, format)", "def iri_to_uri(iri, encoding='Latin-1'):\r\n scheme, authority, path, query, frag = urlparse.urlsplit(iri)\r\n scheme = scheme.encode(encoding)\r\n if \":\" in authority:\r\n host, port = authority.split(\":\", 1)\r\n authority = host.encode('idna') + \":%s\" % port\r\n else:\r\n authority = authority.encode(encoding)\r\n path = urlparse.quote(path.encode(encoding), safe=\"/;%[]=:$&()+,!?*@'~\")\r\n query = urlparse.quote(query.encode(encoding), safe=\"/;%[]=:$&()+,!?*@'~\")\r\n frag = urlparse.quote(frag.encode(encoding), safe=\"/;%[]=:$&()+,!?*@'~\")\r\n return urlparse.urlunsplit((scheme.decode('utf-8'), authority.decode('utf-8'), path, query, frag))", "def test_rfc_nickkey_normal(s):\n normal = util.rfc_nickkey(s)\n assert normal == util.rfc_nickkey(normal)", "def normalize_fqdn(fqdn):\n if not fqdn:\n return None\n\n if fqdn.endswith('/'):\n fqdn = fqdn.strip('/')\n\n # bare fqdn, fallback to http://\n if not fqdn.startswith('http'):\n fqdn = \"http://%s\" % fqdn\n return fqdn", "def get_cert_issuer_string_hash(cert):\n try:\n public_bytes = cert.public_bytes(encoding=serialization.Encoding.PEM)\n cert_c = crypto.load_certificate(crypto.FILETYPE_PEM, public_bytes)\n\n # get the issuer object from the loaded certificate\n cert_issuer = cert_c.get_issuer()\n\n # for each component presented on certificate issuer,\n # 
converts the respective name and value for strings and join all\n # together\n issuer_attributes = \"\".join(\"/{0:s}={1:s}\".format(name.decode(),\n value.decode())\n for name, value in\n cert_issuer.get_components())\n\n # apply the hash function to binary form of the string above and\n # digest it as a hexdecimal value, and take the first 16 bytes.\n hashed_attributes = \\\n hashlib.md5(issuer_attributes.encode()).hexdigest()[:16]\n\n LOG.info(\"hashed issuer attributes %s from certificate \"\n % hashed_attributes)\n except Exception:\n LOG.exception()\n raise exception.SysinvException(_(\n \"Failed to get certificate issuer hash.\"))\n\n return hashed_attributes", "def django_password_to_ldap(django_password):\n scheme, salt, hexpasswd = django_password.split(\"$\")\n if scheme != 'sha1':\n raise KeyError(\"scheme %r is not supported by django-ldap-pixiedust\" % scheme)\n passwd = hexpasswd.decode('hex')\n return \"{SSHA}\" + encodestring(passwd + str(salt)).rstrip()", "def parse(self, s):\r\n\r\n bytes = dePem(s, \"CERTIFICATE\")\r\n self.parseBinary(bytes)\r\n return self", "def base64_decode(n, encoding='ISO-8859-1'):\t\n decoded = base64.decodestring(n.encode('ascii'))\t\n return tonative(decoded, encoding)", "def encode_ipv4(self, input):\n return inet_aton(input)", "def create_dns_name ( base_name, name ) :\n return create_r53_name( base_name, name) + '.mse-esp.com'", "def normalize_address(address: str):\n return Web3.toChecksumAddress(address.lower())", "def opensslCmsCertCreate( ownerCertFile ):\n opensslCmdArgs = [ \"openssl\", \"crl2pkcs7\", \"-certfile\", ownerCertFile,\n \"-nocrl\", \"-outform\", \"der\" ]\n ownerCertCmsDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return ownerCertCmsDerBase64", "def create_internal_dns_name ( base_name, name ) :\n name = name + '.internal'\n return create_dns_name( base_name, name )", "def parse_chain_id(data: bytes):\n return base58_encode(data, b'Net').decode()", "def decode_email_address(address, charset=\"utf8\"):\r\n name = decode_email_header(address[0])\r\n addr = address[1]\r\n addr = \"<\" + addr + \">\"\r\n if not name:\r\n return addr\r\n return name + \" \" + addr", "def ipv6_to_ipv4(ipv6_str):\n return '.'.join([str(b) for b in ipv6_str[12:]])", "def fromemail(cls, email):\n m = md5.new()\n m.update(email.strip().lower())\n usermd5 = str(m.hexdigest())\n return cls.frommd5(usermd5)" ]
[ "0.5508678", "0.54845005", "0.5012062", "0.4976776", "0.48952284", "0.4741694", "0.46849376", "0.4653766", "0.46534392", "0.46064866", "0.45833877", "0.4550512", "0.445573", "0.44530204", "0.44428983", "0.44048032", "0.43419307", "0.4334071", "0.43117622", "0.4303301", "0.42497525", "0.4245215", "0.42373177", "0.42349565", "0.4233647", "0.4230566", "0.42281842", "0.42255646", "0.4220895", "0.42070237" ]
0.7482935
0
Generate random ping values (in milliseconds) between 5 and 20, some of them will be assigned randomly to a low latency between 100 and 200 with direct close values between 40 and 80
def generate_fake_ping_data(random_state, size): values = random_state.random_integers(low=5, high=20, size=size) picked_low_latency_values_indexes = random_state.choice( size, round(0.001 * len(values)), replace=False ) # Sets the picked value to a random low ping (e.g.: [100, 200]), # and sets the direct close values to a ping between 40 and 80ms for index in picked_low_latency_values_indexes: if index - 1 >= 0: values[index - 1] = random_state.random_integers(40, 80) values[index] = random_state.random_integers(100, 200) if index + 1 < size: values[index + 1] = random_state.random_integers(40, 80) return values.tolist()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_packets():\n num_packets = randrange(10)\n temp_packets = []\n for i in range(num_packets):\n temp_packets.append(randrange(1000))\n return temp_packets", "def RandomDelay():\r\n sleep(random())", "def _latency(self):\n\n return\n time.sleep(0.005 + random.random() / 30.)", "def sensorInfo(timeDelay = .5):\n cycle = time.time()\n while (time.time() - cycle < timeDelay):\n pass\n return random.randint(1,10000)", "def example2():\n arrive_time=rand_arr_time.rand_arr_time(6,100000,1000) # Get packet arrive time, with option 2, 100000 packets, expected in 1000 seconds.\n return arrive_time", "def get_latency(a, low, high):\n\n if random.random() > 0.69:\n dist = random.randint(1000000,20000000)\n else:\n q = random.uniform(low,high)\n dist = int(stats.pareto.ppf(q, a))\n\n return dist/(1000*66) + 5", "def get_random_sleep() -> int:\n return random.randint(1, 9)", "def time_to_failure():\r\n return random.expovariate(BREAK_MEAN)", "def delayToNextPacket(self):\n delay = -(1.0 / (self.mPacketsPerSecond)) * np.log(1 - np.random.uniform())\n # exponential distribution in seconds\n return round(delay * Constants.TICKS_PER_SECOND)\n #return (Math.round(delay * Main.TICKS_PER_SECOND))", "def getRandomEvent(p = 0.5):\n if random.random() < p: return 1\n else: return 0", "def data_feeder_2():\n return random.sample(range(100), 10)", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def get_random_time_between_tts():\n return random.randint(TIME_BETWEEN_TTS_LOWER, TIME_BETWEEN_TTS_UPPER)", "def time_to_failure():\n return int(random.expovariate(BREAK_MEAN))\n #return MTBF", "def random_ip():\n return new_ip(\"%i.%i.%i.%i\" % (randint(1, 254), # nosec\n randint(1, 254), # nosec\n randint(1, 254), # nosec\n randint(1, 254))) # nosec", "def random_temp():\n temp_min = 154\n temp_max = 500\n temp_interval = 1\n # `range`s are exclusive [min, max)\n return random.randrange(temp_min, temp_max + 1, temp_interval)", "def ping_latency(self, run_test):\n\n if not run_test:\n return\n\n ping_res = None\n\n for site in self.sites:\n ping_cmd = \"ping -i {:.2f} -c {:d} -w {:d} {:s}\".format(\n 0.25, 10, 5, site)\n ping_res = Popen(ping_cmd, shell=True,\n stdout=PIPE).stdout.read().decode('utf-8')\n\n ping_pkt_loss = float(re.findall(', ([0-9.]*)% packet loss',\n ping_res, re.MULTILINE)[0])\n\n ping_rtt_ms = re.findall(\n 'rtt [a-z/]* = ([0-9.]*)/([0-9.]*)/([0-9.]*)/([0-9.]*) ms'\n , ping_res)[0]\n\n ping_rtt_ms = [float(v) for v in ping_rtt_ms]\n\n label = self.labels[site]\n\n self.results[label + \"_packet_loss_pct\"] = ping_pkt_loss\n self.results[label + \"_rtt_min_ms\"] = ping_rtt_ms[0]\n self.results[label + \"_rtt_max_ms\"] = ping_rtt_ms[2]\n self.results[label + \"_rtt_avg_ms\"] = ping_rtt_ms[1]\n self.results[label + \"_rtt_mdev_ms\"] = ping_rtt_ms[3]\n\n if not self.quiet:\n print(f'\\n --- {label} ping latency ---')\n print(f'Packet Loss: {ping_pkt_loss}%')\n print(f'Average RTT: {ping_rtt_ms[0]} (ms)')\n print(f'Minimum RTT: {ping_rtt_ms[1]} (ms)')\n print(f'Maximum RTT: {ping_rtt_ms[2]} (ms)')\n print(f'RTT Std Dev: {ping_rtt_ms[3]} (ms)')\n \n return ping_res", "def rand_int_jitter():\n return random.randint(0, 10)", "def random():\r\n return R.NextDouble()", "def randomSleep():\n\n timeToWait = random.choice((0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 5))\n print(\"sleeping\", timeToWait)\n time.sleep(timeToWait)", "def _burn_cpu():\n while True:\n random()*random()", "def time_per_process():\n return int(random.normalvariate(PT_MEAN, 
PT_SIGMA))", "def time_per_process():\n return int(random.normalvariate(PT_MEAN, PT_SIGMA))", "def ping_repeat(host_name, data_size=None, time_pause=None, count_send=None, timeout=None):\n\n if time_pause is None:\n time_pause = 5. # milliseconds\n \n if count_send is None:\n count_send = 25\n \n if timeout is None:\n timeout = 1000. # ms\n \n # Make a socket, send a sequence of pings.\n sock = create_socket(host_name, timeout=timeout/1000.) # note: timeout in seconds, not milliseconds.\n time_sweep_start = now()\n time_sleeping = 0.\n results = []\n for k in range(count_send):\n if k > 0:\n # Little pause between sending packets. Try to be a little nice.\n time.sleep(time_pause / 1000.) # sleep in seconds, not milliseconds.\n\n res = ping_once(sock, data_size=data_size)\n results.append(res)\n\n # Process results.\n count_timeout = 0\n count_corrupt = 0\n\n times = []\n for res in results:\n if res['is_same_data']:\n times.append(res['time_ping'])\n else:\n if res['time_ping'] is None:\n # Packet was lost because of timeout. Most likely cause.\n count_timeout += 1\n else:\n # Packet is considered lost since returned payload did match original.\n count_corrupt += 1\n\n\n\n count_lost = count_timeout + count_corrupt\n count_recv = count_send - count_lost\n\n # num_packets = len(times)\n data_size = results[0]['data_size']\n\n P = [0.00, 0.25, 0.50, 1.00]\n P_times = percentile(times, P)\n\n # Subtract minimum time from later values.\n for k in range(1, len(P_times)):\n P_times[k] -= P_times[0]\n \n stats = {'host_name':host_name,\n 'data_size':data_size,\n 'times':times,\n 'timeout':res['timeout'],\n 'time_pause':time_pause,\n 'P':P,\n 'P_times':P_times,\n 'count_send':count_send,\n 'count_timeout':count_timeout,\n 'count_corrupt':count_corrupt,\n 'count_lost':count_lost}\n\n # Done.\n return stats", "def sleepBotosan(minTime=0.2, maxTime=0.6):\n time.sleep(random.uniform(minTime, maxTime))", "def random_pause():\n pause_time = random.uniform(0, 0.5)\n sleep(pause_time)", "def unforeseen():\r\n return random.gauss(300., 100.)", "def generate_fake_http_status(random_state, size):\n values = [200] * size\n picked_error_values_indexes = random_state.choice(\n size, round(0.0015 * len(values)), replace=False\n )\n picked_zero_values_indexes = random_state.choice(\n size, round(0.001 * len(values)), replace=False\n )\n\n for index in picked_zero_values_indexes:\n values[index] = 0\n\n for idx in picked_error_values_indexes:\n for i in range(random_state.random_integers(1, 20)):\n try:\n values[idx + i] = 500\n except IndexError:\n pass\n\n return values", "def randomNumberGenerator(self):\n #infinite loop of magical random numbers\n print(\"Making random numbers\")\n while not thread_stop_event.isSet():\n number = random.randint(10000,99999)\n print(number)\n socketio.emit('newQrCode', str(number), namespace='/test')\n time.sleep(5)", "def get_delay(self, pid1, pid2, is_block):\n is_slow = self.node_is_slow[pid1] or self.node_is_slow[pid2]\n p = random.uniform(Parameters.p_min, Parameters.p_max)\n c = Parameters.c_low if is_slow else Parameters.c_high\n m = Parameters.m if is_block else 0\n d = random.expovariate(c / Parameters.d)\n return (p + m/c + d)" ]
[ "0.6549224", "0.6473065", "0.6374598", "0.63629764", "0.63007534", "0.62886626", "0.62327564", "0.61525434", "0.6147178", "0.5989403", "0.5946946", "0.58974904", "0.58592856", "0.58525425", "0.58462614", "0.57716644", "0.5742075", "0.57388645", "0.5709644", "0.5692329", "0.5666731", "0.565969", "0.565969", "0.56461304", "0.5643166", "0.5622797", "0.5599253", "0.55673194", "0.5565871", "0.55656725" ]
0.7326371
0
Generate random OCO status values, mostly assigned to 200, with some 300 status codes over a random range of between 4 and 50 ticks
def generate_fake_oco_status(random_state, size): values = [200] * size picked_error_values_indexes = random_state.choice( size, round(0.001 * len(values)), replace=False ) for index in picked_error_values_indexes: values[index] = 300 _range = range(random_state.random_integers(0, 50)) for n in _range: position = index + n if position < size: values[position] = 300 return values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_fake_http_status(random_state, size):\n values = [200] * size\n picked_error_values_indexes = random_state.choice(\n size, round(0.0015 * len(values)), replace=False\n )\n picked_zero_values_indexes = random_state.choice(\n size, round(0.001 * len(values)), replace=False\n )\n\n for index in picked_zero_values_indexes:\n values[index] = 0\n\n for idx in picked_error_values_indexes:\n for i in range(random_state.random_integers(1, 20)):\n try:\n values[idx + i] = 500\n except IndexError:\n pass\n\n return values", "def __get_status(self):\n return random.choice(self.STATUS)", "def get_status():\n p = randint(0, 10)\n\n if p < 7:\n return problem(\n status=200,\n title=\"Success\",\n detail=\"Il servizio funziona correttamente\",\n ext={\"result\": \"ok\"},\n headers={\"Cache-control\": \"no-cache\"},\n )\n if p < 9:\n return problem(\n status=503,\n title=\"Service Unavailable\",\n detail=\"Questo errore viene ritornato randomicamente.\",\n headers={\"Retry-After\": \"1\", \"Cache-control\": \"no-cache\"},\n )\n\n return problem(\n status=429,\n title=\"Too Many Requests\",\n detail=\"Questo errore viene ritornato randomicamente.\",\n headers={\n \"Cache-control\": \"no-cache\",\n \"X-RateLimit-Limit\": \"10\",\n \"X-RateLimit-Reset\": \"1\",\n \"X-RateLimit-Remaining\": \"0\",\n \"Retry-After\": \"1\",\n },\n )", "def get_status():\n p = randint(0, 10)\n\n if p < 7:\n return problem(\n status=200,\n title=\"Success\",\n detail=\"Il servizio funziona correttamente\",\n ext={\"result\": \"ok\"},\n headers={\"Cache-control\": \"no-cache\"},\n )\n if p < 9:\n return problem(\n status=503,\n title=\"Service Unavailable\",\n detail=\"Questo errore viene ritornato randomicamente.\",\n headers={\"Retry-After\": \"1\", \"Cache-control\": \"no-cache\"},\n )\n\n return problem(\n status=429,\n title=\"Too Many Requests\",\n detail=\"Questo errore viene ritornato randomicamente.\",\n headers={\n \"Cache-control\": \"no-cache\",\n \"X-RateLimit-Limit\": \"10\",\n \"X-RateLimit-Reset\": \"1\",\n \"X-RateLimit-Remaining\": \"0\",\n \"Retry-After\": \"1\",\n },\n )", "def somebells():\n return random.randint(100, 500)", "def getStatusMessage():\n\n now = datetime.datetime.now()\n hour = now.hour\n mood = Sentience.getPrimaryMood()\n exp_mood = Sentience.getExposedPositivity()\n\n random.seed((time.time()//86400*86400))\n\n #sleeping\n if not (9 <= hour < 21) and (mood <= 0.5 or not 7 <= hour < 23):\n if exp_mood < -0.1:\n return random.choice([\n \"bleh\",\n \"not sleeping well\",\n \"why's chat so noisy\",\n \"can't sleep\",\n \"do not disturb pls thx\",\n ])\n\n if mood < 0:\n return random.choice([\n \"crying myself to sleep rn\",\n \":(\",\n \"had a nightmare\",\n \"can't sleep\",\n \"._.\"\n ])\n\n return random.choice([\n \"zzz...\",\n \"sweet dreams\",\n \"good night\",\n \"sleeping...\",\n \"having some rest\"\n ])\n\n if Sentience.isExposedPositivityOverloaded():\n return random.choice([\n \"i'm done\",\n \"too much\"\n \"goodbye\",\n \"tired\",\n \"need rest\",\n ])\n\n #happy\n if mood >= 0.7:\n return random.choice([\n \":D\",\n \"great day\",\n \"happy happy\",\n \"hehe\",\n \"good times\",\n \"yay\",\n \"what's up\",\n \"happiness\",\n \"nice day\",\n ])\n #moody-ish\n if mood >= 0.4:\n return random.choice([\n \"hmm\",\n \"yeet\",\n \"bleh\",\n \"oh\",\n \"moody rn\",\n \"nothing\"\n ])\n #more moody\n if mood >= -0.3:\n return random.choice([\n \"moody rn\",\n \"not happy\",\n \"i'm fine.\",\n \"bleh\",\n \"._.\",\n \":(\",\n ])\n #very unhappy\n return random.choice([\n 
\"sad\",\n \"cries\",\n \"roar\",\n \":_(\",\n \">:(\",\n \"mad\",\n \"angry\",\n \"I'M FINE.\",\n \"bleh\",\n \"no\",\n ])", "def generate_fake_ping_data(random_state, size):\n values = random_state.random_integers(low=5, high=20, size=size)\n picked_low_latency_values_indexes = random_state.choice(\n size, round(0.001 * len(values)), replace=False\n )\n\n # Sets the picked value to a random low ping (e.g.: [100, 200]),\n # and sets the direct close values to a ping between 40 and 80ms\n for index in picked_low_latency_values_indexes:\n if index - 1 >= 0:\n values[index - 1] = random_state.random_integers(40, 80)\n\n values[index] = random_state.random_integers(100, 200)\n\n if index + 1 < size:\n values[index + 1] = random_state.random_integers(40, 80)\n\n return values.tolist()", "def status():\n\n\treturn libcrypto.RAND_status()", "def random():\n return constant(1)", "def tire_objectif() -> int:\n return random.randint(min_objectif, max_objectif)", "def random_lottery():\n\treturn 0", "def randomAction():\n return np.random.randint(0, POSSIBLE_ACTIONS)", "def randomNumberGenerator(self):\n #infinite loop of magical random numbers\n print \"Making random numbers\"\n while not thread_stop_event.isSet():\n global counter\n #if counter == 8:\n # emit('done', {'data': 'finito'})\n # break\n if (counter == 20):\n socketio.emit('done', {'data': 'Connected'},namespace='/test')\n counter = 0\n break\n number = round(random()*10, 3)\n print number\n print counter\n counter = counter +1\n socketio.emit('my response', {\n 'data': number,\n 'message': 'Commit message!',\n 'hash': number,\n 'url': 'http://andyzg.github.io',\n }, namespace='/test')\n sleep(self.delay)", "def random_test(self):\r\n return 1", "def random_test(self):\r\n return 1", "async def d100(self, ctx):\n await ctx.send(random.randint(1, 100))", "def get_random_value():\n return randint(0, 255) / 256.0", "def generate() -> int:\n return randint(0, 1000000000)", "def event_m20_11_x60(z48=14):\n \"\"\"State 0,1: Random number generation\"\"\"\n GenerateRandomNumber(0, 0, 99)\n \"\"\"State 2: Random number judgment [[DEBUG]] flag 109990 ON can be reliably attached\"\"\"\n CompareEventRandValue(0, 0, z48, 5)\n CompareEventFlag(0, 109990, 1)\n if ConditionGroup(0):\n \"\"\"State 3: Atari: Can be generated\"\"\"\n return 0\n else:\n \"\"\"State 4: Lost: Cannot be generated\"\"\"\n return 1", "def _get_random_value(self):\r\n return random.randint(1, 10)", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def randomNumberGenerator(self):\n #infinite loop of magical random numbers\n print(\"Making random numbers\")\n while not thread_stop_event.isSet():\n number = random.randint(10000,99999)\n print(number)\n socketio.emit('newQrCode', str(number), namespace='/test')\n time.sleep(5)", "def time_to_failure():\n return int(random.expovariate(BREAK_MEAN))\n #return MTBF", "def time_to_failure():\r\n return random.expovariate(BREAK_MEAN)", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def computer_random():\r\n ci = random.sample(range(1,43),5)\r\n return ci", "def get_random_value():\n return randint(0, 255) / 256.0", "async def dice( con, min1=1, max1=6):\n r = random.randint(min1, max1)\n await bot.send_message(con.message.channel, \"**{}**\".format(r))", "def get_random_sleep() -> int:\n return random.randint(1, 9)" ]
[ "0.68448544", "0.6299806", "0.6133432", "0.6133432", "0.58981127", "0.57652456", "0.5718969", "0.569057", "0.5639926", "0.55422294", "0.55386275", "0.55349576", "0.5518699", "0.5515259", "0.5515259", "0.550914", "0.54684156", "0.54265535", "0.5388602", "0.5386833", "0.53852564", "0.5380798", "0.53803945", "0.5377205", "0.53633267", "0.53633267", "0.53453076", "0.5327913", "0.5312086", "0.52904266" ]
0.7287352
0
Generate random database connection counts, mostly between 10 and 20 occurrences, with some ticks assigned a maximum connection count of up to 200
def generate_fake_db_connections(random_state, size): values = random_state.random_integers(low=10, high=20, size=size) picked_max_connections_indexes = random_state.choice( size, round(0.005 * len(values)), replace=False ) for index in picked_max_connections_indexes: # Creates a linear progression between a healthy state to a bad state linear_values = np.arange(20, 210, 10) for n in range(len(linear_values) + random_state.random_integers(0, 6)): try: if n >= len(linear_values): values[index + n] = 200 else: values[index + n] = linear_values[n] except IndexError: pass return values.tolist()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_rnd(self):\n\n # query max number of threads\n gennum = apache.AP_MPMQ_MAX_SPARE_THREADS\n # make generators\n # this bit is from Python lib reference\n g = random.Random(time.time())\n result = [g]\n for i in range(gennum - 1):\n laststate = g.getstate()\n g = random.Random()\n g.setstate(laststate)\n g.jumpahead(1000000)\n result.append(g)\n return result", "def db_10000(\n empty_graph_db: graph_tuple_database.Database,\n) -> graph_tuple_database.Database:\n # Generate some random graph tuples.\n graph_pool = [\n random_graph_tuple_database_generator.CreateRandomGraphTuple()\n for _ in range(128)\n ]\n\n # Generate a full list of graphs by randomly selecting from the graph pool.\n random_graph_tuples: List[graph_tuple_database.GraphTuple] = [\n copy.deepcopy(random.choice(graph_pool)) for _ in range(10000)\n ]\n # Index the random graphs by ir_id.\n for i, t in enumerate(random_graph_tuples):\n t.ir_id = i\n t.data_flow_steps = i\n\n with empty_graph_db.Session(commit=True) as s:\n s.add_all(random_graph_tuples)\n # Create the empty graph tuples. These should be ignored by the graph\n # reader.\n s.add_all(\n [\n graph_tuple_database.GraphTuple.CreateEmpty(0),\n graph_tuple_database.GraphTuple.CreateEmpty(0),\n ]\n )\n\n return empty_graph_db", "def rand_ips(max_num=None):\n count = 0\n while max_num is None or count < max_num:\n if max_num is not None:\n count += 1\n yield random_ip()", "def choose_random(N):\n db = pymongo.MongoClient('localhost',27020).chembldb\n # Get all CHEMBL IDs\n db.molecules.ensure_index('chembl_id')\n chembl_ids = [m['chembl_id'] for m in db.molecules.find().sort('chembl_id')]\n print len(chembl_ids)\n random.seed(201405291515)\n rands = random.sample(chembl_ids, N)\n return(rands)", "def generate_graph(size, number_of_clusters, minimal_size):\n base_list = list(range(size))\n result_list = []\n random.shuffle(base_list)\n for i in range(number_of_clusters - 1):\n size = random.randint(minimal_size, len(base_list) - (number_of_clusters - i - 1) * minimal_size)\n cluster = []\n for n in range(size):\n actual = random.choice(base_list)\n base_list.remove(actual)\n cluster.append(actual)\n result_list.append(strongly_connect(cluster))\n result_list.append(strongly_connect(base_list))\n\n while len(result_list) < 5:\n result_list.append([])\n\n print(sorted([len(i) for i in result_list], reverse=True)[:5])\n\n return weak_connect_graph(result_list)", "def generate(method='random'):\n if method == 'random':\n min_connection_ind = Connection.possible_connections.start\n max_connection_ind = Connection.possible_connections.stop - 1\n connections_list = [Connection(randint(min_connection_ind, max_connection_ind), building)\n for building in Connection.possible_building_codes]\n else:\n connections_list = [Connection(0, building) for building in Connection.possible_building_codes]\n return connections_list", "def random_coaching_graph(num_classes, min_size, max_size, existing_rate):\n\n n = 0\n coaching_graph = graph.Graph(directed=True)\n for i in range(num_classes):\n n += 1\n class_size = random.randint(min_size, max_size)\n coach = infect.User(str(n))\n existing_users = coaching_graph.nodes()\n num_existing = len(existing_users)\n coaching_graph.add_node(coach)\n for j in range(class_size):\n if random.uniform(0,1) > existing_rate or num_existing < 100:\n n += 1\n user = infect.User(str(n))\n else:\n user = random.choice(existing_users)\n\n coaching_graph.add_edge(coach, user)\n\n\n return coaching_graph", "def fake_session_data():\n\n for _ in range(0, 
40):\n print random.randint(1, 8)", "def clients(sandbox):\n num_nodes = 2\n for i in range(num_nodes):\n sandbox.add_node(i, params=['--connections', '30'])\n yield sandbox.all_clients()", "async def create_db_pool(bot):\n # Insted of directly connecting we try to connect and if it fails, we notify the user\n try:\n bot.db = await asyncpg.create_pool(\n host=os.environ[\"host\"],\n database=os.environ[\"database\"],\n user=os.environ[\"user\"],\n password=os.environ[\"password\"],\n ssl=os.environ[\"ssl\"],\n )\n except Exception as e:\n bot.db = NoneClass(\"Database is not available, please check your configuration\")\n print_error(\"Could not connect to the database, Most likely because the database credentials are invalid\")\n # We try to play a sound to let the user know that the bot is online\n # If the sound playing fails we just ignore it\n try:\n playsound(\"assets/sounds/connected_to_database.mp3\", block=False)\n except PlaysoundException:\n pass", "def n_async_connect(config, n=1):\n\n\taconfig = config.copy()\n\taconfig['async'] = True\n\n\tresult = []\n\tfor _ in range(n):\n\t\tconn = psycopg2.connect(**aconfig)\n\t\twait(conn)\n\t\tresult.append(conn)\n\treturn result", "def generate_users(count=10):\n for i in range(count):\n user = generate_random_user()\n db.session.add(user)\n db.session.commit()", "def get_random_db(self):\n rnd = random.random() * self.totals[-1]\n pool_index = bisect.bisect_right(self.totals, rnd)\n return list(self.pool)[pool_index]", "def test_benchmark_BufferedGraphReader_global_random(\n benchmark, db_10000: graph_tuple_database.Database, buffer_size_mb: int,\n):\n benchmark(\n list,\n reader.BufferedGraphReader(\n db_10000,\n buffer_size_mb=buffer_size_mb,\n order=reader.BufferedGraphReaderOrder.GLOBAL_RANDOM,\n ),\n )", "def generate_packets():\n num_packets = randrange(10)\n temp_packets = []\n for i in range(num_packets):\n temp_packets.append(randrange(1000))\n return temp_packets", "def concurrent_connections(self):\n return self.connector.limit", "def generate_number_of_events(max_number):\n\n return randint(1, max_number)", "def generate_new_port(n):\n ports = sample(portsPool,n)\n for p in ports:\n ind = portsPool.index(p)\n del portsPool[ind]\n return ports", "def topology_random_reconnect(self, probability):\n\t\tfor i in range(len(self.sites)):\n\t\t\tfor j in range(len(self.sites)):\n\t\t\t\tif (i != j) and (self.sites[j] in self.sites[i].neighbors):\n\t\t\t\t\tif numpy.random.rand() < probability / 2.0:\n\t\t\t\t\t\tchoice_list = [s for s in self.sites if not (s in self.sites[i].neighbors)]\n\t\t\t\t\t\tif len(choice_list) > 0:\n\t\t\t\t\t\t\tchoosed = numpy.random.choice(choice_list)\n\t\t\t\t\t\t\tself.sites[i].neighbors.remove(self.sites[j])\n\t\t\t\t\t\t\tself.sites[j].neighbors.remove(self.sites[i])\n\t\t\t\t\t\t\tself.sites[i].neighbors.append(choosed)\n\t\t\t\t\t\t\tchoosed.neighbors.append(self.sites[i])", "def add_random_connection(self, genome, max_attempts=50):\n\n \"\"\"\n TODO:\n If all attempts failed, the channel is most likely dense, therefore use the dense selection process.\n \"\"\"\n\n def _connect(n0, n1, channel):\n new_connection_spec, new_connection_params = self.connection_factory()\n self.add_connection(\n genome,\n new_connection_spec,\n new_connection_params,\n n0.historical_mark,\n n1.historical_mark,\n channel)\n\n channel_weights = []\n acc_weight = 0\n for channel in self._channels:\n acc_weight += genome.calc_channel_capacity(channel)\n channel_weights.append(acc_weight)\n\n if acc_weight == 0:\n 
return False\n\n # TODO: see if can implement weighted random choice more efficiently using bisect\n channel, = random.choices(self._channels, k=1, cum_weights=channel_weights)\n src_type, dst_type = channel\n src_neurons = genome.layers()[src_type]\n dst_neurons = genome.layers()[dst_type]\n\n n_attempt = 0\n while n_attempt < max_attempts:\n n_attempt += 1\n\n n0 = random.choice(src_neurons)\n while n0 is None:\n n0 = random.choice(src_neurons)\n\n n1 = random.choice(dst_neurons)\n while n1 is None:\n n1 = random.choice(dst_neurons)\n\n if genome.has_connection(n0.historical_mark, n1.historical_mark):\n continue\n\n _connect(n0, n1, channel)\n return True\n return False", "def _created_connections(self):\n return len(self._available_connections) + len(self._in_use_connections)", "def randomNumberGenerator(self):\n #infinite loop of magical random numbers\n print \"Making random numbers\"\n while not thread_stop_event.isSet():\n global counter\n #if counter == 8:\n # emit('done', {'data': 'finito'})\n # break\n if (counter == 20):\n socketio.emit('done', {'data': 'Connected'},namespace='/test')\n counter = 0\n break\n number = round(random()*10, 3)\n print number\n print counter\n counter = counter +1\n socketio.emit('my response', {\n 'data': number,\n 'message': 'Commit message!',\n 'hash': number,\n 'url': 'http://andyzg.github.io',\n }, namespace='/test')\n sleep(self.delay)", "def prep_database(sqla):\n create_multiple_people(sqla, random.randint(5, 15))\n create_multiple_accounts(sqla)\n return [account.id for account in sqla.query(Account.id).all()]", "def generate_dataset():\n num_list = 10\n return [generate_list() for _ in range(num_list)]", "def generate() -> int:\n return randint(0, 1000000000)", "def data_feeder_2():\n return random.sample(range(100), 10)", "def setup(\n conn_list: List[DBConn],\n n_objs: Optional[int],\n seed: Optional[int],\n n_tables: Optional[int],\n db_name: Optional[str],\n table_names: Optional[List[str]],\n ) -> Tuple[int, str, List[str]]:\n\n if not seed:\n seed = int(time.time() * 1e6)\n\n random.seed(seed)\n\n if not db_name:\n db_name = \"db_\" + \"\".join(random.choices(string.ascii_lowercase, k=32))\n\n if not table_names:\n table_names = [\n \"table_\" + \"\".join(random.choices(string.ascii_lowercase, k=32))\n for _ in range(n_tables if n_tables else 0)\n ]\n\n if not n_objs:\n n_objs = random.randint(min(len(table_names), 4), 64)\n\n for obj_id in range(n_objs):\n obj_ver[obj_id] = 0\n\n conn_list[0].execute(\"set global lock_wait_timeout=7\")\n conn_list[0].execute(\"create database if not exists {}\".format(db_name))\n for conn in conn_list:\n conn.execute(\"use {}\".format(db_name))\n\n for table in table_names:\n conn_list[0].execute(\n \"create table if not exists {}(id int not null, value text, primary key (id))\".format(table)\n )\n\n logger.info(\n \"[+] starting history generation (seed: {}, n_objs: {} DB: {}, tables: {})\".format(\n seed, n_objs, db_name, \", \".join(table_names)\n )\n )\n\n return n_objs, db_name, table_names", "def seed(self, seed):\n for i, conn in enumerate(self._conns):\n conn.send((self.SEED, ((seed + i) % 2**32,)))\n return [conn.recv() for conn in self._conns]", "def seed_random(max_integer):\n return random.randrange(0,max_integer);", "def connection_test_interval(self, seconds=0, minutes=0, hours=0, days=0, timeout=10):\n print('Initializing test.')\n end = datetime.datetime.now() + datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)\n rows = []\n while end > 
datetime.datetime.now():\n rows.append(self.connection_test(timeout))\n delta_time = end - datetime.datetime.now()\n if delta_time.days < 0:\n delta_time = datetime.timedelta(0)\n print(f'\\r{delta_time} remaining.', end='')\n print()\n\n return self.connection_data" ]
[ "0.6073718", "0.5974244", "0.57532966", "0.5751731", "0.572404", "0.57141167", "0.5678083", "0.5616051", "0.5593124", "0.5589323", "0.5562536", "0.55545825", "0.551556", "0.548296", "0.54501927", "0.5440958", "0.5414827", "0.5375403", "0.5371529", "0.5350935", "0.5327943", "0.53251266", "0.5319925", "0.53027135", "0.5300692", "0.52726746", "0.5264882", "0.5261088", "0.5249612", "0.52449656" ]
0.7374885
0
Generate random HTTP status codes; most values will be 200, but some of them will be randomly assigned a 500 over a random range of between 1 and 20 ticks, or a 0 (the value curl returns when a Web site is not reachable)
def generate_fake_http_status(random_state, size): values = [200] * size picked_error_values_indexes = random_state.choice( size, round(0.0015 * len(values)), replace=False ) picked_zero_values_indexes = random_state.choice( size, round(0.001 * len(values)), replace=False ) for index in picked_zero_values_indexes: values[index] = 0 for idx in picked_error_values_indexes: for i in range(random_state.random_integers(1, 20)): try: values[idx + i] = 500 except IndexError: pass return values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status():\n p = randint(0, 10)\n\n if p < 7:\n return problem(\n status=200,\n title=\"Success\",\n detail=\"Il servizio funziona correttamente\",\n ext={\"result\": \"ok\"},\n headers={\"Cache-control\": \"no-cache\"},\n )\n if p < 9:\n return problem(\n status=503,\n title=\"Service Unavailable\",\n detail=\"Questo errore viene ritornato randomicamente.\",\n headers={\"Retry-After\": \"1\", \"Cache-control\": \"no-cache\"},\n )\n\n return problem(\n status=429,\n title=\"Too Many Requests\",\n detail=\"Questo errore viene ritornato randomicamente.\",\n headers={\n \"Cache-control\": \"no-cache\",\n \"X-RateLimit-Limit\": \"10\",\n \"X-RateLimit-Reset\": \"1\",\n \"X-RateLimit-Remaining\": \"0\",\n \"Retry-After\": \"1\",\n },\n )", "def get_status():\n p = randint(0, 10)\n\n if p < 7:\n return problem(\n status=200,\n title=\"Success\",\n detail=\"Il servizio funziona correttamente\",\n ext={\"result\": \"ok\"},\n headers={\"Cache-control\": \"no-cache\"},\n )\n if p < 9:\n return problem(\n status=503,\n title=\"Service Unavailable\",\n detail=\"Questo errore viene ritornato randomicamente.\",\n headers={\"Retry-After\": \"1\", \"Cache-control\": \"no-cache\"},\n )\n\n return problem(\n status=429,\n title=\"Too Many Requests\",\n detail=\"Questo errore viene ritornato randomicamente.\",\n headers={\n \"Cache-control\": \"no-cache\",\n \"X-RateLimit-Limit\": \"10\",\n \"X-RateLimit-Reset\": \"1\",\n \"X-RateLimit-Remaining\": \"0\",\n \"Retry-After\": \"1\",\n },\n )", "def sample_500_response():\n response = requests.get(\"https://google.com\")\n response.status_code = 500\n return response", "def test_allowed_response_codes(self):\n\n def fake_401_response(*args, **kwargs):\n return MockRequestsResponse(401, content=\"Weird\")\n\n def fake_200_response(*args, **kwargs):\n return MockRequestsResponse(200, content=\"Hurray\")\n\n url = \"http://url/\"\n m = HTTP._request_with_timeout\n\n # By default, every code except for 5xx codes is allowed.\n response = m(url, fake_401_response)\n eq_(401, response.status_code)\n\n # You can say that certain codes are specifically allowed, and\n # all others are forbidden.\n assert_raises_regexp(\n BadResponseException,\n \"Bad response.*Got status code 401 from external server, but can only continue on: 200, 201.\", \n m, url, fake_401_response, \n allowed_response_codes=[201, 200]\n )\n\n response = m(url, fake_401_response, allowed_response_codes=[401])\n response = m(url, fake_401_response, allowed_response_codes=[\"4xx\"])\n\n # In this way you can even raise an exception on a 200 response code.\n assert_raises_regexp(\n BadResponseException,\n \"Bad response.*Got status code 200 from external server, but can only continue on: 401.\", \n m, url, fake_200_response, \n allowed_response_codes=[401]\n )\n\n # You can say that certain codes are explicitly forbidden, and\n # all others are allowed.\n assert_raises_regexp(\n BadResponseException,\n \"Bad response.*Got status code 401 from external server, cannot continue.\", \n m, url, fake_401_response, \n disallowed_response_codes=[401]\n )\n\n assert_raises_regexp(\n BadResponseException,\n \"Bad response.*Got status code 200 from external server, cannot continue.\", \n m, url, fake_200_response, \n disallowed_response_codes=[\"2xx\", 301]\n )\n\n response = m(url, fake_401_response, \n disallowed_response_codes=[\"2xx\"])\n eq_(401, response.status_code)\n\n # The exception can be turned into a useful problem detail document.\n exc = None\n try:\n m(url, fake_200_response, \n 
disallowed_response_codes=[\"2xx\"])\n except Exception, exc:\n pass\n assert exc is not None\n\n debug_doc = exc.as_problem_detail_document(debug=True)\n\n # 502 is the status code to be returned if this integration error\n # interrupts the processing of an incoming HTTP request, not the\n # status code that caused the problem.\n #\n eq_(502, debug_doc.status_code)\n eq_(\"Bad response\", debug_doc.title)\n eq_('The server made a request to http://url/, and got an unexpected or invalid response.', debug_doc.detail)\n eq_('Got status code 200 from external server, cannot continue.\\n\\nResponse content: Hurray', debug_doc.debug_message)\n\n no_debug_doc = exc.as_problem_detail_document(debug=False)\n eq_(\"Bad response\", no_debug_doc.title)\n eq_('The server made a request to url, and got an unexpected or invalid response.', no_debug_doc.detail)\n eq_(None, no_debug_doc.debug_message)", "def test_status_code(self):\n formatted_status_code = get_status_code('python')\n self.assertEqual(formatted_status_code, 200) #compares the test result with the result expected", "def test_bad_request_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_bad_request_code.__iter__()\n length = self.test_bad_request_code.__len__()\n\n while value < self.MAX_BAD_REQUEST_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_BAD_REQUEST_CODE_VALUE:\n value += 1\n\n length -= 1", "def view_status_code(codes):\n\n if \",\" not in codes:\n try:\n code = int(codes)\n except ValueError:\n return Response(\"Invalid status code\", status=400)\n return status_code(code)\n\n choices = []\n for choice in codes.split(\",\"):\n if \":\" not in choice:\n code = choice\n weight = 1\n else:\n code, weight = choice.split(\":\")\n\n try:\n choices.append((int(code), float(weight)))\n except ValueError:\n return Response(\"Invalid status code\", status=400)\n\n code = weighted_choice(choices)\n\n return status_code(code)", "def status_code(self) -> int:\n raise NotImplementedError # pragma: no cover", "def _mapErrorCodeToStatus(code):\n if code == 103:\n return http.NOT_FOUND\n return http.INTERNAL_SERVER_ERROR", "def status(self, value):\n if isinstance(value, (long, int)):\n if 100 <= value <= 900:\n status = _RESPONSE_STATUSES.get(value, '')\n if status:\n self._status = '%d %s' % (value, status)\n else:\n self._status = str(value)\n else:\n raise ValueError('Bad response code: %d' % value)\n elif isinstance(value, basestring):\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n if _RE_RESPONSE_STATUS.match(value):\n self._status = value\n else:\n raise ValueError('Bad response code: %d' % value)\n else:\n raise TypeError('Bad type of response code.')", "def status(self, code, content_length=None):", "def test_status_code_200(self):\n\t\tformatted_status = status_code(self)\n\t\tself.assertEqual(formatted_status, '200')", "def somebells():\n return random.randint(100, 500)", "def test_http_non_int(uqcsbot: MockUQCSBot, code: str):\n uqcsbot.post_message(TEST_CHANNEL_ID, f'!http {code}')\n messages = uqcsbot.test_messages.get(TEST_CHANNEL_ID, [])\n assert len(messages) == 2\n assert messages[-1]['text'] == 'usage: `!http <CODE>` - Returns a HTTP cat. '", "def http501(message):\n response = HttpResponse(message)\n response.status_code = 501\n return response", "def check_status_code(resp, expectedStatusCode):\n if resp.status_code != expectedStatusCode:\n raise MiteError(f\"Invalid status code. 
Expected: {expectedStatusCode}, Actual: {resp.status_code} \")", "def http_response(status_code: int) -> Tuple[dict, int]:\n return ({'message': HTTP_STATUS_CODES.get(status_code, '')}, status_code)", "def status(code=200):\n\treturn jsonify(server.status_data()), code", "def get_status():\n return \"OK\" # defaults to a 200 HTML status return code", "def assign_message_code(success: bool):\n return (HTTPStatus.OK.phrase, HTTPStatus.OK) if success\\\n else (HTTPStatus.INTERNAL_SERVER_ERROR.phrase, HTTPStatus.INTERNAL_SERVER_ERROR)", "def getResponseCode(self) -> int:\n ...", "def set_status( code ):", "def _healthcheck():\n return '', 200", "def gather_http_status_code(self):\n\n if self.status.ipv6_syntax_validation:\n self.status.http_status_code = PyFunceble.lookup.HTTPCode(\n self.subject, \"ipv6\"\n ).get()\n else:\n self.status.http_status_code = PyFunceble.lookup.HTTPCode(\n self.subject, self.subject_type\n ).get()", "def test_good_requests_give_200(self):\n # 200 codes cause the result to be parsed instead of returning the code\n self.assertNot(\n isinstance(\n self._request(\n self._make_dummy_notification([DEVICE_ACCEPTED, DEVICE_REJECTED])\n ),\n int,\n )\n )", "def test_falsepositive_error(bad_client):\n res = bad_client.get(\"/v0/falsepositive\")\n assert res.status == \"500 INTERNAL SERVER ERROR\"", "def test_different_status_code_metrics(self):\n @self.graph.route(self.ns.collection_path, Operation.Search, self.ns)\n def foo():\n return \"\", 204\n\n response = self.client.get(\"api/v1/foo\")\n assert_that(response.status_code, is_(equal_to(204)))\n\n self.graph.metrics.histogram.assert_called_with(\n \"route\",\n ANY,\n tags=[\n \"endpoint:foo.search.v1\",\n \"backend_type:microcosm_flask\",\n ],\n )\n self.graph.metrics.increment.assert_called_with(\n \"route.call.count\",\n tags=[\n \"endpoint:foo.search.v1\",\n \"backend_type:microcosm_flask\",\n \"classifier:2xx\",\n ],\n )", "def generate_fake_oco_status(random_state, size):\n values = [200] * size\n picked_error_values_indexes = random_state.choice(\n size, round(0.001 * len(values)), replace=False\n )\n\n for index in picked_error_values_indexes:\n values[index] = 300\n\n _range = range(random_state.random_integers(0, 50))\n\n for n in _range:\n position = index + n\n if position < size:\n values[position] = 300\n\n return values", "def check_for_get_code(self, code, url):\r\n resp = self.client.get(url)\r\n self.assertEqual(resp.status_code, code,\r\n \"got code %d for url '%s'. Expected code %d\"\r\n % (resp.status_code, url, code))\r\n return resp", "def status(self, value):\r\n if isinstance(value, (int, long)):\r\n if 100 <= value <= 999:\r\n st = _RESPONSE_STATUSES.get(value, '')\r\n if st:\r\n self._status = '%d %s' % (value, st)\r\n else:\r\n self._status = str(value)\r\n else:\r\n raise ValueError('Bad response code: %d' % value)\r\n elif isinstance(value, basestring):\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n if _RE_RESPONSE_STATUS.match(value):\r\n self._status = value\r\n else:\r\n raise ValueError('Bad response code: %s' % value)\r\n else:\r\n raise TypeError('Bad type of response code.')" ]
[ "0.69940126", "0.69940126", "0.6744241", "0.59974784", "0.59199", "0.5853604", "0.57881576", "0.577729", "0.56888187", "0.56815845", "0.5667696", "0.5632593", "0.5630804", "0.562505", "0.5622406", "0.5615847", "0.5611005", "0.5606759", "0.5598005", "0.5595857", "0.55831283", "0.5564935", "0.5552717", "0.5542405", "0.5501439", "0.54995084", "0.5493869", "0.5487502", "0.5479177", "0.54592234" ]
0.80144393
0
Listens to the Kafka updates and takes in a handler to process the information
def listen(self, handler): try: logger.info('Listening on topic: {}'.format(self.topic)) consumer = KafkaConsumer(self.topic) for msg in consumer: object_dict = self._extract_updated_object(msg) if object_dict: handler(object_dict) except Exception as ex: if isinstance(ex, KafkaError): logger.error('Error with Kafka: {}'.format(ex))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _subscribe_update_callback(self, client, userdata, message):\n logger.info('Message recieved from {} topic'.format(message.topic))\n payload = message.payload\n try:\n payload_dict = json.loads(payload)\n light_data = payload_dict['current']['state']['desired']\n if self.light.needs_updating(light_data):\n self.light.update_lights(light_data)\n reported_payload = {\n 'state': {\n 'reported': self.light.current_settings()\n }\n }\n JSON_payload = json.dumps(reported_payload)\n self.shadowClient.publish(update_topic, JSON_payload, 0)\n except ValueError:\n logger.error('Value error')\n logger.info(payload)\n except Exception as e:\n logger.error(e.message)", "def main():\n # Wait for dependency services (ES and RE) to be live\n wait_for_dependencies(timeout=180)\n logging.info('Services started! Now starting the app..')\n # Initialize worker group of ESIndexer\n es_indexers = WorkerGroup(ESIndexer, (), count=config()['workers']['num_es_indexers'])\n # Initialize a worker group of RelengImporter\n releng_importers = WorkerGroup(RelengImporter, (), count=config()['workers']['num_re_importers'])\n # All worker groups to send kafka messages to\n receivers = [es_indexers, releng_importers]\n\n # used to check update every minute\n last_updated_minute = int(time.time()/60)\n _CONFIG_TAG = _query_for_config_tag()\n\n # Initialize and run the Kafka consumer\n consumer = _set_consumer()\n\n while True:\n msg = consumer.poll(timeout=0.5)\n if msg is None:\n continue\n curr_min = int(time.time()/60)\n if curr_min > last_updated_minute:\n config_tag = _query_for_config_tag()\n # update minute here\n last_updated_minute = curr_min\n if config_tag is not None and config_tag != _CONFIG_TAG:\n _CONFIG_TAG = config_tag\n # send message to es_indexers to update config.\n es_indexers.queue.put(('ws_event', {\n 'evtype': \"RELOAD_ELASTIC_ALIASES\",\n \"msg\": f\"updating to tag {_CONFIG_TAG}\"\n }))\n if msg.error():\n if msg.error().code() == KafkaError._PARTITION_EOF:\n logging.info('End of stream.')\n else:\n logging.error(f\"Kafka message error: {msg.error()}\")\n continue\n val = msg.value().decode('utf-8')\n try:\n data = json.loads(val)\n except ValueError as err:\n logging.error(f'JSON parsing error: {err}')\n logging.error(f'Message content: {val}')\n for receiver in receivers:\n receiver.queue.put(('ws_event', data))", "def handle_updates(self, update):\r\n self.__manage_pump()", "def _on_message_handler(client, callback_dict, message):\n # If the message topic is in the subscribed list, handle it\n if message.topic in callback_dict:\n callback_dict[message.topic](message)", "def start_loop(\n consumer: Consumer,\n message_handler: Callable[[Message], None],\n on_success: Callable[[Message], None] = lambda msg: None,\n on_failure: Callable[[Message, Exception], None] = lambda msg, e: None,\n on_config_update: Callable[[], None] = lambda: None,\n logger: logging.Logger = logging.getLogger('IR')):\n # Used for re-fetching the configuration with a throttle\n last_updated_minute = int(time.time() / 60)\n if not config()['global_config_url']:\n config_tag = _fetch_latest_config_tag()\n\n while True:\n msg = consumer.poll(timeout=0.5)\n if msg is None:\n continue\n curr_min = int(time.time() / 60)\n if not config()['global_config_url'] and curr_min > last_updated_minute:\n # Check for configuration updates\n latest_config_tag = _fetch_latest_config_tag()\n last_updated_minute = curr_min\n if config_tag is not None and latest_config_tag != config_tag:\n config(force_reload=True)\n config_tag = 
latest_config_tag\n on_config_update()\n if msg.error():\n if msg.error().code() == KafkaError._PARTITION_EOF:\n logger.info('End of stream.')\n else:\n logger.error(f\"Kafka message error: {msg.error()}\")\n continue\n val = msg.value().decode('utf-8')\n try:\n msg = json.loads(val)\n except ValueError as err:\n logger.error(f'JSON parsing error: {err}')\n logger.error(f'Message content: {val}')\n consumer.commit()\n continue\n logger.info(f'Received event: {msg}')\n start = time.time()\n try:\n message_handler(msg)\n # Move the offset for our partition\n consumer.commit()\n on_success(msg)\n logger.info(f\"Handled {msg['evtype']} message in {time.time() - start}s\")\n except Exception as err:\n logger.error(f'Error processing message: {err.__class__.__name__} {err}')\n logger.error(traceback.format_exc())\n # Save this error and message to a topic in Elasticsearch\n on_failure(msg, err)", "def listen(client, main):\n\n @client.event\n async def on_message_edit(old, message):\n main.message_handler(message, True)", "async def event_handler(self, response):\n data = ujson.loads(response.data)\n if isinstance(data, dict):\n if data['event'] == 'subscribed':\n print('Subscribed to channel: {0}, for pair: {1}, on channel ID: {2}'.format(data['channel'], data['pair'], data['chanId']))\n self.channel_mapping[data['chanId']] = (data['channel'], data['pair'])\n elif data['event'] == 'info':\n print('Exchange: {0} Websocket version: {1}'.format(self.id, data['version']))\n elif isinstance(data, list):\n if isinstance(data[1], str):\n print('Heartbeat on channel {0}'.format(data[0]))\n else:\n # Published data, time stamp and send to appropriate queue\n timestamp = self.microseconds() / 1000\n datetime = self.iso8601(timestamp)\n if self.channel_mapping[data[0]][0] == 'book':\n pair_id = self.channel_mapping[data[0]][1]\n await self.queues['orderbooks'][pair_id].put((data, timestamp, datetime))", "def handle_sc_event(store, changed_keys, info):\n\n for key in changed_keys:\n SC_HANDLERS[key](key=key, info=info)", "def handler(event, context):\n message = [record['body'] for record in event.get('Records', [])]\n email_record = json.loads(message[0])[\"Records\"][0]\n\n new_email = [(email_record['s3']['bucket']['name'],\n urllib.parse.unquote(email_record['s3']['object']['key']))]\n\n if new_email:\n LOG.info(\"Changed/new object notification received from S3 bucket to the sqs queue\")\n for bucket, s3_key in new_email:\n LOG.info(\"Processing S3 bucket://%s/%s\", bucket, s3_key)\n email_body = S3.Object(bucket, s3_key).get()['Body'].read().decode('utf-8')\n\n # Process PBS job info and push the metadata doc to AWS ES\n _process_pbs_job_info(email_body)\n else:\n LOG.info(\"No new/updated email record found in the S3 bucket\")", "def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)", "def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)", "def update_handler(self, update):\n \n m = update.get('message',{})\n\n sender = self.get_sender(m)\n if sender == self.user_id: return\n \n # code that'll execute upon receiving any message\n if self.greeting and m:\n self.greeting(m)\n\n # parse bot commands\n command, params = self._parse_commands(m)\n \n if command: \n self._apply_command_filter(m, command, params)\n else:\n self._apply_msg_filter(m)", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def __msg_handler(self, bot, 
update):\n trigger = update.message.text\n self.__handler(bot, update, trigger)", "def listen_to_message(**payload):\n\n data = payload['data']\n\n try:\n message = data['text']\n user = data['user']\n message_id = data['client_msg_id']\n time = data['event_ts']\n channel = data['channel']\n process_data({'user': user, 'message': message, 'message_id': message_id, 'channel': channel, 'time': time})\n except KeyError:\n pass\n except Exception as e:\n logging.error(e)\n return None", "def __msg_handler(self, update, bot):\n trigger = update.message.text\n self.__handler(bot, update, trigger)", "def _handle_message(self, msg):\n self.event('message', msg)", "def listen_for_new_updates(event):\n\n if event.retval:\n news_indicator.create_and_update_menu(event.retval)\n if NewsIndicator.notifications:\n show_notifications(event.scheduled_run_time)\n Gtk.main()", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def on_update_characteristic(self, characteristic, data):\r\n try:\r\n if len(self._listeners) == 0:\r\n return\r\n\r\n data_str = self._decode_data(data)\r\n\r\n if characteristic.uuid == \\\r\n Debug.DEBUG_STDINOUT_BLUESTSDK_SERVICE_UUID:\r\n for listener in self._listeners:\r\n # Calling user-defined callback.\r\n self._thread_pool.submit(listener.on_stdout_receive(\r\n self, data_str))\r\n\r\n elif characteristic.uuid == \\\r\n Debug.DEBUG_STDERR_BLUESTSDK_SERVICE_UUID:\r\n for listener in self._listeners:\r\n # Calling user-defined callback.\r\n self._thread_pool.submit(listener.on_stderr_receive(\r\n self, data_str))\r\n except BTLEException as e:\r\n self._node._unexpected_disconnect()", "def __clb_handler(self, update, context):\n trigger = update.callback_query.data\n self.__handler(context, update, trigger)", "def event_in_cb(self, msg):\n self.event = msg.data", "def cb(self, msg, args):\n log, topic = args\n\n if topic not in self.msg_counter.keys():\n self.msg_counter[topic] = 0\n\n self.msg_counter[topic] += 1\n self.check_msg(msg, log, topic)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_watch(self, payload):\n pass", "def subscribe(self, event_handler):\n pass # pragma: no cover", "def subscribeConsumer(consumer):", "def subscribe(receiver, updateInterval=10):", "def listen(self):\n self.init_delete_batch_processing()\n self.init_file_batch_processing()\n self.init_symlink_batch_processing()\n\n self.loop.create_task(self.start_watching_roots())\n\n self.revisit_cond = asyncio.Condition()\n self.loop.create_task(self.start_polling_revisits())\n\n self.start_polling_changes()\n self.loop.run_forever()\n self.stop_polling_changes()" ]
[ "0.63225055", "0.6289907", "0.6207734", "0.6169839", "0.61065733", "0.6104087", "0.60993975", "0.59799695", "0.5972327", "0.5955253", "0.5955253", "0.5937061", "0.5928503", "0.5820025", "0.5792916", "0.5788568", "0.5752678", "0.57204837", "0.57182246", "0.5712616", "0.5697013", "0.56833184", "0.56725556", "0.5671034", "0.5671034", "0.5666628", "0.5652497", "0.56498396", "0.5649809", "0.5648582" ]
0.7878974
0
Extracts core object data from a Kafka message
def _extract_updated_object(self, msg): value = getattr(msg, 'value', None) if not value: return None str_value = str(msg.value, 'utf-8') payload = json.loads(str_value)['payload'] object_dict = payload['after'] return object_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decodeData(self, msg):\n stream_info = {}\n try:\n # Decode Streaming data\n stream_data = streaming_pb2.MsgProto()\n stream_data.ParseFromString(msg)\n stream_info = {\n \"topic\": stream_data.subject,\n \"timestamp\": stream_data.timestamp,\n \"customer_id\": stream_data.customer_id,\n \"data\": stream_data.data,\n \"msp_ip\": stream_data.msp_id\n }\n except Exception as e:\n raise e\n\n try:\n if stream_info:\n data_decoder = self.event_decoder\n data_decoder.ParseFromString(stream_info[\"data\"])\n stream_info[\"data\"] = json_format.MessageToDict(data_decoder, preserving_proto_field_name=True)\n return stream_info\n except Exception as e:\n print(\"Exception Received for customer \" +\n \"%s: %s\" % (self.topic, str(e)))", "def kafka_deserializer(data):\n return pickle.loads(data)", "def _process_message(self, obj):\n pass", "def get_new_data(self):\n msgs = self.consumer.get_new_messages()\n return convert_messages(msgs, self.deserialise_function)", "async def extract_data_from_msg(msg):\n\n body = msg.get_body(('html', 'plain',))\n\n msg_out = {\n 'status': 'delivered',\n 'subject': msg['Subject'],\n 'received': datetime.datetime.now().isoformat(),\n 'from': msg['From'].addresses[0],\n 'recipients': list(msg['To'].addresses),\n 'original-to': msg['X-Original-To'],\n 'delivered-to': msg['Delivered-To'],\n 'dkim-signature': msg['DKIM-Signature'],\n 'message-id': msg['Message-ID'],\n 'domain-signature': msg['DomainKey-Signature'],\n 'date': msg['Date'].datetime,\n 'return': msg['Return-Path'] or msg['Reply-To'],\n 'in-thread': False,\n 'body-type': body.get_content_type(),\n 'body-charset': body.get_content_charset(),\n 'body': body.get_content(),\n 'attachments': []\n }\n\n for ind, att in enumerate(msg.iter_attachments()):\n msg_out['attachments'].append({\n 'index': ind,\n 'type': att.get_content_type(),\n 'filename': att.get_filename()\n })\n\n if msg['Thread-Topic']:\n msg_out['in_thread'] = True\n msg_out['thread-topic'] = msg['Thread-Topic']\n msg_out['thread-index'] = msg['Thread-index']\n\n return msg_out", "def get_new_data(self):\n msgs = self.consumer.get_new_messages()\n return convert_messages(msgs, json.loads)", "def get_new_data(self):\n msgs = self.consumer.get_new_messages()\n return convert_messages(msgs, self._process_record)", "def pull(self):\n \n data = self.s.recv(1024)\n if data:\n info = json.loads(data.decode()) \n print(\"DATA FROM BROKER : \", info)\n \n return info.get(\"topic\"), info.get(\"value\")\n pass", "def extract(self, data):", "def _parse_data(self, queue_msg):\r\n try:\r\n result = json.loads(queue_msg)\r\n except (TypeError, ValueError):\r\n log.error(\"External message should be a JSON serialized dict.\"\r\n \" Received queue_msg = %s\", queue_msg)\r\n raise\r\n msg = result['msg']\r\n return msg", "def pull(self):\n \n data = self.s.recv(1024)\n if data:\n info = pickle.loads(data) \n \n return info.get(\"topic\"), info.get(\"value\")\n pass", "def extract_data_from_json_cwl_message(message: dict) -> List[str]:\n if message[\"messageType\"] == \"CONTROL_MESSAGE\":\n logger.info(f\"Got CONTROL_MESSAGE from CloudWatch: {message}, skipping\")\n return []\n\n elif message[\"messageType\"] == \"DATA_MESSAGE\":\n data = []\n\n if \"logEvents\" not in message:\n logger.error(\n f\"Got DATA_MESSAGE from CloudWatch Logs but logEvents are not present, \"\n f\"skipping payload: {message}\"\n )\n return []\n\n events = message[\"logEvents\"]\n\n for event in events:\n message = event[\"message\"]\n logger.debug(f\"message: {message}\")\n\n 
data.append(message)\n\n return data\n\n else:\n logger.error(f\"Got unknown messageType: {message['messageType']} , skipping\")\n return []", "def on_message(client, userdata, message):\n print(f'{message.topic} {message.payload.decode(\"utf-8\")}') # Print message topic and payload", "def message_to_objects(message):\n doc = etree.fromstring(message)\n if doc[0].tag == \"post\":\n # Skip the top <post> element if it exists\n doc = doc[0]\n entities = element_to_objects(doc)\n return entities", "def get_message(self, context, schema_class, extras):\n\n message_data = schema_class(instance=self, extras=extras).data\n return message_data", "def parse_slack_message_object(message_obj):\n metadata = dict(message_obj._body)\n try:\n metadata['channel_name'] = message_obj._client.channels[metadata['channel']]['name']\n except KeyError:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(\n message_obj._client.users[metadata['user']]['name']\n )\n metadata['user_name'] = message_obj._client.users[metadata['user']]['name']\n metadata['team_name'] = message_obj._client.login_data['team']['name']\n\n return metadata", "def deserialize(cls, record):\n return cls(\n source=record.get(\"source\", \"\"),\n category=record.get(\"category\", \"\"),\n name=record.get(\"name\", \"\"),\n message=record.get(\"message\", \"\"),\n timestamp=record.get(\"timestamp\", \"\"),\n **record[\"data\"],\n )", "def get_message():\n # Only run xray in the AWS Lambda environment\n if runs_on_aws_lambda():\n xray_subsegment = xray_recorder.current_subsegment()\n xray_subsegment.put_annotation(\"key\", \"value\")\n # Sample metadata\n # subsegment.put_metadata(\"operation\", \"metadata\", \"python object/json\")\n xray_recorder.end_subsegment()", "async def dump_message(obj, msg, field_archiver=None):\n mtype = msg.__class__\n fields = mtype.f_specs()\n\n obj = collections.OrderedDict() if obj is None else get_elem(obj)\n for field in fields:\n await dump_message_field(obj, msg=msg, field=field, field_archiver=field_archiver)\n return obj", "def pull(self):\n \n data = self.s.recv(1024)\n if data:\n info = ET.fromstring(data)\n info = { info[0].tag : info[0].text, info[1].tag : info[1].text}\n #print(info)\n \n return info.get(\"topic\"), info.get(\"value\")\n pass", "def _unwrap(self, msg):\n return msg['content']['data']", "def parse_message(msg):\n # the message number, increments with each message\n msg_number = msg[0][0]\n # the message type\n msg_type = msg[0][1][0]\n return {\n 'noop': parse_noop_message,\n 'c': parse_content_message,\n }[msg_type](msg, msg_number)", "def on_message(client, userdata, message): \n print(\"Topic: \" + message.topic + \" Message: \" + message.payload.decode('utf-8'))", "def __init__(self, buff):\n # TODO: Handle having produced to a non-existent topic (in client)\n fmt = '[S [ihq] ]'\n response = struct_helpers.unpack_from(fmt, buff, 0)\n self.topics = {}\n for (topic, partitions) in response:\n self.topics[topic] = {}\n for partition in partitions:\n pres = ProducePartitionResponse(partition[1], partition[2])\n self.topics[topic][partition[0]] = pres", "def __init__(self, buff):\n fmt = '[S [ihqY] ]'\n response = struct_helpers.unpack_from(fmt, buff, 0)\n self.topics = defaultdict(dict)\n for (topic, partitions) in response:\n for partition in partitions:\n self.topics[topic][partition[0]] = FetchPartitionResponse(\n partition[2],\n self._unpack_message_set(partition[3],\n partition_id=partition[0]),\n partition[1]\n )", "def read_metadata_record(raw_features_string):\n 
full_metadata = json.loads(raw_features_string)\n return {\"sha256\": full_metadata[\"sha256\"], \"appeared\": full_metadata[\"appeared\"], \"label\": full_metadata[\"label\"]}", "def got_info(self, cloud_obj):", "def process_message_row(self, message_row):\n msg_received_ts = message_row[0]\n msg_raw_data = json.loads(message_row[1])\n msg_data = {\n \"ts\": msg_received_ts,\n \"origin_ts\": msg_raw_data[\"origin_server_ts\"],\n \"origin\": msg_raw_data[\"origin\"],\n \"sender\": msg_raw_data[\"sender\"],\n \"event_id\": msg_raw_data[\"event_id\"],\n \"room_id\": msg_raw_data[\"room_id\"],\n \"message\": msg_raw_data[\"content\"][\"body\"],\n \"url\": msg_raw_data[\"content\"].get(\"url\", None),\n \"chat_type\": \"matrix\",\n \"nick\": self.sender_to_nick(msg_raw_data[\"sender\"]),\n }\n return msg_data", "def handler(message):\n records = message.collect()\n list_collect = []\n for record in records:\n # Parse record\n read = json.loads(record[1].decode('utf-8'))\n list_collect.append((read['text'],read['tags']))\n data = (clean(read['text']),read['tags'])\n job = read['index']\n\n data = spark.createDataFrame([data],['cleaned_body','tags'])\n data = model.transform(data)\n d = data.select('features','tags').collect()\n\n keys = retrieve_keys(d[0]['tags'])\n # look to optimize slice length based on keys and throughput\n slice_length = max(len(keys)//10000,min(len(keys)//49,200))\n print(slice_length)\n keys_sliced = [','.join(keys[i:i+slice_length]) for i in range(0,len(keys),slice_length)]\n keys = spark.createDataFrame(keys_sliced, StringType())\n score_udf = udf(lambda r: get_features(r,d[0]['features']), FloatType())\n keys = keys.withColumn('features', score_udf(keys['value'])).collect()\n # need to get top result from zadd\n report_to_redis(job)\n return", "def read_metadata_record(raw_features_string):\n all_data = json.loads(raw_features_string)\n metadata_keys = {\"sha256\", \"appeared\", \"label\", \"avclass\"}\n return {k: all_data[k] for k in all_data.keys() & metadata_keys}" ]
[ "0.5814129", "0.57360256", "0.54865336", "0.54360235", "0.5400411", "0.5398354", "0.53759235", "0.5354414", "0.5335589", "0.5330614", "0.53231025", "0.5321129", "0.5255818", "0.52450305", "0.52263457", "0.52176183", "0.5157237", "0.51568544", "0.5147842", "0.51438856", "0.5123015", "0.5114333", "0.5092801", "0.50585705", "0.5057688", "0.50554377", "0.5055044", "0.5048033", "0.5040889", "0.50281864" ]
0.6105383
0
Return the Data from the nyc table (listings.csv).
def listings_data(): stmt = db.session.query(nyc).statement df = pd.read_sql_query(stmt, db.session.bind) df["latitude"] = pd.to_numeric(df["latitude"]) df["longitude"] = pd.to_numeric(df["longitude"]) df["accommodates"] = pd.to_numeric(df["accommodates"]) data = df.to_dict(orient='index') # Create a dictionary entry for each row of metadata information # data = {} # for result in results: # # data["ID"] = result[0] # data["LISTING_URL"] = result[1] # data["NAME"] = result[2] # data["HOST_ID"] = result[3] # data["NEIGHBORHOOD"] = result[4] # data["NEIGHBORHOOD_GROUP"] = result[5] # data["CITY"] = result[6] # data["ZIPCODE"] = result[7] # data["LAT"] = float(result[8]) # data["LON"] = float(result[9]) # # print(data) return jsonify(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(self, csv_file):\n pass", "def read_csv():", "def loan_data():\n return pd.read_csv(data_path / \"credit_data.csv\")", "def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data", "def get_data():\n with open(os.path.join('data', 'demo_file.csv'), 'r') as fin:\n reader = csv.reader(fin)\n data = list(reader)\n\n return data", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def GetCategories():\n return GetDataFromCsvFile('categories.csv')", "def read_dataset():\n\n df = pd.read_csv('fake_job_postings.csv', index_col='job_id')\n return df", "def get_csv():\n with requests.Session() as s:\n download = s.get(CSV_URL)\n decoded_content = download.content.decode('utf-8')\n cr = csv.reader(decoded_content.splitlines(), delimiter=',')\n my_list = list(cr)\n return [row[2] for row in my_list[1:]]", "def read_entry_lists(floor_csv, par_df):\n # print(\"Info : my_lib/entry_list/read_entry_lists().\")\n par_id_list = par_df[\"ID\"].values.tolist()\n genre_code_list = par_df[\"GENRE_CODE\"].values.tolist()\n\n \"\"\"\n floor.csv\n ---------\n ID,X,Y,BLOCK\n 27,0,0,C\n 26,1,0,C\n 25,2,0,C\n \"\"\"\n tbl_df = pd.read_csv(floor_csv,\n sep=',', engine='python')\n tbl_id_list = tbl_df[\"ID\"].values.tolist()\n return tbl_id_list, par_id_list, genre_code_list", "def get_data():\n return pd.read_csv(\n 'cetml1659on.dat',\n skiprows=6,\n sep='\\s+',\n na_values=['-99.9', '-99.99'],\n )", "def get_train_data():\n # train set\n train = pd.read_csv(\"train.csv\")\n\n return train", "def gethistory(ticker):\n link = 'http://ichart.finance.yahoo.com/table.csv?s=' + ticker\n response = urllib.urlopen(link)\n html = response.read()\n return readcsv(html)", "def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data", "def get_nyt(dates):\n all_articles = pd.DataFrame()\n total = 0\n print('Date range: ' + str(dates[0]) + ' to ' + str(dates[-1]))\n print(p_statement)\n doc_name = f'all-articles-range-{dates[0]}-{dates[-1]}.csv'\n for date in dates:\n response = send_request(date)\n df = parse_response(response)\n total += len(df)\n all_articles = pd.concat([all_articles, df])\n print('Number of articles collected: ' + str(total))\n all_articles.to_csv(doc_name, index=False)\n print('Table saved as CSV')\n return all_articles", "def read_data():\n data = pd.read_csv('input_data/Preply_tutor_views_datasaet.csv')\n return data", "def get_data(endpoint):\n\tdata = []\n\treader = csv.DictReader(open(\"adata/marks_data_\" + endpoint.lower() + \"_sites.tsv\"), delimiter='\\t')\n\tfor line in reader:\n\t\tdata.append(line)\n\treturn data", "def get_duels(self, csv_file, table):\r\n conn, curs = connection_database(self.db_name, self.host, self.user, self.password, self.local, self.ssl_ca)\r\n query = \"SELECT * FROM {};\".format(table)\r\n curs.execute(query)\r\n result = curs.fetchall()\r\n end_connection(conn, curs)\r\n\r\n # Write comparisons results in csv file\r\n with open(csv_file, mode='w') as file:\r\n file_writer = csv.writer(file, delimiter=',', lineterminator='\\n')\r\n for 
comparison in result:\r\n file_writer.writerow(comparison)", "def GetCountries():\n return GetDataFromCsvFile('countries.csv')", "def get_data(self, tablename):\n conn = self.get_conn()\n c = conn.cursor()\n status_sql = self.get_status_sql(tablename)\n c.execute(status_sql)\n results = c.fetchall()\n data = []\n for row in results:\n data.append(dict_from_row(row))\n conn.commit()\n conn.close()\n return data", "def get_cit_data(ticker):\n log = logging.getLogger(__name__)\n # TODO: This is probably not working anymore (need to refresh files)\n file = os.path.join(c.cfg['default']['data'], 'COT', 'Supplemental', 'T{}.PRN'.format(ticker))\n columns = ['Non-Commercial Longs', 'Non-Commercial Shorts',\n 'Commercial Longs', 'Commercial Shorts',\n 'Non-Reportable Longs', 'Non-Reportable Shorts',\n 'Index Trader Longs', 'Index Trader Shorts']\n\n # Read Data from csv file\n cit = pd.read_csv(file, parse_dates=True, index_col=0, header=None, names=columns)\n \n\n return cit", "def read_table_data(self, table):\n data = []\n index = 0\n for row in table.rows:\n data.append([])\n for cell in row.cells:\n text_data = ''\n for para in cell.paragraphs:\n text_data += para.text.strip(' ')\n data[index].append(text_data)\n index += 1\n\n # trim unneeded rows in old & new reports\n if all('CAPA' in x for x in data[0]):\n self.table_data = data[2:]\n else:\n self.table_data = data[1:]\n # trim end of list\n self.table_data = [row[:5] for row in self.table_data]", "def csvdata():\n return render_template(\"data.html\")", "def GetCurrencies():\n return GetDataFromCsvFile('currencies.csv')", "def gatherStationData():\n flist = list_files()\n station_dics = {}\n print(\"Reading in csv data...\")\n for f_in in flist:\n start,end = find_timespan(f_in)\n station = station_name(f=f_in)\n print(\"File: {0} Station: {1} {2}--{3}\".format(f_in, \n station, start, end))\n station_dics[station] = read_precip(fname=f_in, \n label=station, start_year=start, end_year=end)\n data_list = []\n for s in station_dics:\n data_list.append(station_dics[s]) \n return pd.concat(data_list,axis=1)", "def get_data(self):\n data = load.loader.get_data_for_hotel(self.hotel_name, self.filter)\n\n self.data_items = []\n\n if data:\n for row in data:\n for col in row:\n self.data_items.append(col)\n self.row_number = str(self.get_row_number())\n\n self.hotel_full_data = self.hotel_name + ' ' + load.loader.get_hotel_address(self.hotel_name)", "def read_file():\r\n #with nos permite manejar el archivo dentro del bloque y despues cerrarlo\r\n with open('Entries.csv') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n return data", "def get_basic_data(self):\n\n db = DataBase().clear_table()\n\n data = self.scraper.scrape_top_250()\n for d in data:\n title = d.find(\"td\", class_=\"titleColumn\")\n title = title.find(\"a\")\n title = re.sub(\"<.*?>\", \"\", str(title))\n\n film_id = d.find(\"td\", class_=\"watchlistColumn\")\n film_id = film_id.find(\"div\")\n film_id = film_id[\"data-tconst\"]\n\n year = d.find(\"span\", class_=\"secondaryInfo\")\n year = re.sub(\"<.*?>\", \"\", str(year)).replace(\"(\", \"\").replace(\")\", \"\")\n\n director = d.find(\"td\", class_=\"titleColumn\")\n director = director.find(\"a\")\n director = director[\"title\"]\n director, *cast = director.split(\", \")\n director = director.replace(\" (dir.)\", \"\")\n\n rating = d.find(\"td\", class_=\"ratingColumn imdbRating\")\n rating = rating.find(\"strong\")\n rating = re.sub(\"<.*?>\", \"\", str(rating))\n\n poster = d.find(\"td\", 
class_=\"posterColumn\")\n poster = poster.find(\"img\")[\"src\"]\n poster = re.sub(\"@.+\", \"@._V1_FMjpg_UY474_.jpg\", poster)\n\n DataBase().populate_table(\n (title, film_id, year, director, \", \".join(cast), rating, poster)\n )", "def getCSV(self, name, startDate, endDate):\n\t\tstartYear, startMonth, startDay = self.getDate(str(startDate))\n\t\tendYear, endMonth, endDay = self.getDate(str(endDate))\n\t\turl = 'http://real-chart.finance.yahoo.com/table.csv?s='+name\n\t\turl = url + '&a='+startMonth+'&b='+startDay+'&c='+startYear\n\t\turl = url + '&d='+endMonth+'&e='+endDay+'&f='+endYear+'&g=d&ignore=.csv'\n\t\tresponse = urllib2.urlopen(url)\n\t\tcr = csv.reader(response)\n\t\tcsvList = list(cr)\n\t\treturn csvList", "def get_share_list():\n url = \"https://www1.nseindia.com/content/equities/EQUITY_L.csv\"\n resp = requests.get(url)\n resp = csv_to_list(resp)[1:-1]\n return create_stock(resp)" ]
[ "0.61890984", "0.60342085", "0.60018224", "0.59828407", "0.5916176", "0.5891303", "0.5889863", "0.5724109", "0.56923395", "0.5666559", "0.5636994", "0.5636967", "0.56118476", "0.5610883", "0.56072223", "0.5600865", "0.5569014", "0.5568943", "0.55665857", "0.55617994", "0.55563986", "0.5553683", "0.5529635", "0.552419", "0.55229586", "0.5512458", "0.55084026", "0.5506422", "0.54982805", "0.5483113" ]
0.6930467
0
Find the nunber of boundary edges that each node participate in. This is stored as a node level attribute 'b_deg' in nodes in g that are part of nbunch
def set_boundary_degrees_old(g, sg): boundary_degree = {} for u in sg.nodes(): boundary_degree[u] = 0 for v in g.neighbors(u): if not sg.has_node(v): boundary_degree[u] += g.number_of_edges(u, v) # for a multi-graph nx.set_node_attributes(sg, values=boundary_degree, name='b_deg')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boundary_nodes(G,nbunch):\n eboundary = nx.edge_boundary(nx.Graph(G),nbunch)\n nboundary = []\n for u,v in eboundary:\n if (u in nbunch) and (v not in nbunch):\n if u not in nboundary:\n # avoid duplicate entries\n nboundary.append(u)\n elif (u not in nbunch) and (v in nbunch):\n if v not in nboundary:\n # avoids duplicate entries\n nboundary.append(v)\n else:\n raise Exception(\"Error in edge boundary\")\n return nboundary", "def boundary_edges(G,zones):\n edges = set()\n for z in zones:\n edges.update(nx.edge_boundary(G,z.nodes()))\n nodes = set()\n n2n = {}\n for u,v in edges:\n nodes.update({u})\n nodes.update({v})\n #try:\n # n2n[u] += [v]\n #except KeyError:\n # n2n[u] = [v]\n #\n #try:\n # n2n[v] += [u]\n #except KeyError:\n # n2n[v] = [u]\n Gbound = nx.Graph(G.subgraph(nodes))\n for comp in nx.connected_components(Gbound):\n for nn in comp:\n n2n[nn] = comp.difference({nn})\n return edges,n2n", "def nelebd(self):\n if self._nelebd is None:\n if self.boundary_file is not None:\n self._nelebd = self.get_bnd_npoin()\n else:\n raise TelemacException(\\\n \"Can not read nelebd no boundary file was given\")\n\n return self._nelebd", "def GetBoundaryEdgesPent(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n node_arranger = np.array([\n [0,1],\n [1,2],\n [2,3],\n [3,4],\n [4,0],\n ])\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],\n self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)\n\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges", "def find_B(self):\n max_lb = 0\n for arc in self.arcs():\n lb = self.arc_info[arc[0]]['lower_bound']\n max_lb = max(max_lb, lb)\n n = len(self)\n m = len(list(self.edges()))\n return((m - n + 2)*max_lb)", "def bounds(self, prev_frb=None, exclude_ports=None):\n if not exclude_ports:\n exclude_ports = []\n out_bounds = []\n node_list = self._net_node.peer_list\n my_nid = self._net_node.node_id\n node_list.append(my_nid)\n node_list.sort()\n myi = 
node_list.index(my_nid)\n num_nodes = len(node_list)\n for i, peer1 in enumerate(node_list):\n # Preconditions:\n # peer1 < peer2\n # self < peer1 < peer2 || peer1 < peer2 <= self\n if i == myi:\n continue\n p2i = (i + 1) % num_nodes\n peer2 = node_list[p2i]\n assert (my_nid < peer1 or peer2 <= my_nid),\\\n \"invalid nid ordering self={0}, peer1={1}, peer2={2}\".\\\n format(my_nid, peer1, peer2)\n # base scenario when the local node is initiating the FRB\n hops = 1\n root_nid = my_nid\n bound_nid = my_nid\n if not prev_frb:\n bound_nid = peer2\n frb_hdr = FloodRouteBound(root_nid, bound_nid, hops)\n if frb_hdr:\n prtno = self._net_node.query_port_no(peer1)\n if prtno and prtno not in exclude_ports:\n out_bounds.append((prtno, frb_hdr))\n else:\n assert prev_frb.bound_nid != my_nid,\\\n \"this frb should not have reached this node ny_nid={0} prev_frb={1}\".\\\n format(my_nid, prev_frb)\n hops = prev_frb.hop_count + 1\n root_nid = prev_frb.root_nid\n if peer1 < my_nid: # peer1 is a predecessor\n if prev_frb.bound_nid > peer1 and prev_frb.bound_nid < my_nid: # bcast to peer1\n if peer2 < prev_frb.bound_nid:\n bound_nid = peer2\n else:\n bound_nid = prev_frb.bound_nid\n else:\n continue\n else: # peer1 is a successor\n if prev_frb.bound_nid < my_nid: # bcast to peer1\n if peer2 < my_nid and peer2 > prev_frb.bound_nid:\n bound_nid = prev_frb.bound_nid\n elif (peer2 < my_nid and peer2 <= prev_frb.bound_nid) or \\\n peer2 > my_nid:\n bound_nid = peer2\n else: # prev_frb.bound_nid > my_nid\n if prev_frb.bound_nid <= peer1:\n continue\n if peer2 < my_nid or prev_frb.bound_nid < peer2:\n bound_nid = prev_frb.bound_nid\n else:\n bound_nid = peer2\n frb_hdr = FloodRouteBound(root_nid, bound_nid, hops)\n if frb_hdr:\n prtno = self._net_node.query_port_no(peer1)\n if prtno and prtno not in exclude_ports:\n out_bounds.append((prtno, frb_hdr))\n return out_bounds", "def starting_nodes(self):\n # Level 0 nodes in a directed graph will have 1 or more out_edges but no in_edges\n nodes_with_outs = set(e[0] for e in self.G2.out_edges())\n nodes_with_ins = set(e[1] for e in self.G2.in_edges())\n return nodes_with_outs - nodes_with_ins", "def GetBoundaryEdgesTri(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n\n node_arranger = NodeArrangementTri(p-1)[0]\n\n # CONCATENATE ALL THE EDGES MADE FROM ELEMENTS\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]]),axis=0)\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % 
self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges", "def u_edges(self):\n return np.linspace(self.u_min, self.u_max, self.n_ubins + 1)", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def boundary_edge_ids(self,):\n return self.boundary_edge_ids_", "def num_edges(self):\n return (self.n * (self.L.size() - 1) - self.num_loop_vertices()) // (1 + int(not self.variant.is_bipartite()))", "def calculateMetallicityBinEdges(self):\n\n if self.binInLogSpace:\n logMetallicities = np.log10(self.metallicityGrid)\n b= logMetallicities[:-1] + (logMetallicities[1:] - logMetallicities[:-1])/2.\n b = 10.**b #the boundaries for integration are not in log space so\n #convert to \"normal\" numbers.\n else:\n b= (self.metallicityGrid[1:] - self.metallicityGrid[:-1])/2. \\\n + self.metallicityGrid[:-1] \n\n self.metallicityBinEdges = np.zeros(len(b)+2)\n\n #the lowest/highest metallicity bin edge are set in options\n #the calculated b edges are all in between\n\n self.metallicityBinEdges[0] = self.metallicityLowerLimit\n self.metallicityBinEdges[-1] = self.metallicityUpperLimit\n self.metallicityBinEdges[1:-1] = b", "def neighborhood(G,n,o):\n base = G[n]\n neighbors = {}\n neighbors[n] = 0\n newNodes = set(neighbors.keys())\n for i in range(1,o+1):\n #for node in neighbors.keys():\n nodes = newNodes.copy()\n newNodes = set()\n for node in nodes:\n branch = G[node]\n for node in branch:\n if node not in neighbors:\n newNodes.add(node)\n neighbors[node]=i\n return neighbors", "def cell_edges(self):", "def find_edges(self):\n self.edges = [deepcopy(self.grid[0]), [], deepcopy(self.grid[-1]), []]\n for g in self.grid:\n self.edges[3].append(g[0])\n self.edges[1].append(g[-1])\n self.edges[2]\n self.edges[3]", "def GetBoundaryEdgesQuad(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n node_arranger = NodeArrangementQuad(p-1)[0]\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)\n\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges 
% self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges", "def GetBoundaryEdgesHex(self):\n\n p = self.InferPolynomialDegree()\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n\n # FIRST GET BOUNDARY FACES\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesHex()\n\n # BUILD A 2D MESH\n tmesh = Mesh()\n tmesh.element_type = \"quad\"\n tmesh.elements = self.faces\n tmesh.nelem = tmesh.elements.shape[0]\n del tmesh.faces\n del tmesh.points\n\n # ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES\n self.edges = tmesh.GetEdgesQuad()", "def NPL(self):\n self.edge = np.zeros((np.sum(self.Adjmatrix), 3))\n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n if(self.Adjmatrix[i, j] == 1):\n self.edge[Temp, 0], self.edge[Temp, 1], self.edge[Temp, 2] = i, j, self.Dismatrix[i, j]\n Temp += 1\n \n self.Totallength = ((np.max(self.Geox) - np.min(self.Geox))**2 + (np.max(self.Geoy) - np.min(self.Geoy))**2)**0.5\n self.norm_edge = self.edge[:, 2]/self.Totallength", "def branches(self):\n unique_nodes, unique_counts = np.unique(self.edges, return_counts=True)\n return unique_nodes[ unique_counts >= 3 ]", "def nbor(self):\n if self._nbor is None:\n if self.boundary_file is not None:\n self._nbor = self.get_bnd_numbering()\n else:\n raise TelemacException(\\\n \"Can not read nbor no boundary file was given\")\n\n return self._nbor", "def GetInteriorEdgesPent(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesPent()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesPent()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags", "def get_boundary_layers(cell_cent, el, num_lyrs, bc_loc, struct_grd):\n dim = len(el)\n bound_range = np.zeros(2*dim, dtype=float)\n bound_nodes = {} #dict to store the node numbers of centroids that lie within bound_range\n if(struct_grd):\n fctr = 1\n corr = 0\n lyrs = float(num_lyrs-1)+ 0.0001\n else:\n fctr = 2\n corr = 1\n lyrs = float(num_lyrs)+ 0.0001\n\n lyrs = 1.0001*float(num_lyrs-1)\n for d in range(dim):\n bound_range[2*d] = np.min(cell_cent[:,d]) + corr*np.diff(np.unique(cell_cent[:,d])[0:2])[0] + lyrs*el[d]\n 
bound_range[2*d+1] = np.max(cell_cent[:,d]) - corr*np.diff(np.unique(cell_cent[:,d])[0:2])[0] - lyrs*el[d]\n\n bound_nodes[2*d] = np.where(cell_cent[:,d] <= bound_range[2*d])\n bound_nodes[(2*d+1)] = np.where(cell_cent[:,d] >= bound_range[2*d+1])\n\n #store only those key value pair that are in the bc_loc\n #this in the end returns mesh with ghost layer cells, \n #if they've been applied already\n keys = bound_nodes.keys()\n keys_temp = [kk for kk in keys]\n for kk in keys_temp:\n if kk not in bc_loc:\n bound_nodes.pop(kk, None)\n \n return bound_nodes", "def bfs(g, startnode):\n\n # Initiating dictionaries.\n d = {}\n n = {}\n q = collections.deque()\n\n # Set all distances to infinity.\n for i in g.keys():\n d[i] = float(\"inf\")\n\n # Setting up the initial node's properties.\n d[startnode] = 0\n n[startnode] = 1\n\n q.append(startnode)\n\n while len(q) > 0:\n j = q.popleft()\n\n # For every neighbor of j.\n for h in g[j]:\n if d[h] == float(\"inf\"):\n d[h] = d[j] + 1\n n[h] = n[j]\n q.append(h)\n elif d[h] == d[j] + 1:\n n[h] = n[h] + n[j]\n\n return d, n", "def get_outer_boundary_of_voronoi(self):\n edge = [edge for edge in self.edges if not edge.nxt][0]\n # next(obj for obj in objs if obj.val==5)\n first_vertex = edge.origin\n outer_boundary = []\n while (not edge.get_destination() == first_vertex):\n if(edge.get_destination().is_infinity()):\n edge = edge.twin.nxt\n else:\n outer_boundary.append(edge)\n edge = edge.nxt\n outer_boundary.append(edge)\n return outer_boundary", "def bfs(g,startnode):\n Q = deque('') # initialize Q to be empty queue\n \n inf = float(\"inf\") # define infinity\n result = {}\n # assign infinite length to every node\n for node in g:\n result[node] = inf\n result[startnode] = 0 # assign start node length = 0\n Q.append(startnode) # attach the start node to the queue\n \n while len(Q) > 0:\n j = Q.popleft()\n for neighbor in g[j]:\n if result[neighbor] == inf:\n result[neighbor] = result[j] + 1\n Q.append(neighbor)\n \n return result", "def computeBoundaryEdges(edgesN, bfacesN):\n # Extracts sets of edges-nodes (add 1 to indexes - Matlab indexing)\n edges1 = (bfacesN[[0, 1], :] + 1).transpose()\n edges2 = (bfacesN[[1, 2], :] + 1).transpose()\n edges3 = (bfacesN[[2, 0], :] + 1).transpose()\n\n # Number of boundary-faces\n dim = bfacesN.shape\n nBoundaryFaces = dim[1]\n\n # Boudary faces as sets of their edges (vertices)\n vertices = np.zeros([nBoundaryFaces*3, 2])\n vertices[0::3] = edges1\n vertices[1::3] = edges2\n vertices[2::3] = edges3\n\n # Repeated setts of nodes (joint edges) are eliminated\n [temp, _] = deleteDuplicateRows(vertices)\n\n matrixs = np.concatenate((edgesN + 1, temp), axis=0)\n\n matrixs.sort(axis=1)\n\n tags = np.lexsort((matrixs[:, 1], matrixs[:, 0]))\n matrixs = matrixs[tags]\n\n ind0 = np.diff(matrixs[:, 0]) == 0\n ind1 = np.diff(matrixs[:, 1]) == 0\n\n # Concatenate vectors (vertical stack)\n ind = np.vstack((ind0, ind1))\n ind = ind.transpose()\n\n # Which ones were reps? 
k is a vector of indexes to matrix\n k = np.array(np.all(ind, axis=1).ravel().nonzero())\n\n # tags(k) is an index vector to edgesN (matrix) and denotes those edges\n # which are on boundary tags(k+1) is an index vector to matrix and\n # matrix(tags(k+a)) is the same as bedges, but in different order.\n # I could just return tags(k), but we want that the order is the same\n # as in bEdgesN\n tags2 = np.array(np.argsort(tags[k+1]))\n\n bEdges = np.array(tags[k[0][tags2]], dtype=np.int)\n bEdges = bEdges[0,:]\n\n return bEdges", "def GetBoundaryEdgesTet(self):\n\n p = self.InferPolynomialDegree()\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n # FIRST GET BOUNDARY FACES\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesTet()\n\n # BUILD A 2D MESH\n tmesh = Mesh()\n tmesh.element_type = \"tri\"\n tmesh.elements = self.faces\n tmesh.nelem = tmesh.elements.shape[0]\n del tmesh.faces\n del tmesh.points\n\n # ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES\n self.edges = tmesh.GetEdgesTri()", "def GetBoundaryLoops(self):\n\n self.__do_essential_memebers_exist__()\n\n if self.InferBoundaryElementType() != \"line\":\n raise NotImplementedError(\"Computing boundary loops is only supported for tri/quad meshes for now\")\n\n self.GetEdges()\n\n # First create a node to neighbour map i.e. node as key and its two neighbouring nodes as value\n nodeToNeighboursMap = dict()\n for i in range(self.edges.shape[0]):\n\n if self.edges[i,0] not in nodeToNeighboursMap:\n nodeToNeighboursMap[self.edges[i,0]] = [self.edges[i,1],-1]\n else:\n nodeToNeighboursMap[self.edges[i,0]][1] = self.edges[i,1]\n\n if self.edges[i,1] not in nodeToNeighboursMap:\n nodeToNeighboursMap[self.edges[i,1]] = [self.edges[i,0],-1]\n else:\n nodeToNeighboursMap[self.edges[i,1]][1] = self.edges[i,0]\n\n # Now create a vector of face loops\n faceLoops = []\n while nodeToNeighboursMap:\n # Insert the first node from node to edge map and its two neighbours in order and erase it from the map\n faceLoop = []\n mapBegin = next(iter(nodeToNeighboursMap))\n faceLoop.append(nodeToNeighboursMap[mapBegin][0])\n faceLoop.append(mapBegin)\n faceLoop.append(nodeToNeighboursMap[mapBegin][1])\n nodeToNeighboursMap.pop(mapBegin, None)\n\n while True:\n # Pick the last node in the current face loop and find its neighbours\n if faceLoop[-1] in nodeToNeighboursMap:\n tmp = faceLoop[-1]\n mapIter = nodeToNeighboursMap[faceLoop[-1]]\n # Check if we have not reached the end of the loop i.e. the first element\n if mapIter[0] != faceLoop[0] and mapIter[1] != faceLoop[0]:\n if mapIter[0] == faceLoop[-2]:\n faceLoop.append(mapIter[1])\n elif mapIter[1] == faceLoop[-2]:\n faceLoop.append(mapIter[0])\n else:\n nodeToNeighboursMap.pop(faceLoop[0], None)\n\n nodeToNeighboursMap.pop(tmp, None)\n else:\n faceLoop = np.array(faceLoop)\n faceLoops.append(faceLoop)\n break\n\n return faceLoops", "def eligible_edges(self):\n return self.edges" ]
[ "0.77327895", "0.6465779", "0.64423347", "0.6320274", "0.5931897", "0.5931611", "0.59037197", "0.5835765", "0.5821393", "0.5799897", "0.57672405", "0.57624716", "0.57101005", "0.57058245", "0.5704293", "0.56884634", "0.56703365", "0.5656793", "0.56268764", "0.5616624", "0.56064755", "0.56018305", "0.5601662", "0.55962837", "0.55960625", "0.55929565", "0.5581025", "0.55750084", "0.5564553", "0.55548716" ]
0.65483385
1
Samples from an HMM (Gaussian outputs) with provided parameters.
def sample_HMM(parameters, T, seed=None): K = parameters["num_states"] pi_0 = parameters["init_prob"] A = parameters["trans_matrix"] D = parameters["obs_dim"] mean = parameters["mean"] cov = parameters["cov"] np.random.seed(seed) # create empty numpy arrays to store samples states = np.empty(T, np.int32) obs = np.empty((T, D), np.float32) for t in range(T): if t == 0: # sample the first state from initial distribution states[t] = np.random.choice(K, p=pi_0) else: # get the next state based on transition matrix (the row # corresponding to the previous state) states[t] = np.random.choice(K, p=A[states[t - 1]]) # sample observation from the corresponding Gaussian distribution obs[t] = np.random.multivariate_normal( mean[states[t]], cov[states[t]]) return states, obs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian_parameters(h, dim=-1):\n m, h = torch.split(h, h.size(dim) // 2, dim=dim)\n v = F.softplus(h) + 1e-8\n return m, v", "def sample(self, like_params):\n\t\tassert len(like_params) == 1, f\"SphericalGaussianLikelihood only takes\"\\\n\t\t\t\t+ f\" a single parameter. Found {len(like_params)}.\"\n\t\t# Unwrap the single parameter tuple.\n\t\tlike_params = like_params[0] # [b,s,m,m_dim]\n\t\t# Make a Gaussian distribution.\n\t\tdist = Normal(\n\t\t\t\tlike_params,\n\t\t\t\tself.std_dev*torch.ones_like(like_params),\n\t\t)\n\t\tsamples = dist.sample()\n\t\treturn (samples,)", "def get_fitted_hmm(X, n_components):\n\tHMM = hmm.GaussianHMM(n_components=n_components, covariance_type=\"full\", n_iter=100)\n\tprint(HMM)\n\tHMM.fit(X)\n\tmodel = HMM\n\treturn model", "def paramSamples(self):\n\n if self._paramSamples is not None:\n return self._paramSamples\n timescale = self.mjdmax - self.mjdmin\n T0Vals = self.randomState.uniform(size=self.numSN) * timescale \\\n + self.mjdmin\n mB, x1, c, m = SALT2_MMDist(self.numSN)\n print(\"range of sampled mB\", mB.min(), mB.max())\n x0 = np.zeros(len(mB))\n mB += self.randomState.normal(loc=0., scale=self.Mdisp,\n size=self.numSN)\n H70cosmo = self.cosmo.clone(name='H70cosmo',\n H0=self.cosmo.H0 * (70/self.cosmo.H0.value))\n MB = mB + H70cosmo.distmod(self.zSamples).value - \\\n self.cosmo.distmod(self.zSamples).value\n model = sncosmo.Model(source='SALT2')\n for i, z in enumerate(self.zSamples):\n model.set(z=z, x1=x1[i], c=c[i])\n model.set_source_peakabsmag(MB[i], 'bessellB', 'ab',\n cosmo=self.cosmo)\n x0[i] = model.get('x0')\n mB[i] = model.source.peakmag('bessellB', 'ab')\n df = pd.DataFrame(dict(x0=x0, mB=mB, x1=x1, c=c,\n t0=T0Vals, z=self.zSamples, snid=self.snids))\n self._paramSamples = df\n return self._paramSamples", "def H0Sample(self, index=None, params=None):\n\t\tif index == None:\n\t\t\tindex = random.randint(0,self.numH0-1)\n\t\ts = ReadAIFF(self.dataDir+self.h0[index])\n\t\tP, freqs, bins = mlab.specgram(s, **params)\n\t\treturn P, freqs, bins", "def sample_gmm(model: 'BaseModel', data: Dict[str, torch.Tensor], n_samples: int,\n scaler: Dict[str, Union[pd.Series, xarray.Dataset]]) -> Dict[str, torch.Tensor]:\n setup = _SamplingSetup(model, data, \"gmm\")\n\n # force model into train mode if mc_dropout:\n if setup.mc_dropout:\n model.train()\n\n # make predictions:\n pred = model(data)\n\n # sample for different frequencies:\n samples = {}\n for freq_suffix in setup.freq_suffixes:\n # get predict_last_n for the given the mode:\n frequency_last_n = setup._get_frequency_last_n(freq_suffix=freq_suffix)\n\n # initialize sample_points tensor for sampling:\n sample_points = torch.zeros((setup.batch_size_data, frequency_last_n, setup.number_of_targets, n_samples))\n sample_points *= torch.tensor(float('nan')) # set initial sample_points to nan\n\n # GMM has 3 parts: means (m/mu), variances (s/sigma), and weights (p/pi):\n m, s, p = pred[f'mu{freq_suffix}'], \\\n pred[f'sigma{freq_suffix}'], \\\n pred[f'pi{freq_suffix}']\n\n for nth_target in range(setup.number_of_targets):\n m_target = _subset_target(m[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n s_target = _subset_target(s[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n p_target = _subset_target(p[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n\n mask_nan = ~torch.isnan(m_target[:, -1, 0])\n if any(mask_nan): # skip if the complete mini-batch is invalid\n m_sub = torch.repeat_interleave(m_target[mask_nan, :, :], 
n_samples, dim=0)\n s_sub = torch.repeat_interleave(s_target[mask_nan, :, :], n_samples, dim=0)\n p_sub = torch.repeat_interleave(p_target[mask_nan, :, :], n_samples, dim=0)\n\n # sample values, handle negatives and add to sample points:\n values = _sample_gaussian_mixtures(np.ones(s_sub.shape, dtype=bool), m_sub, s_sub, p_sub)\n values = _handle_negative_values(\n setup.cfg,\n values,\n sample_values=lambda ids: _sample_gaussian_mixtures(ids, m_sub, s_sub, p_sub),\n scaler=scaler,\n nth_target=nth_target)\n values = values.view(-1, n_samples, frequency_last_n).permute(0, 2, 1)\n\n sample_points[mask_nan, :, nth_target, :] = values.detach().cpu()\n\n # add sample_points to dictionary of samples:\n freq_key = f'y_hat{freq_suffix}'\n samples.update({freq_key: sample_points})\n return samples", "def sample(self, params, argmax_sampling=False):\n means = params[:, :, 0]\n log_std = params[:, :, 1]\n\n if argmax_sampling:\n return means\n else:\n return torch.randn_like(means) * torch.exp(log_std) + means", "def H1Sample(self, index=None, params=None):\n\t\tif index == None:\n\t\t\tindex = random.randint(0,self.numH1-1)\n\t\t\tprint index\n\t\ts = ReadAIFF(self.dataDir+self.h1[index])\n\t\tP, freqs, bins = mlab.specgram(s, **params)\n\t\treturn P, freqs, bins", "def sample(self, params):\r\n old_model_trace = poutine.trace(self.model)(self.args, self.kwargs)\r\n traces = []\r\n t = 0\r\n i = 0\r\n while t < self.burn + self.lag * self.samples:\r\n i += 1\r\n # q(z' | z)\r\n new_guide_trace = poutine.block(\r\n poutine.trace(self.model))(old_model_trace, self.args, self.kwargs)\r\n # p(x, z')\r\n new_model_trace = poutine.trace(\r\n poutine.replay(self.model, new_guide_trace))(self.args, self.kwargs)\r\n # q(z | z')\r\n old_guide_trace = poutine.block(\r\n poutine.trace(\r\n poutine.replay(self.guide, old_model_trace)))(new_model_trace,\r\n self.args, self.kwargs)\r\n # p(x, z') q(z' | z) / p(x, z) q(z | z')\r\n logr = new_model_trace.log_pdf() + new_guide_trace.log_pdf() - \\\r\n old_model_trace.log_pdf() - old_guide_trace.log_pdf()\r\n rnd = pyro.sample(\"mh_step_{}\".format(i),\r\n Uniform(torch.zeros(1), torch.ones(1)))\r\n\r\n if torch.log(rnd).data[0] < logr.data[0]:\r\n # accept\r\n t += 1\r\n old_model_trace = new_model_trace\r\n if t <= self.burn or (t > self.burn and t % self.lag == 0):\r\n yield (new_model_trace, new_model_trace.log_pdf())", "def _generate_gmm_data(self):\n \n # randomly generate means and covariances of GMM if params not given\n if not self.params:\n self.params = generate_GMM_params(self.n_clusters, self.spread) \n\n # sample from the GMM\n return [np.random.multivariate_normal(mean, std, size=self.n_points) \\\n for mean, std in self.params]", "def MHSampler(h, data, burn=0, steps=1000000):\n\n h.compute_posterior(data)\n\n for i in xrange(steps):\n p, fb = h.propose()\n p.compute_posterior(data)\n\n if log(numpy.random.random()) < p.posterior_score - h.posterior_score - fb:\n h = p\n\n # yield the current sample\n if i>burn:\n yield h", "def set_params_hmm_exp1(hmm) :\n\thmm.length = 12\n\thmm.dims = [(2,3)]*hmm.length # (latent,emit) dimspace\n\thmm.emit = [\n\t\t[[0.6,0.2,0.2],[0.2,0.6,0.2]]\n\t]*hmm.length\n\thmm.trans = [\n\t\t[[0.7,0.3],[0.3,0.7]]\n\t]*hmm.length\n\thmm.seqmap = [{'a':0,'b':1}]*hmm.length\n\thmm.seqmap2 = [{0:'a',1:'b'}]*hmm.length\n\thmm.featmap = [{'H':0,'B':1,'L':2}]*hmm.length\n\thmm.initprob = [0.5,0.5]\n\thmm.trained = True", "def generate_samples(self, config, num_samples):\n tic = time.time()\n\n generator = GMM(**config)\n weights = 
torch.rand(config.num_components)\n generator.component_weights.set_(weights / weights.sum())\n generator.gaussian.means.set_(torch.randn(config.num_components, config.num_features))\n\n if config.covariance == 'diag':\n generator.gaussian.covars.set_(torch.rand(config.num_components, config.num_features))\n\n samples = generator.sample(num_samples)\n\n toc = time.time()\n print(f\"Generated {num_samples:,} samples in {toc-tic:.2f} seconds.\")\n\n return samples", "def draw_samples(self):\n if self._integrator == 'HMC': \n self.momentum = torch.distributions.Normal(torch.zeros_like(self.parameters), torch.ones_like(self.parameters)).sample()\n start = time.time()\n if (self._integrator == 'RMHMC'): #torch has trouble differentiating through repeated eigenvalues\n self.jitters = self.jitter * torch.rand(self.parameters.shape[0])\n self.jitters[0] = 0.\n self.jitters[1] = 0.\n self.potential_ = self.get_potential()\n self.hamiltonian_ = self.get_hamiltonian()\n self.momentum = self.resample_momenta(init=True)\n self.momenta.append(self.momentum)\n if self.shadow:\n self.shadow_ = self.get_shadow()\n finished = 0\n counter = 0\n if self.verbose:\n for sample in range(self.n_samples):\n self.step()\n if self.degenerate:\n break\n finished += 1\n else:\n# for _ in tqdm(range(self.n_samples)):\n for sample in range(self.n_samples):\n self.step()\n if self.degenerate:\n break\n finished += 1\n counter += 1\n if counter > self.n_samples * 0.05:\n counter = 0\n print('('+str(int((sample+1)/self.n_samples*100))+'% complete)', int(self.accepted),'of', int(self.accepted + self.rejected), 'accepted', '('+str(int((self.accepted)/(self.accepted+self.rejected)*100))+'%)')\n total = float(self.accepted + self.rejected)\n end = time.time()\n if total >= self.n_samples:\n self.completed = True\n self.elapsed += end-start\n print('\\n', int(self.accepted), ' of ', int(self.accepted + self.rejected), ' samples accepted in', self.elapsed, ' seconds (', 100 * self.accepted/total,'%).')\n return None\n else:\n self.degenerates +=1\n self.find_mode()\n self.parameters = params_init + torch.randn(self.parameters.shape[0])/100\n self.reinitiate_samples()\n self.resample_momenta(init = True)\n return None", "def get_h_given_v_samples(self, x):\n \n sig_input = T.dot(x, self.W) + self.bhid\n \n sig_output= T.nnet.sigmoid(sig_input)\n \n sample = self.theano_rand_gen.binomial(size= sig_output.shape,\n n=1, \n p= sig_output,\n dtype=theano.config.floatX)\n \n return [sig_input, sig_output, sample]", "def analyze_data(O, nstates, nsamples=1000, nobservations=None):\n\n # Time interval.\n tau = 0.001 # time interval (s) for plotting\n\n # Truncate O to number of observations.\n if nobservations:\n print \"Using only %d observations\" % nobservations\n O = [ o_t[0:nobservations] for o_t in O ]\n else:\n nobservations = len(O[0])\n\n # Generate MLHMM.\n print \"Generating MLHMM...\"\n estimator = bhmm.MLHMM(O, nstates)\n\n print \"Initial guess:\"\n print str(estimator.hmm.output_model)\n print estimator.hmm.transition_matrix\n print estimator.hmm.stationary_distribution\n\n # Plot initial guess.\n s_t = None\n o_t = O[0]\n filename = os.path.join('figures', 'synthetic-three-state-model-guess-nstates%(nstates)d-nobs%(nobservations)d.pdf' % vars())\n plots.plot_state_assignments(estimator.hmm, s_t, o_t, time_units='s', obs_label='force / pN', tau=tau, pdf_filename=filename)\n\n print \"Fitting HMM...\"\n mle = estimator.fit()\n\n # Plot.\n s_t = mle.hidden_state_trajectories[0]\n import numpy as np\n o_t = O[0]\n filename = 
os.path.join('figures', 'synthetic-three-state-model-mlhmm-nstates%(nstates)d-nobs%(nobservations)d.pdf' % vars())\n plots.plot_state_assignments(mle, s_t, o_t, time_units='s', obs_label='force / pN', tau=tau, pdf_filename=filename)\n\n # Initialize BHMM with MLHMM model.\n print \"Sampling models from BHMM...\"\n sampler = bhmm.BHMM(O, nstates, initial_model=mle)\n bhmm_models = sampler.sample(nsamples=nsamples, save_hidden_state_trajectory=False)\n\n # Generate a sample saving a hidden state trajectory.\n final_models = sampler.sample(nsamples=1, save_hidden_state_trajectory=True)\n\n # Plot final BHMM sample.\n model = final_models[0]\n s_t = model.hidden_state_trajectories[0]\n o_t = O[0]\n filename = os.path.join('figures', 'synthetic-three-state-model-bhmm-nstates%(nstates)d-nobs%(nobservations)d.pdf' % vars())\n plots.plot_state_assignments(model, s_t, o_t, time_units='s', obs_label='force / pN', tau=tau, pdf_filename=filename)\n\n return [mle, bhmm_models]", "def resample_gmms(model_set):\n samples = np.zeros(iter_num)\n\n for i in range(iter_num):\n rand_num = random()\n # For each distribution in the model\n for gmm_distro in model_set:\n # If the random number is less than the distribution's weight, where the weight is the sum of all\n # distribution's weights so far\n if rand_num < gmm_distro[3]:\n # Then sample from the distribution and save it as the path cost, then skip to the next iteration\n samples[i] = gauss(gmm_distro[0], gmm_distro[1])\n break\n\n # plt.hist(samples, bins=50, density=True)\n # plt.show()\n\n return samples", "def __init__(self, quantity, dist_weights, gauss_params, upper_bound, lower_bound):\n self.dist_weights = dist_weights\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n if len(self.dist_weights) != len(gauss_params):\n print(\n \"Number of distribution weights do not match number of distributions!\"\n )\n diff = len(gauss_params) - len(dist_weights)\n if diff < 0:\n print(\"Ignoring trailing distribution weights\")\n self.dist_weights = self.dist_weights[: len(dist_weights) + diff]\n else:\n print(\"Assuming default weights of 1\")\n self.dist_weights.extend([1] * diff)\n # normalize weights\n self.dist_weights = np.array(\n [float(i) / sum(self.dist_weights) for i in self.dist_weights]\n )\n # create samples\n self.samples = []\n self.gauss_params = gauss_params\n sample_size = quantity\n self.sample_min, self.sample_max = [float(\"inf\"), -float(\"inf\")]\n while True:\n # determine the gaussian to sample from for each sample\n mixture_idx = np.random.choice(\n len(self.dist_weights),\n size=sample_size,\n replace=True,\n p=self.dist_weights,\n )\n # create the samples from the respective gaussian\n temp = np.fromiter(\n (ss.norm.rvs(*(gauss_params[i])) for i in mixture_idx), dtype=np.float64\n )\n # remember mixed sampled extremas for plotting\n self.sample_min = min(self.sample_min, temp.min())\n self.sample_max = max(self.sample_max, temp.max())\n # add those samples that are within the bounds\n self.samples = np.concatenate(\n [\n self.samples,\n np.fromiter(\n [x for x in temp if x <= upper_bound and x >= lower_bound],\n dtype=np.float64,\n ),\n ]\n )\n sample_size = quantity - len(self.samples)\n if sample_size == 0:\n break", "def mean_sigma(h):\n h.Fit(\"gaus\", \"q\")\n result_fit = h.GetFunction(\"gaus\")\n mean = result_fit.GetParameter(1)\n sigma = result_fit.GetParameter(2)\n return mean, sigma", "def sample_1d_gmm(n_samples=200, n_components=3, sigma=.1, random_state=None):\n rng = 
check_random_state(random_state)\n\n means = np.arange(n_components)\n pi = np.ones(n_components) / n_components\n\n y = rng.choice(np.arange(n_components), p=pi, size=n_samples)\n\n X = np.random.normal(size=n_samples, scale=sigma)\n X += y\n\n params = {'means': means, 'sigma': sigma, 'pi': pi}\n\n return X.reshape(-1, 1), y, params", "def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n self.params = {}\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var", "def gibbs_hvh(self, h0_sample):\n v1_mean, v1_sample = self.sample_v_given_h(h0_sample)\n h1_mean, h1_sample = self.sample_h_given_v(v1_sample)\n return v1_mean, v1_sample, h1_mean, h1_sample", "def sample_mu(self, val) -> None:\n\n # get data\n data = self.data.reshape((1, -1))\n\n # get values\n gain = val.gain\n states = val.states\n mu_flor = val.mu_flor\n mu_flor_mean = val.mu_flor_mean\n mu_flor_shape = val.mu_flor_shape\n mu_back = val.mu_back\n mu_back_mean = val.mu_back_mean\n mu_back_shape = val.mu_back_shape\n num_data = val.num_data\n num_rois = val.num_rois\n num_states = val.num_states\n\n # initialze variables\n num_vars = num_states + num_rois\n idx = np.where(val.mu_flor_mean > 0)[0]\n # shape\n shape = np.zeros((num_vars, 1))\n shape[:num_states, 0] = mu_flor_shape[:]\n shape[num_states:, 0] = mu_back_shape\n # scale\n scale = np.zeros((num_vars, 1))\n scale[idx, 0] = mu_flor_mean[idx] / mu_flor_shape[idx]\n scale[num_states:, 0] = (mu_back_mean / mu_back_shape)[:]\n\n # initialize a mu vector containing the variables we wish to sample, mu_flor and mu_back\n q = np.zeros((num_vars, 1))\n q[:num_states, 0] = mu_flor[:]\n q[num_states:, 0] = mu_back[:]\n q_old = q.copy()\n idy = q > 0 # keep track of which states are dark (we only sample bright states)\n num_var = q.shape[0]\n\n # hmc dynamics variables\n h = np.random.exponential() / 100\n masses = (1 + np.random.pareto(1, size=q.shape))\n masses_inv = np.zeros(shape=masses.shape) # negative mass is interpretted as an unchanging variable\n masses_inv[masses > 0] = 1 / masses[masses > 0]\n num_steps = np.random.poisson(25)\n\n # create populations array\n pops = np.zeros((num_vars, num_rois * num_data))\n \"\"\"\n pops is an array such that each element i, j corresponds to the \n multiplicitive factor in front of q[i] for data point j in the \n likelihood. 
For example, if in ROI 1 at time level 17 there are two\n fluorophores in the bright state, then we find the element, j,\n corresponding to ROI 1 and time level 17, and we find the element,\n i, corresponding to the bright state, and we set q[i,j]=2 (because\n there are two bright fluorophores), then we would find the i\n corresponding to the background brightness of ROI 1, and we would\n set this q[i,j]=1 (the multiplicitive factor in front of the \n background brightness is 1 when it is the corresponding ROI and 0\n otherwise).\n \"\"\"\n for r in range(num_rois):\n idx = np.arange(r*num_data, (r+1)*num_data)\n pops[:num_states, idx] = states_to_pops(states[r, :, :], num_states)\n pops[num_states + r, idx] = 1\n\n # the conditional probability for the mu vector\n def probability(q_, p_):\n if np.sum(q_ < 0) > 0:\n prob = -np.inf\n else:\n prob = (\n np.sum(stats.gamma.logpdf(data, a=q_.T @ pops, scale=gain)) # likelihood\n + np.sum(stats.gamma.logpdf(q_[idy], a=shape[idy], scale=scale[idy])) # prior\n + np.sum(stats.norm.logpdf(p_[idy], loc=0, scale=np.sqrt(masses[idy]))) # momentum\n )\n return prob\n\n # the gradient of the Hamiltonian with respect to the mu_vector\n def dH_dq(q_):\n if np.any(q_ < 0):\n \"\"\"\n In the event that q_new becomes negative, fast_digamma becomes\n slow. Since q should never be negative anyway, there is no\n need for further computation and we can skip this step knowing\n that this value of q will be rejected anyway.\n \"\"\"\n return q_\n q_new = np.zeros(q_.shape)\n q_new[idy] = (\n (shape[idy] - 1) / q_[idy] - 1 / scale[idy]\n + (pops @ (np.log(data / gain) - fast_digamma(q_.T @ pops)).T)[idy]\n )\n return q_new\n\n # sample momentum\n p = np.random.randn(num_var, 1) * np.sqrt(masses)\n p_old = p.copy()\n\n # run the HMC\n for i in range(num_steps):\n p = p + .5 * h * dH_dq(q)\n q = q + h * p * masses_inv\n p = p + .5 * h * dH_dq(q)\n\n # find acceptance ratio\n P_new = probability(q, p)\n P_old = probability(q_old, p_old)\n if (P_new - P_old) < np.log(np.random.rand()):\n q = q_old\n\n # update the new mu values\n val.mu_flor[:] = q[:num_states, 0]\n val.mu_back[:] = q[num_states:, 0]\n\n return", "def estimate_implicit_moments(config, shared, task_id, hnet, hhnet, num_samples,\n device):\n theta = None\n if hhnet is not None:\n theta = hhnet.forward(cond_id=task_id)\n\n samples = torch.empty((num_samples, hnet.num_outputs)).to(device)\n\n for j in range(num_samples):\n z = torch.normal(torch.zeros(1, shared.noise_dim), config.latent_std).\\\n to(device)\n\n weights = hnet.forward(uncond_input=z, weights=theta)\n\n samples[j, :] = torch.cat([p.detach().flatten() for p in weights])\n\n sample_mean = samples.mean(dim=0)\n sample_std = samples.std(dim=0)\n\n return sample_mean, sample_std", "def sample_gmm_given_params(means, covariances, weights,\n n_samples=100, random_state=None):\n\n rng = check_random_state(random_state)\n\n n_components = len(weights)\n n_features = means.shape[1]\n\n # sample cluster memberships\n y = rng.choice(a=range(n_components), size=n_samples,\n replace=True, p=weights)\n\n # sample data\n X = np.zeros((n_samples, n_features))\n for i in range(n_samples):\n\n X[i, :] = rng.multivariate_normal(mean=means[y[i], :],\n cov=covariances[y[i], :, :])\n\n return X, y", "def sample(self, num_samples, save_steps, file_path):\n\n if not self.gammas:\n self.generate_gammas()\n vel = vel_param([0.1, 0.1])\n delta = 1.5\n n = 10\n m = num_samples\n # initiate an HMC instance\n hmc = HMC(self.xi, vel, delta, n, m)\n\n gammas_xi = 
[[self.gammas[i][s].copy() - 1] for i in range(len(self.gammas)) for s in\n range(len(self.gammas[i]))]\n\n # perform the sampling\n hmc.HMC(gammas_xi, self.saliencies, self.fix_dists_2, self.dist_mat_per_fix)\n samples_xi = hmc.get_samples()\n\n if save_steps:\n with open(file_path, 'wb') as f:\n pickle.dump([samples_xi], f)\n\n return samples_xi", "def sampling(args):\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.shape(z_mean)[1]\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def sampling(self, args):\n self.z_mean, self.z_log_var = args\n batch = K.shape(self.z_mean)[0]\n dim = K.int_shape(self.z_mean)[1]\n epsilon = K.random_uniform(shape=(batch, dim))\n \n return self.z_mean + K.exp(0.5 * self.z_log_var) * epsilon", "def _construct_sample_from_input(self):\n xi = T.matrix()\n xo = T.matrix()\n irs = self.ir_steps\n oputs = [self.obs_transform(self.s0)]\n oputs.extend([self.obs_transform(self.si[i]) for i in range(irs)])\n _, hi_zmuv = self._construct_zmuv_samples(xi, 1)\n sample_func = theano.function(inputs=[xi, xo], outputs=oputs, \\\n givens={ self.x_in: xi, \\\n self.x_out: xo, \\\n self.hi_zmuv: hi_zmuv }, \\\n updates=self.scan_updates)\n def conditional_sampler(XI, XO=None, guided_decoding=False):\n XI = to_fX( XI )\n if XO is None:\n XO = XI\n XO = to_fX( XO )\n # set model to desired generation mode\n old_switch = self.train_switch.get_value(borrow=False)\n if guided_decoding:\n # take samples from guide policies (i.e. variational q)\n self.set_train_switch(switch_val=1.0)\n else:\n # take samples from model's generative policy\n self.set_train_switch(switch_val=0.0)\n # draw guided/unguided conditional samples\n model_samps = sample_func(XI, XO)\n # set model back to either training or generation mode\n self.set_train_switch(switch_val=old_switch)\n return model_samps\n return conditional_sampler" ]
[ "0.64712185", "0.6112422", "0.60827136", "0.60287106", "0.60221756", "0.59152335", "0.58934444", "0.5888099", "0.58843905", "0.58335674", "0.58257145", "0.5813412", "0.5784386", "0.5764342", "0.5741552", "0.5711274", "0.57028776", "0.5673057", "0.5638518", "0.5632263", "0.5621288", "0.5606603", "0.5604122", "0.5594646", "0.55859506", "0.55839103", "0.5564327", "0.55638194", "0.5562069", "0.556125" ]
0.6981797
0
Disconnect signals from the control buttons. This must be done before new signals can be added properly.
def _disconnect_buttons(cls): try: cls.btn_startpause.clicked.disconnect() except RuntimeError: pass try: cls.btn_stopsave.clicked.disconnect() except RuntimeError: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _after_disconnect(self):\r\n _debug('GUISignalGenerator: _after_disconnect()')\r\n self.button_sweep.disable()\r\n self.button_reset.disable()\r\n self.button_send_list.disable()\r\n self.button_rf.set_checked(False, block_events=True).disable()\r\n self.number_dbm.disable()\r\n self.number_frequency.disable()\r\n self.number_list_index.disable()\r\n self.combo_mode.disable()", "def disconnect(self):\n for signal, models in six.iteritems(self._registry):\n for model, keys in six.iteritems(models):\n signal.disconnect(sender=model, weak=False, dispatch_uid=signal)\n self._registry = {}", "def disconnectLayerSignals(self):\n if self.prevLayer:\n try:\n self.prevLayer.featureAdded.disconnect(self.setAttributesFromButton)\n self.prevLayer.editCommandEnded.disconnect(self.updateAttributesAfterAdding)\n self.prevLayer.editFormConfig().setSuppress(QgsEditFormConfig.SuppressOff)\n except:\n pass", "def disconnectAll(self, parentObject):\n\t\tfor widget in parentObject.findChildren(QtWidgets.QWidget):\n\t\t\tif widget.property('xmlTag'):\n\t\t\t\ttry:\n\t\t\t\t\twidget.disconnect()\n\t\t\t\t\t# verbose.debug(\"Disconnect signals from %s\" % widget.objectName())\n\t\t\t\texcept TypeError:\n\t\t\t\t\tpass\n\t\t\t\t\t# verbose.debug(\"No signals to disconnect from %s\" % widget.objectName())", "def unregister_signals(self):\n for _, callback in self.signal_callbacks:\n Signal.unsubscribe(self, callback)", "def _remove_buttons(self, gui):\n gui.greet_button.pack_forget()\n gui.close_button.pack_forget()\n gui.buttons_on.set(False)", "def _unblock_signals(self):\n self._minimum_length_spinbox.blockSignals(False)\n self._maximum_length_spinbox.blockSignals(False)\n self._seed_le.blockSignals(False)", "def disconnect_signals(self, *args):\n children = self.children()\n Pair.disconnect_signals(self, *args)\n for child in children:\n child.disconnect_signals(*args)", "def register_bottombar_button_signals(self):\n self.btn_btmbar_next.released.connect(self.btn_next_released)\n self.btn_btmbar_back.released.connect(self.btn_back_released)\n self.btn_btmbar_next.hide()\n self.btn_btmbar_back.hide()", "def disconnect(self):\n self.is_connected = False\n self.mySerialConnection = None\n self.connect_button.config(command=self.connect);\n self.connect_button.config(text=\"Connect\")", "def disconnect_events(self):\n for c in self._cids:\n self.canvas.mpl_disconnect(c)", "def disconnect_controls_from_player(self) -> None:\n if self._connected_to_player:\n self._connected_to_player.resetinput()\n self._connected_to_player = None\n\n # Send releases for anything in case its held.\n self.on_move_up_down(0)\n self.on_move_left_right(0)\n self.on_hold_position_release()\n self.on_jump_release()\n self.on_pickup_release()\n self.on_punch_release()\n self.on_bomb_release()\n self.on_run(0.0)\n self.on_fly_release()\n else:\n print('WARNING: disconnect_controls_from_player() called for'\n ' non-connected player')", "def disconnect(self):\n self.controlProtocol.disconnect()", "def deactivate(self):\n SignalPlug.deactivate(self)\n self.killconnections()\n self.killtimers()", "def disconnect(signal, handler=None):\n if signal not in REGISTRY:\n return\n if handler:\n REGISTRY[signal].disconnect(handler)\n return\n REGISTRY[signal].registry.clear()\n del REGISTRY[signal]", "def disconnected_from_listeners(signal):\n listeners = list(signal.receivers_for(ANY))\n for listener in listeners:\n signal.disconnect(listener)\n yield\n for listener in listeners:\n signal.connect(listener)", "def stop(self, signal):\n pass", 
"def stop(self, signal):\n pass", "def _unregisterConnect(self, function):\n self._sig_connect.unsubscribe(function)", "def disconnect(controller_widget, control_name, control_instance):\n # Check if the control is connected\n if control_instance.connected:\n control_instance.controller_widget.disconnect()\n # Update the list control connection status\n control_instance.connected = False", "def hide_control_buttons(self):\n self.settings_button.hide()\n self.radio_button.hide()\n self.blank_button.hide()\n self.close_button.hide()", "def delete_button_callback(self, button):\n\t\tRPIO.del_interrupt_callback(button)", "def _clean(self):\n\t\tfor hid in self.handlers_id:\n\t\t\tself.obj.handler_disconnect(hid)", "def controlClear(self):\n self.control = {\"QLabel\": [], \"QTabWidget\": [], \"QPushButton\": [], \"QTextEdit\": [],\n \"QRadioButton\": [], \"QComboBox\": [], \"QSpinBox\": [], \"QTableWidget\": [], \"QLCDNumber\": []}", "def empty_signal(self, *args, **kwargs):\n pass", "def release_control(self):\n pass", "def uninstall_handle_input(self):\n self.active = False", "def unplug(self):\n return signal_base_unplug(self.obj)", "def signal_handler(sig, frame):\r\n print('You pressed Control+C')\r\n led.off()\r\n sys.exit(0)", "def releaseKeyButtons(self):\n self._myKey.removeKeyButtonEvent([\n CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B,\n CONFIG_KEY.BUTTON_JOY_UP,\n CONFIG_KEY.BUTTON_JOY_DOWN,\n CONFIG_KEY.BUTTON_JOY_LEFT,\n CONFIG_KEY.BUTTON_JOY_RIGHT,\n CONFIG_KEY.BUTTON_JOY_OK\n ])" ]
[ "0.7075591", "0.6849481", "0.6808928", "0.675683", "0.6733835", "0.66099083", "0.6497862", "0.649399", "0.6459956", "0.642244", "0.6315134", "0.63065934", "0.6289942", "0.62833315", "0.62390184", "0.6109949", "0.6098552", "0.6098552", "0.60809314", "0.6066603", "0.606277", "0.60477614", "0.60395336", "0.5980108", "0.5941706", "0.59391093", "0.59264416", "0.5902399", "0.58917713", "0.5891106" ]
0.7565067
0
Set buttons to stopped-clock state.
def _set_mode_stopped(cls):
    cls._disconnect_buttons()

    cls.btn_startpause.setText("Start")
    cls.btn_startpause.setIcon(QIcon.fromTheme("media-playback-start"))
    cls.btn_startpause.setWhatsThis("Start a new timer.")
    cls.btn_startpause.clicked.connect(cls.start)

    cls.btn_stopsave.setIcon(QIcon.fromTheme(None))
    cls.btn_stopsave.setText("Stopped")
    cls.btn_stopsave.setWhatsThis("Timer is already stopped.")
    cls.btn_stopsave.setEnabled(False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pauseRunning(self):\r\n self.start_button['state'] = 'normal'\r\n self.pause_button['state'] = 'disabled'\r\n self.running = False", "def _set_mode_running(cls):\n\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Pause\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-pause\"))\n cls.btn_startpause.setWhatsThis(\"Pause timer.\")\n cls.btn_startpause.clicked.connect(cls.pause)\n\n cls.btn_stopsave.setText(\"Stop\")\n cls.btn_stopsave.setIcon(QIcon.fromTheme(\"media-playback-stop\"))\n cls.btn_stopsave.setWhatsThis(\n \"Stop timer. Timer must be stopped \" \"before you can save.\"\n )\n cls.btn_stopsave.clicked.connect(cls.prompt_stop)\n cls.btn_stopsave.setEnabled(True)", "def setup_button_stop(self):\n stop_icon = tk.PhotoImage(file = self.stop_icon)\n self.button_stop = tk.Button(\n self.toolbar,\n width = 24,\n height = 24,\n image = stop_icon,\n command=self.reset_world)\n self.button_stop.image = stop_icon\n self.button_stop.grid(row = 0, column = 4, sticky=tk.W)", "def stopButtonPressed(self):\n\n self.booleanStartButtonPressed = False # For RTLE's updateLabel function to check\n\n self.save_file = open(os.path.join(args.parent_img_path, self.save_file_name), \"a\")\n self.save_file.write(\"\\nStop Button Pressed\\n\")\n self.save_file.close()\n\n print(\"Stop button has been pressed!\")\n\n self.stopButton.setEnabled(False)\n self.startButton.setEnabled(True)\n self.reset.setEnabled(True)\n if self.showCTWM:\n self.setPointsCTWM.stopit() # stops the algorithm\n if self.showWHM:\n time.sleep(0.1)\n self.setPointsWHM.stopit()\n\n # self.save_file.close()", "def on_stop_clicked(self):\n self.stop_button.setEnabled(False)\n if self.stepping == False:\n self.stepping = True\n self.step_event.set()", "def _set_mode_prompt_stop(cls):\n\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Resume\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-start\"))\n cls.btn_startpause.setWhatsThis(\"Resume timer from current time.\")\n cls.btn_startpause.clicked.connect(cls.resume)\n\n cls.btn_stopsave.setText(\"Confirm Stop\")\n cls.btn_stopsave.setIcon(QIcon.fromTheme(\"media-playback-stop\"))\n cls.btn_stopsave.setWhatsThis(\n \"Stop timer. Timer must be stopped \" \"before you can save.\"\n )\n cls.btn_stopsave.clicked.connect(cls.stop)\n cls.btn_stopsave.setEnabled(True)", "def _set_mode_paused(cls):\n\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Resume\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-start\"))\n cls.btn_startpause.setWhatsThis(\"Resume timer from current time.\")\n cls.btn_startpause.clicked.connect(cls.resume)\n cls.btn_stopsave.setText(\"Stop\")\n\n cls.btn_stopsave.setIcon(QIcon.fromTheme(\"media-playback-stop\"))\n cls.btn_stopsave.setWhatsThis(\n \"Stop timer. 
Timer must be stopped \" \"before you can save.\"\n )\n cls.btn_stopsave.clicked.connect(cls.prompt_stop)\n cls.btn_stopsave.setEnabled(True)", "def stop_button(self):\r\n self.set_val(\"display_move\") # Force action\r\n self.update_settings()\r\n self.is_action = True\r\n self.is_pause = True\r\n if self.paths_gen is None:\r\n raise SelectError(\"paths_gen connection has NOT been setup\")\r\n self.paths_gen.stop_gen()", "def all_off(self):\n\n for b in self.gamebuttons:\n b.but_off()", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def _startbuttontimers(self):\n changetimes = {}\n for b in self._buttons:\n if not b._times:\n continue\n t0, t1 = b._times\n changetimes[t0] = changetimes[t1] = 1\n for t in changetimes.keys():\n mw_globals.toplevel.settimer(t, (self._window._buttonschanged, ()))", "def toggle_pause(self):\n self.m_btn_pause = not self.m_btn_pause", "def _button_stop_fired(self):\n self.taking_spectra = False", "def deactivate_buts(self):\n\n for b in self.gamebuttons:\n b.deactivate()", "def reset_and_stop(self):\n self.enabled = False\n self.start_time = None", "def _disconnect_buttons(cls):\n try:\n cls.btn_startpause.clicked.disconnect()\n except RuntimeError:\n pass\n\n try:\n cls.btn_stopsave.clicked.disconnect()\n except RuntimeError:\n pass", "def stop( self ):\n self.data_source.si.daqStop()\n self.timer.stop()\n \n #re-enable the play button\n self.play_button.setEnabled(True)\n self.stop_button.setEnabled(False)\n self.spinbox_timestep.setEnabled(True)", "def stop(self):\n #productive\n profprint()\n self.logic.changeCursor(0)\n self.removeObservers()\n self.fiducialObturatorButton.checked = 0\n self.fiducialButton.checked = 0\n self.validationNeedleButton.checked = 0", "def _on_stop_cycle(self, kwargs: dict) -> None:\n for handle in (HANDLE_TOGGLE_IN_WINDOW, HANDLE_TOGGLE_OUT_WINDOW):\n if handle not in self.handles:\n continue\n name = self.handles.pop(handle)\n self.cancel_timer(name)\n\n self.toggle(opposite_of=self.properties[CONF_STATE])", "def reset_game(self):\n self._buttons_active = False\n self._laser_label.clear()\n self.add_items_labels()\n self.add_question_marks()\n QtCore.QTimer.singleShot(conf.second_display_duration,\n self.enter_phase_two)", "def reset_buttons(self):\n global intervalAdd_\n global intervalDel_\n global annotationAdd_\n global annotationDel_\n\n # self.active_mode = 'default', 'intervalAdd', 'intervalDel'\n if self.active_mode != 'intervalAdd':\n self.push2_1.setChecked(False)\n intervalAdd_ = False\n\n if self.active_mode != 'intervalDel':\n self.push2_2.setChecked(False)\n intervalDel_ = False\n\n if self.active_mode != 'annotationAdd':\n self.push1_1.setChecked(False)\n annotationAdd_ = False\n\n if self.active_mode != 'annotationDel':\n self.push1_2.setChecked(False)\n annotationDel_ = False", "def on_Off_class_button_clicked(self):\n self.timer2.stop()\n self.stu_pic_label_7.clear()\n self.rec_label_7.clear()\n self.CWA_Sno_List.clear()\n self.textEdit.clear()", "def pushbutton_stop_clicked(self):\n\n if self.frame_player.run_player:\n self.frame_player.run_player = False", "def __update_pause_btn(self, value):\n self.pauseBtn.blockSignals(True)\n self.pauseBtn.setChecked(value)\n self.__update_pause_icon()\n self.pauseBtn.blockSignals(False)", "def set_pause(self, pause):\n\n game_status = self.game.get_game_status();\n if(game_status == GameStatus.NotStarted or game_status == GameStatus.Finished):\n return;\n\n if(pause == True):\n 
self.game.set_game_status(GameStatus.Paused);\n self.bttn_pause.set_text(\"Reprendre la partie\");\n\n self.game.stop_timer();\n\n elif(pause == False):\n self.game.set_game_status(GameStatus.InProgress);\n self.bttn_pause.set_text(\"Mettre en pause\");\n\n self.game.start_timer();", "def OnButton2(self):\n if self.timer.IsRunning() & self.timerNotZero:\n self.timer.Stop()\n self.button2.SetBitmap(self.button2play)\n elif self.timerNotZero:\n self.timer.Start()\n self.button2.SetBitmap(self.button2pause)", "def start_stop(self, event):\n self.start_button.SetLabel('Measuring')\n self.start_button.Enable = False\n # Do nothing as of now. Will call measuring functions later.\n self.txt_info_box.SetLabel('Starting measurement.')\n time.sleep(2)\n self.start_button.SetLabel('Start measurement')\n self.start_button.Enable = True\n self.txt_info_box.SetLabel('Completed measurement.')\n self.result_box.SetLabel(\"100.00\")", "def disable_buttons(self):\n\t\tself.cancel.set_sensitive(False)\n\t\tself.logout.set_sensitive(False)\n\t\tself.suspend.set_sensitive(False)\n\t\tself.reboot.set_sensitive(False)\n\t\tself.shutdown.set_sensitive(False)", "def stop(self):\r\n # productive\r\n profprint()\r\n self.logic.changeCursor(0)\r\n self.removeObservers()\r\n self.fiducialObturatorButton.checked = 0\r\n self.fiducialButton.checked = 0\r\n self.validationNeedleButton.checked = 0", "def _on_stop_cycle(self, kwargs: dict) -> None:\n self._cancel_automation()\n self.toggle(state=\"off\")" ]
[ "0.6993939", "0.693941", "0.6659396", "0.66565657", "0.6656494", "0.66300124", "0.6577831", "0.62724304", "0.61642003", "0.615654", "0.6145925", "0.61336136", "0.6110425", "0.60946417", "0.60826224", "0.6073976", "0.6063515", "0.60490686", "0.6044271", "0.6033677", "0.6013967", "0.6008007", "0.5986392", "0.597107", "0.59531647", "0.59311956", "0.5923356", "0.59146756", "0.5909279", "0.58908993" ]
0.7900878
0
Set buttons to save-or-reset state.
def _set_mode_save(cls):
    cls._disconnect_buttons()

    cls.btn_startpause.setText("Reset")
    cls.btn_startpause.setIcon(QIcon.fromTheme("edit-undo"))
    cls.btn_startpause.setWhatsThis("Discard time and reset timer.")
    cls.btn_startpause.clicked.connect(cls.prompt_reset)

    cls.btn_stopsave.setIcon(QIcon.fromTheme("document-save"))
    cls.btn_stopsave.setWhatsThis("Save time and notes to log.")
    cls.btn_stopsave.setText("Save")
    cls.btn_stopsave.setEnabled(True)
    cls.btn_stopsave.clicked.connect(cls.save)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_buttons_values(self):\r\n pass", "def reset_buttons(self):\n global intervalAdd_\n global intervalDel_\n global annotationAdd_\n global annotationDel_\n\n # self.active_mode = 'default', 'intervalAdd', 'intervalDel'\n if self.active_mode != 'intervalAdd':\n self.push2_1.setChecked(False)\n intervalAdd_ = False\n\n if self.active_mode != 'intervalDel':\n self.push2_2.setChecked(False)\n intervalDel_ = False\n\n if self.active_mode != 'annotationAdd':\n self.push1_1.setChecked(False)\n annotationAdd_ = False\n\n if self.active_mode != 'annotationDel':\n self.push1_2.setChecked(False)\n annotationDel_ = False", "def turn_on_buttons(self):\n self.edit_button.setEnabled(True)\n self.delete_button.setEnabled(True)", "def _updateButtonStates(self):\r\n \r\n indexes = self.propertiesTableView.selectionModel().selectedIndexes()\r\n self._setInitialButtonState()\r\n if not self._model.isReadOnly and len(indexes) > 0:\r\n canBeCleared = isDeletable = isRevertable = True\r\n for index in indexes:\r\n if index.isValid():\r\n canBeCleared &= self._model.canBeCleared(index)\r\n isDeletable &= self._model.isDeleteable(index)\r\n isRevertable &= self._model.isRevertable(index)\r\n \r\n # Enable / disable buttons\r\n if len(indexes) == 1:\r\n self.editButton.setEnabled(self._model.flags(indexes[0]) & QtCore.Qt.ItemIsEditable)\r\n self.clearValueButton.setEnabled(canBeCleared)\r\n self.deleteButton.setEnabled(isDeletable)\r\n self.revertButton.setEnabled(isRevertable)\r\n self.addButton.setEnabled(True)", "def setup_mode_saver(self):\n saver_icon = tk.PhotoImage(file = self.saver_icon)\n self.saver_button = tk.Button(\n self.toolbar,\n width = 24,\n height = 24,\n image = saver_icon,\n command = self.save_mode)\n self.saver_button.image = saver_icon\n self.saver_button.grid(row = 0, column = 1, sticky = tk.W)", "def onSaveNotesButton(self, button):\n pass", "def _create_actions(self):\n self.save_button.clicked.connect(self._save)", "def _setInitialButtonState(self):\r\n \r\n self.addButton.setEnabled(not self._model.isReadOnly)\r\n self.editButton.setEnabled(False)\r\n self.clearValueButton.setEnabled(False)\r\n self.deleteButton.setEnabled(False)\r\n self.revertButton.setEnabled(False)", "def enable_btns(self):\n self.saveBtn.setEnabled(True)\n self.openVideoBtn.setEnabled(True)\n self.openAnnotationBtn.setEnabled(True)\n self.resetBtn.setEnabled(True)\n self.speedCombo.setEnabled(True)\n self.newFileBtn.setEnabled(True)\n self.HelpBtn.setEnabled(True)", "def init_all_buttons(self) -> bool:\n raise NotImplementedError", "def update_buttons(self):\n # Enable the Add/Remove step buttons if a Generator is loaded\n enable = self.mgr.obj is not None\n self.addButton.setEnabled(enable)\n self.removeButton.setEnabled(enable)\n self.upButton.setEnabled(enable)\n self.downButton.setEnabled(enable)", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/tvh.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=17)\n\n\t\t# Wetek Button\n self.wetek_button = pyxbmct.RadioButton('')\n self.placeControl(self.wetek_button, 9, 1, rowspan=3, columnspan=3)\n self.connect(self.wetek_button, self.wetek_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wetek', 2) == 1:\n self.wetek_button.setSelected(True)\n else:\n self.wetek_button.setSelected(False)\n wetek = pyxbmct.Image(addonfolder+artsfolder+'/weteksmall.png')\n self.placeControl(wetek, 9, 1, rowspan=3, columnspan=3)\n\n\t\t# K Button\n self.k_button = pyxbmct.RadioButton('')\n 
self.placeControl(self.k_button, 9, 5, rowspan=3, columnspan=3)\n self.connect(self.k_button, self.k_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k', 2) == 1:\n self.k_button.setSelected(True)\n else:\n self.k_button.setSelected(False)\n k = pyxbmct.Image(addonfolder+artsfolder+'/ksmall.png')\n self.placeControl(k, 9, 5, rowspan=3, columnspan=3)\n\n\t\t# Khadas Button\n self.khadas_button = pyxbmct.RadioButton('')\n self.placeControl(self.khadas_button, 9, 9, rowspan=3, columnspan=3)\n self.connect(self.khadas_button, self.khadas_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'khadas', 2) == 1:\n self.khadas_button.setSelected(True)\n else:\n self.khadas_button.setSelected(False)\n khadas = pyxbmct.Image(addonfolder+artsfolder+'/khadasmall.png')\n self.placeControl(khadas, 9, 9, rowspan=3, columnspan=3)\n\n\t\t# Generic Button\n self.generic_button = pyxbmct.RadioButton('')\n self.placeControl(self.generic_button, 9, 13, rowspan=3, columnspan=3)\n self.connect(self.generic_button, self.generic_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'generic', 2) == 1:\n self.generic_button.setSelected(True)\n else:\n self.generic_button.setSelected(False)\n generic = pyxbmct.Image(addonfolder+artsfolder+'/genericsmall.png')\n self.placeControl(generic, 9, 13, rowspan=3, columnspan=3)\n\t\t\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 16, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def toggle_action_buttons(self, val):\n self.copyButton.setEnabled(val)\n self.uploadButton.setEnabled(val)\n self.file_formats_container.setEnabled(val)", "def update_buttons(self):\n # Enable the Add/Remove/Up/Down measurements buttons if a Survey is loaded\n enable = self.mgr.obj is not None\n self.addButton.setEnabled(enable)\n self.removeButton.setEnabled(enable)\n self.upButton.setEnabled(enable)\n self.downButton.setEnabled(enable)\n \n # Enable the Add/Remove condition buttons if a Measurement is selected\n #enable = len(list(self.mgr.obj.measurements)) > 0\n enable = self.measurementTableWidget.rowCount() > 0\n self.addConditionButton.setEnabled(enable)\n self.removeConditionButton.setEnabled(enable)", "def ResetButtons(self):\r\n\r\n floating = self.HasFlag(self.optionFloating)\r\n self.buttons = []\r\n\r\n if not floating and self.HasMinimizeButton():\r\n button = AuiPaneButton(AUI_BUTTON_MINIMIZE)\r\n self.buttons.append(button)\r\n \r\n if not floating and self.HasMaximizeButton():\r\n button = AuiPaneButton(AUI_BUTTON_MAXIMIZE_RESTORE)\r\n self.buttons.append(button)\r\n\r\n if not floating and self.HasPinButton():\r\n button = AuiPaneButton(AUI_BUTTON_PIN)\r\n self.buttons.append(button)\r\n\r\n if self.HasCloseButton():\r\n button = AuiPaneButton(AUI_BUTTON_CLOSE)\r\n self.buttons.append(button)", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/mapdvbs.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n\t\t# Hispasat\n self.hispasat_button = pyxbmct.RadioButton('')\n self.placeControl(self.hispasat_button, 11, 1, rowspan=1, columnspan=4)\n self.connect(self.hispasat_button, self.hispasat_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'hispasat', 2) == 1:\n self.hispasat_button.setSelected(True)\n else:\n self.hispasat_button.setSelected(False)\n hispasat = pyxbmct.Image(addonfolder+artsfolder+'/hispasat.png')\n self.placeControl(hispasat, 11, 1, rowspan=1, columnspan=4)\n \n\t\t# 
Astra\n self.astra_button = pyxbmct.RadioButton('')\n self.placeControl(self.astra_button, 11, 6, rowspan=1, columnspan=4)\n self.connect(self.astra_button, self.astra_button_update)\n# if tools.return_data('TVHWIZARD', 'STRING', 'astra', 2) == 1:\n# self.astra_button.setSelected(True)\n# else:\n# self.astra_button.setSelected(False)\n astra = pyxbmct.Image(addonfolder+artsfolder+'/astra.png')\n self.placeControl(astra, 11, 6, rowspan=1, columnspan=4)\n\n\t\t# Hotbird\n self.hotbird_button = pyxbmct.RadioButton('')\n self.placeControl(self.hotbird_button, 11, 11, rowspan=1, columnspan=4)\n self.connect(self.hotbird_button, self.hotbird_button_update)\n# if tools.return_data('TVHWIZARD', 'STRING', 'hotbird', 2) == 1:\n# self.hotbird_button.setSelected(True)\n# else:\n# self.hotbird_button.setSelected(False)\n hotbird = pyxbmct.Image(addonfolder+artsfolder+'/hotbird.png')\n self.placeControl(hotbird, 11, 11, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/mapdvbc.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n\t\t# Nos\n self.nos_button = pyxbmct.RadioButton('')\n self.placeControl(self.nos_button, 10, 3, rowspan=1, columnspan=4)\n self.connect(self.nos_button, self.nos_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'nos', 2) == 1:\n self.nos_button.setSelected(True)\n else:\n self.nos_button.setSelected(False)\n nos = pyxbmct.Image(addonfolder+artsfolder+'/nos.png')\n self.placeControl(nos, 10, 3, rowspan=1, columnspan=4)\n\n\t\t# Nos Madeira\n self.madeira_button = pyxbmct.RadioButton('')\n self.placeControl(self.madeira_button, 12, 6, rowspan=1, columnspan=4)\n self.connect(self.madeira_button, self.madeira_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'madeira', 2) == 1:\n self.madeira_button.setSelected(True)\n else:\n self.madeira_button.setSelected(False)\n madeira = pyxbmct.Image(addonfolder+artsfolder+'/madeira.png')\n self.placeControl(madeira, 12, 6, rowspan=1, columnspan=4)\n\n\t\t# Nowo\n self.nowo_button = pyxbmct.RadioButton('')\n self.placeControl(self.nowo_button, 10, 9, rowspan=1, columnspan=4)\n self.connect(self.nowo_button, self.nowo_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'nowo', 2) == 1:\n self.nowo_button.setSelected(True)\n else:\n self.nowo_button.setSelected(False)\n nowo = pyxbmct.Image(addonfolder+artsfolder+'/nowo.png')\n self.placeControl(nowo, 10, 9, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def SetButton(self, b):\r\n \r\n self.button = b", "def init_saveas_button(self):\n def save():\n \"\"\"\n function to invoke different save routines\n \"\"\"\n file_name = filedialog.asksaveasfilename(\n filetypes=[\n (\"Scalable Vector Graphics\", \"*.svg\"),\n (\"Postscript\", \"*.ps\"),\n (\"Portable Network Graphics\", \"*.png\")\n ],\n initialdir=os.getcwd())\n if file_name: # save option not cancelled by user\n extension = re.search(r\"\\.[\\w]+$\", file_name)[0]\n if extension == '.png':\n self.parent_class.save_png(file_name)\n elif extension == \".ps\":\n self.parent_class.save_postscript(file_name)\n elif extension == \".svg\":\n 
self.parent_class.save_canvas_svg(file_name)\n else:\n raise TypeError(\"Unknown Filetype\")\n\n self.buttons[\"btn_save_as\"] = Button(\n self.frame, text=\"Save Canvas As\", command=save)\n self.buttons[\"btn_save_as\"].grid(row=5, column=0)", "def change_ops_state(self, state):\n for op_button in self.operators.values():\n op_button['state'] = state", "def _add_buttons(self, gui):\n gui.greet_button.pack()\n gui.close_button.pack()\n gui.buttons_on.set(True)", "def set_to_current_value():\n self.is_binding = False\n button['fg'] = 'black'\n nonlocal button_save_string\n if button_save_string == 'left':\n if controller.slide_left_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_left_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_left_control\n\n elif button_save_string == 'right':\n if controller.slide_right_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_right_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_right_control\n\n elif button_save_string == 'up':\n if controller.slide_up_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_up_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_up_control\n\n elif button_save_string == 'down':\n if controller.slide_down_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_down_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_down_control", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/mapdvbt.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n\t\t# TDT\n self.tdt_button = pyxbmct.RadioButton('')\n self.placeControl(self.tdt_button, 11, 1, rowspan=1, columnspan=4)\n self.connect(self.tdt_button, self.tdt_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'tdt', 2) == 1:\n self.tdt_button.setSelected(True)\n else:\n self.tdt_button.setSelected(False)\n tdt = pyxbmct.Image(addonfolder+artsfolder+'/tdt.png')\n self.placeControl(tdt, 11, 1, rowspan=1, columnspan=4)\n \n\t\t# Meo\n self.meo_button = pyxbmct.RadioButton('')\n self.placeControl(self.meo_button, 11, 6, rowspan=1, columnspan=4)\n self.connect(self.meo_button, self.meo_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'meo', 2) == 1:\n self.meo_button.setSelected(True)\n else:\n self.meo_button.setSelected(False)\n meo = pyxbmct.Image(addonfolder+artsfolder+'/meo.png')\n self.placeControl(meo, 11, 6, rowspan=1, columnspan=4)\n\n\t\t# Vodafone\n self.vodafone_button = pyxbmct.RadioButton('')\n self.placeControl(self.vodafone_button, 11, 11, rowspan=1, columnspan=4)\n self.connect(self.vodafone_button, self.vodafone_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'vodafone', 2) == 1:\n self.vodafone_button.setSelected(True)\n else:\n self.vodafone_button.setSelected(False)\n vodafone = pyxbmct.Image(addonfolder+artsfolder+'/vodafone.png')\n self.placeControl(vodafone, 11, 11, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def activate_buts(self):\n\n for b in self.gamebuttons:\n b.activate()", "def setup_buttons(self):\n confirm = self.centre.findChild(QPushButton, \"confirmBtn\")\n confirm.clicked.connect(partial(self.confirmed))\n cancel = self.centre.findChild(QPushButton, \"cancelBtn\")\n 
cancel.clicked.connect(partial(self.controller.show_selector_menu))", "def create_buttons(self: object) -> None:\n buttons = {\"BWT\": Button(\n self, text=\"BWT\", command=self.bwt_window, width = 15).grid(row=3,column=0, padx=5, pady=6),\n \"DEBWT\": Button(\n self, text=\"reverse BWT\", command=self.debwt_window,width = 15).grid(\n row=6,column=0, padx=5, pady=6),\n \"Huffcode\": Button(\n self, text=\"Huffman coding\", command=self.huffcode_window, width = 15).grid(\n row=3,column=1, padx=5, pady=6),\n \"Huffdecode\": Button(\n self, text=\"Huffman decoding\", command=self.huffdecode_window, width = 15).grid(\n row=6,column=1, padx=5, pady=6),\n \"fullzip\": Button(\n self, text=\"Full zip\", command=self.fullzip_window, width = 15).grid(\n row=3,column=2, padx=5, pady=6),\n \"fullunzip\": Button(\n self, text=\"Full unzip\", command=self.fullunzip_window, width = 15).grid(\n row=6,column=2, padx=5, pady=6),\n \"generate\": Button(\n self, text=\"Generate\", command=self.generate_random, width = 15).grid(\n row=10,column=1, padx=5, pady=6),\n \"save\": Button(\n self, text=\"Save\", command=self.save_random, width = 15).grid(\n row=11,column=1, padx=5, pady=6)}\n\n self.buttons = buttons", "def add_option_save(self):\n logger.debug(\"Adding save option\")\n btnsave = ttk.Button(self.optsframe,\n image=get_images().icons[\"save\"],\n command=self.save_items)\n btnsave.pack(padx=2, side=tk.RIGHT)\n Tooltip(btnsave,\n text=\"Save {}(s) to file\".format(self.tabname),\n wraplength=200)", "def init_buttons(self):\r\n self.btn_encrypt = QtWidgets.QPushButton('Encrypt')\r\n self.btn_encrypt.clicked.connect(self.encrypt)\r\n self.btn_encrypt.setEnabled(False)\r\n\r\n self.btn_decrypt = QtWidgets.QPushButton('Decrypt')\r\n self.btn_decrypt.clicked.connect(self.decrypt)\r\n self.btn_decrypt.setEnabled(False) \r\n\r\n self.layout_buttons = QtWidgets.QGridLayout()\r\n\r\n self.layout_buttons.addWidget(self.btn_encrypt,0,0)\r\n self.layout_buttons.addWidget(self.btn_decrypt,0,1)", "def on_buttonBox_clicked(self, button):\n if button == self.buttonBox.button(QDialogButtonBox.Save):\n self.on_saveButton_clicked()\n elif button == self.refreshButton:\n self.on_refreshButton_clicked()", "def init_save_page(self):\n\n interface_width = self.interface.GAME_WINDOW_WIDTH;\n interface_height = self.interface.GAME_WINDOW_HEIGHT;\n\n bttn_save = Button(\"Sauvegarder la partie\");\n bttn_save.set_color((0, 0, 0));\n bttn_save.set_background_color((255, 255, 255));\n bttn_save.set_pos((interface_width/2, 330));\n bttn_save.set_padding(10);\n bttn_save.set_text_size(24);\n bttn_save.set_border(True);\n bttn_save.set_border_color((0, 224, 73));\n bttn_save.set_border_thickness(3);\n\n bttn_dont_save = Button(\"Quitter sans sauvegarder\");\n bttn_dont_save.set_color((0, 0, 0));\n bttn_dont_save.set_background_color((255, 255, 255));\n bttn_dont_save.set_pos((interface_width/2, 440));\n bttn_dont_save.set_padding(10);\n bttn_dont_save.set_text_size(24);\n bttn_dont_save.set_border(True);\n bttn_dont_save.set_border_color((0, 224, 73));\n bttn_dont_save.set_border_thickness(3);\n\n bttn_cancel = Button(\"Retour au jeu\");\n bttn_cancel.set_color((0, 0, 0));\n bttn_cancel.set_background_color((255, 255, 255));\n bttn_cancel.set_pos((interface_width/2, 500));\n bttn_cancel.set_padding(10);\n bttn_cancel.set_text_size(24);\n bttn_cancel.set_border(True);\n bttn_cancel.set_border_color((0, 224, 73));\n bttn_cancel.set_border_thickness(3);\n\n\n\n page = Page.Save;\n 
self.l_button_to_draw_by_page[page].append(bttn_save);\n self.l_button_to_draw_by_page[page].append(bttn_cancel);\n self.l_button_to_draw_by_page[page].append(bttn_dont_save);" ]
[ "0.76084", "0.65428025", "0.6530788", "0.6467536", "0.64535373", "0.63752264", "0.63124067", "0.6305843", "0.6294539", "0.6227772", "0.62205535", "0.61544055", "0.6147563", "0.61214584", "0.609148", "0.6088591", "0.6075969", "0.6054853", "0.60379755", "0.6025561", "0.60076374", "0.5996808", "0.5947553", "0.5946536", "0.5942512", "0.59319764", "0.5930442", "0.5928986", "0.5830801", "0.5823322" ]
0.72857726
1
Set buttons to reset-prompt state.
def _set_mode_prompt_reset(cls):
    cls._disconnect_buttons()

    cls.btn_startpause.setText("Confirm Reset")
    cls.btn_startpause.setIcon(QIcon.fromTheme("edit-undo"))
    cls.btn_startpause.setWhatsThis("Discard time and reset timer.")
    cls.btn_startpause.clicked.connect(cls.reset)

    cls.btn_stopsave.setIcon(QIcon.fromTheme("document-save"))
    cls.btn_stopsave.setWhatsThis("Save time and notes to log.")
    cls.btn_stopsave.setText("Save")
    cls.btn_stopsave.setEnabled(True)
    cls.btn_stopsave.clicked.connect(cls.save)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Reset(self):\n self.prompt_str = self.prompt_ev.FirstPromptEvaluator()", "def reset_game_ui(self):\n for button in self.button_list:\n button.configure(state='normal', text='')\n self.instructions.configure(text=self.PROMPT.format(self.players[self.game.whose_turn]))\n self.player_0_score_label.configure(text=self.SCORE_LABEL.format(self.players[0], self.game.player_0_score))\n self.player_1_score_label.configure(text=self.SCORE_LABEL.format(self.players[1], self.game.player_1_score))\n self.num_ties_label.configure(text=self.TIE_LABEL.format(self.game.num_ties))", "def reset_buttons(button_list):\n\n for button in button_list:\n button.reset_click()", "def on_reset_button(self, event):\n text = _(u\"Reset button pressed.\")\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n\n dialog_box = MyDialog_monitor(\n self, -1, _(u\"Select signals to monitor\"), self.names, self.devices, self.monitors)\n dialog_box.Destroy()\n\n global hold_monitor\n differential = hold_monitor\n\n hold_monitor = dict.fromkeys(hold_monitor, False)\n\n dialog_box = MyDialog_monitor(\n self, -1, _(u\"Select signals to monitor\"), self.names, self.devices, self.monitors)\n\n hold_monitor = differential\n dialog_box.ok_button(wx.EVT_BUTTON)\n\n dialog_box.Destroy()\n hold_monitor = dict.fromkeys(hold_monitor, False)\n if self.state == 0:\n self.canvas_2d.Refresh()\n else:\n self.canvas_3d.Refresh()\n global global_cycles_completed\n global_cycles_completed = 0", "def reset(self):\r\n\r\n self.make_board()\r\n\r\n # configure each buttons text option to an empty string\r\n for row in range(3):\r\n for column in range(3):\r\n self.board[row][column][0]['text'] = ''", "def reset_buttons(self):\n global intervalAdd_\n global intervalDel_\n global annotationAdd_\n global annotationDel_\n\n # self.active_mode = 'default', 'intervalAdd', 'intervalDel'\n if self.active_mode != 'intervalAdd':\n self.push2_1.setChecked(False)\n intervalAdd_ = False\n\n if self.active_mode != 'intervalDel':\n self.push2_2.setChecked(False)\n intervalDel_ = False\n\n if self.active_mode != 'annotationAdd':\n self.push1_1.setChecked(False)\n annotationAdd_ = False\n\n if self.active_mode != 'annotationDel':\n self.push1_2.setChecked(False)\n annotationDel_ = False", "def _clear_all_input(self) -> None:\n page = self.pages[self.current_page]\n for button in page.buttons:\n if isinstance(button, InputButton):\n button.update_name(button.prompt + '0')", "def reset(self):\n self.value.put(0)\n self.input_pv.put(\"\")\n self.input_trigger.put(\"Yes\")", "def reset(self):\n for i in range(self.shapeRow):\n for j in range(self.shapeColumn):\n self.buttons[i][j].setText(\" \")", "def fueling_reset(self):\n pos.select_dispenser(1)\n crindsim.swipe_card()\n if system.wait_for(lambda: \"debit\" in crindsim.get_display_text().lower(), verify = False):\n crindsim.press_softkey(\"no\")\n if system.wait_for(lambda: \"zip\" in crindsim.get_display_text().lower(), verify = False):\n crindsim.press_keypad(\"2\")\n crindsim.press_keypad(\"7\")\n crindsim.press_keypad(\"4\")\n crindsim.press_keypad(\"1\")\n crindsim.press_keypad(\"0\")\n crindsim.press_keypad(\"enter\")\n if system.wait_for(lambda: \"carwash\" in crindsim.get_display_text().lower(), verify = False):\n crindsim.press_softkey(\"no\")\n crindsim.lift_handle()\n crindsim.open_nozzle()\n pos.click(\"reset\")\n pos.click(\"yes\")\n crindsim.close_nozzle()\n crindsim.lower_handle()\n #Checks crind diag to see if reset message is displayed\n if not 
system.wait_for(lambda: \"reset\" in pos.read_dispenser_diag()[\"Status\"].lower(), verify = False):\n tc_fail(\"CRIND did not reset\")\n #Wait for crind to return to idle\n if not system.wait_for(lambda: \"idle\" in pos.read_dispenser_diag()[\"Status\"].lower(), timeout = 120, verify = False):\n tc_fail(\"CRIND did not return to idle\")\n pos.click(\"back\")", "def ResetButtons(self):\r\n\r\n floating = self.HasFlag(self.optionFloating)\r\n self.buttons = []\r\n\r\n if not floating and self.HasMinimizeButton():\r\n button = AuiPaneButton(AUI_BUTTON_MINIMIZE)\r\n self.buttons.append(button)\r\n \r\n if not floating and self.HasMaximizeButton():\r\n button = AuiPaneButton(AUI_BUTTON_MAXIMIZE_RESTORE)\r\n self.buttons.append(button)\r\n\r\n if not floating and self.HasPinButton():\r\n button = AuiPaneButton(AUI_BUTTON_PIN)\r\n self.buttons.append(button)\r\n\r\n if self.HasCloseButton():\r\n button = AuiPaneButton(AUI_BUTTON_CLOSE)\r\n self.buttons.append(button)", "def resetPressed(self):\n print(\"Reset button has been pressed!\")\n self.save_file = open(os.path.join(args.parent_img_path, self.save_file_name), \"a\")\n self.save_file.write(\"\\n\" + \"-------*Reset Button Pressed*-------\" + \"\\n\")\n self.save_file.close()\n # CTWM Resetting\n if self.showCTWM:\n self.outcomesCTWM = np.array([])\n self.CTWMx = [0]\n self.CTWMy = [0]\n self.curveCTWMGraph.setData(x=self.CTWMx, y=self.CTWMy)\n self.CTWMGraph.clear()\n self.CTWMGraph.draw()\n # Does what initImgCTWM does\n self.labelFailureGrayCTWM.setPixmap(self.images['imageFailureGrayCTWM'])\n self.labelAdvancedGrayCTWM.setPixmap(self.images['imageAdvancedGrayCTWM'])\n self.labelAverageGrayCTWM.setPixmap(self.images['imageAverageGrayCTWM'])\n self.labelGoodGrayCTWM.setPixmap(self.images['imageGoodGrayCTWM'])\n\n # WHM resetting\n if self.showWHM:\n self.outcomesWHM = np.array([])\n self.curveWHMGraph.setData(x=[0], y=[0])\n self.WHMGraph.clear()\n self.WHMGraph.draw()\n # Does what initImgWHM does\n self.labelLevelFourWHM.setPixmap(self.images['imageLevelFourWHMgray'])\n self.labelLevelThreeWHM.setPixmap(self.images['imageLevelThreeWHMgray'])\n self.labelLevelTwoWHM.setPixmap(self.images['imageLevelTwoWHMgray'])\n self.labelLevelOneWHM.setPixmap(self.images['imageLevelOneWHMgray'])\n\n # RTLE resetting\n if self.showRTLE:\n self.speed.setText(\"\")\n self.feed.setText(\"\")\n self.RTLEGraph.clear()\n self.RTLEGraph.draw()\n self.labelTimeAverageWear.setText(\"\")\n self.labelTimeAdvancedWear.setText(\"\")\n self.labelTimeFailureWear.setText(\"\")\n self.curveRTLEGraph = pg.BarGraphItem(name=\"RLTEGraph\", x=[1], height=30, width=3, brush='d9d9d9')\n self.curveRTLEGraph.rotate(-90) # horizontal graph\n self.RTLEGraph.addItem(self.curveRTLEGraph)\n\n ###############################################################################################################\n # #\n # INITIALIZING ALL THE WIDGETS #\n # #\n ###############################################################################################################", "def do_reset(self, line):\n self.prompt = self.default_prompt\n self.current_frame = None", "def reset(self):\n\n self.ids.score.text = '0'\n self.ids.end_button.text = 'submit'\n self.ids.end_button.disabled = True\n self.ids.end_button.opacity = 0\n self.time = None\n\n board = self.ids.board\n\n for item in board.children:\n if isinstance(item, Space) is True:\n space = game.spacelist[int(item.number)]\n item.text = ''\n item.atom = space.atom\n item.guess = False\n item.correct = False\n item.disabled = False\n\n elif 
isinstance(item, Marker) is True:\n item.text = ''\n item.disabled = False\n\n for i in range(1, 6):\n self.ids['tracker' + str(i)].color = scheme.white", "def reset(self, event):\n #Resets the current puzzle\n self.w.delete('all') #Deletes all widgets/components \n self.resetnums() #Call restnums() to reset self.movelist\n\n #Destroys all buttons on GUI\n #self.buttonlist.append(self.lbl)\n for i in range(len(self.buttonlist)):\n self.buttonlist[i].destroy()\n\n self.create_widgets(self.counter) #Calls the create_widgets() to redisplay all widgets and buttons\n self.lbl2[\"text\"] = \"\" #Clears any text (e.g. instructions or check) if there is any.", "def __clear(self):\n for i in range(len(self.buttons_list)):\n self.labels_strvar[i].set(\"\")\n if self.buttons_list[i][\"state\"] == DISABLED:\n self.buttons_list[i][\"state\"] = NORMAL\n self.entered_list = []\n return", "def confirm_reset(self):\r\n confirm = QMessageBox.question(self,\r\n self.confirmDBClearTitleString,\r\n self.confirmDBClearQuestionString,\r\n QMessageBox.Yes |\r\n QMessageBox.No,\r\n QMessageBox.No)\r\n\r\n if confirm == QMessageBox.Yes:\r\n self.reset()", "def confirm_reset(self):\r\n confirm = QMessageBox.question(self,\r\n self.confirmDBClearTitleString,\r\n self.confirmDBClearQuestionString,\r\n QMessageBox.Yes |\r\n QMessageBox.No,\r\n QMessageBox.No)\r\n\r\n if confirm == QMessageBox.Yes:\r\n self.reset()", "def _doReset(self):\n self._cmdReset()", "def resetGUI(self):\n # Updated 11/19/16\n # enable alteration of times\n self.cb_init_time_min.setEnabled(True)\n self.cb_init_time_sec.setEnabled(True)\n \n self.cb_side1_time_min.setEnabled(True)\n self.cb_side1_time_sec.setEnabled(True)\n \n self.cb_side2_time_min.setEnabled(True)\n self.cb_side2_time_sec.setEnabled(True)\n \n self.cb_repeat_cycle.setEnabled(True) \n \n # set current times and Cycle to \"-\"\n self.current_Init_Time.setText(\"-\")\n self.current_Side_1_Time.setText(\"-\")\n self.current_Side_2_Time.setText(\"-\")\n self.current_Rep_Cycle.setText(\"-\")\n \n # set total times to \"-\"\n self.Init_tot.setText(\"-\")\n self.Side_1_tot.setText(\"-\")\n self.Side_2_tot.setText(\"-\")\n self.Rep_Cycle_tot.setText(\"-\")", "def call_reset(self, _):\n return MENU_RESET", "def call_reset(self, _):\n return MENU_RESET", "def call_reset(self, _):\n return MENU_RESET", "def button_reset(self): \n self.button_1 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 50,\n 570)\n self.button_2 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 50,\n 75)\n self.button_3 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 750,\n 570)\n self.button_4 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 750,\n 75)\n a = [1, 2, 3, 4]\n for i in a:\n self.puzzle.remove_value(i)", "def _set_mode_prompt_stop(cls):\n\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Resume\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-start\"))\n cls.btn_startpause.setWhatsThis(\"Resume timer from current time.\")\n cls.btn_startpause.clicked.connect(cls.resume)\n\n cls.btn_stopsave.setText(\"Confirm Stop\")\n cls.btn_stopsave.setIcon(QIcon.fromTheme(\"media-playback-stop\"))\n cls.btn_stopsave.setWhatsThis(\n \"Stop timer. 
Timer must be stopped \" \"before you can save.\"\n )\n cls.btn_stopsave.clicked.connect(cls.stop)\n cls.btn_stopsave.setEnabled(True)", "def reset(self):\n return self.set_command(\"Z\")", "def reset(self):\n self.state = [\n ['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R'],\n ['P'] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n ['p'] * 8,\n ['r', 'n', 'b', 'q', 'k', 'b', 'n', 'r']\n ]", "def _setInitialButtonState(self):\r\n \r\n self.addButton.setEnabled(not self._model.isReadOnly)\r\n self.editButton.setEnabled(False)\r\n self.clearValueButton.setEnabled(False)\r\n self.deleteButton.setEnabled(False)\r\n self.revertButton.setEnabled(False)", "def reset(self):\n\n\t\tself._send_message(\"RESET\", \"\\x00\")", "def reset(cls):\n\n cls._set_mode_stopped()\n TimeDisplay.reset_time(erase=True)\n TimeDisplay.show_default()\n Notes.clear()\n for callback in cls.reset_callback:\n callback()" ]
[ "0.7271416", "0.69852114", "0.6692109", "0.665375", "0.66360044", "0.6543945", "0.64558303", "0.644347", "0.6367087", "0.6281809", "0.62665826", "0.62448794", "0.61734205", "0.61626613", "0.6158813", "0.6125091", "0.6106521", "0.61056757", "0.61019343", "0.60933787", "0.6088602", "0.6088602", "0.6088602", "0.6036071", "0.60355467", "0.60226923", "0.6017357", "0.60126275", "0.59631014", "0.5936257" ]
0.8170068
0
Set buttons to running-clock state.
def _set_mode_running(cls):
    cls._disconnect_buttons()

    cls.btn_startpause.setText("Pause")
    cls.btn_startpause.setIcon(QIcon.fromTheme("media-playback-pause"))
    cls.btn_startpause.setWhatsThis("Pause timer.")
    cls.btn_startpause.clicked.connect(cls.pause)

    cls.btn_stopsave.setText("Stop")
    cls.btn_stopsave.setIcon(QIcon.fromTheme("media-playback-stop"))
    cls.btn_stopsave.setWhatsThis(
        "Stop timer. Timer must be stopped " "before you can save."
    )
    cls.btn_stopsave.clicked.connect(cls.prompt_stop)
    cls.btn_stopsave.setEnabled(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pauseRunning(self):\r\n self.start_button['state'] = 'normal'\r\n self.pause_button['state'] = 'disabled'\r\n self.running = False", "def initialize_buttons(self):\r\n self.start_button = tk.Button(self.master, text='Start', command = self.startRunning)\r\n self.start_button.grid(row=0, column=0)\r\n\r\n self.pause_button = tk.Button(self.master, text='Pause', command = self.pauseRunning)\r\n self.pause_button.grid(row=0, column=1)\r\n\r\n self.graph_button = tk.Button(self.master, text='Graph', command = self.showGraph)\r\n self.graph_button.grid(row=0, column=2)\r\n \r\n self.plot_button = tk.Button(self.master, text='Plot', command = self.showPlot)\r\n self.plot_button.grid(row=0, column=3)\r\n \r\n self.draw_button = tk.Button(self.master, text='Draw', command = self.drawCells)\r\n self.draw_button.grid(row=0, column=4)\r\n \r\n # Initialize Button States and Actions\r\n self.pause_button['state'] = 'disabled'\r\n # Boolean switch to control flow of placement process\r\n self.running = False\r\n # Boolean switch to plot placement connections and tags, turn off for faster processing\r\n self.plot = False\r\n self.drawing = False\r\n self.graph = False\r\n # Boolean switch to specify first run and allow stop/continue behavior that doesn't initialize program\r\n self.firstRun = True", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def _startbuttontimers(self):\n changetimes = {}\n for b in self._buttons:\n if not b._times:\n continue\n t0, t1 = b._times\n changetimes[t0] = changetimes[t1] = 1\n for t in changetimes.keys():\n mw_globals.toplevel.settimer(t, (self._window._buttonschanged, ()))", "def _set_mode_stopped(cls):\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Start\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-start\"))\n cls.btn_startpause.setWhatsThis(\"Start a new timer.\")\n cls.btn_startpause.clicked.connect(cls.start)\n\n cls.btn_stopsave.setIcon(QIcon.fromTheme(None))\n cls.btn_stopsave.setText(\"Stopped\")\n cls.btn_stopsave.setWhatsThis(\"Timer is already stopped.\")\n cls.btn_stopsave.setEnabled(False)", "def _set_mode_paused(cls):\n\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Resume\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-start\"))\n cls.btn_startpause.setWhatsThis(\"Resume timer from current time.\")\n cls.btn_startpause.clicked.connect(cls.resume)\n cls.btn_stopsave.setText(\"Stop\")\n\n cls.btn_stopsave.setIcon(QIcon.fromTheme(\"media-playback-stop\"))\n cls.btn_stopsave.setWhatsThis(\n \"Stop timer. 
Timer must be stopped \" \"before you can save.\"\n )\n cls.btn_stopsave.clicked.connect(cls.prompt_stop)\n cls.btn_stopsave.setEnabled(True)", "def startRunning(self):\r\n self.start_button['state'] = 'disabled'\r\n self.pause_button['state'] = 'normal'\r\n self.running = True\r\n \r\n # If first run and not continuation from pause\r\n if (self.firstRun):\r\n self.start_timer = time.clock()\r\n # Simulated Annelaing Function\r\n self._startpartition(False)\r\n # Always display result at the end of the process\r\n self.updateDraw()\r\n #self.updatePlot() #TODO: What to plot\r\n # Disable Buttons when finished\r\n self.pause_button['state'] = 'disabled'\r\n self.plot_button['state'] = 'disabled'\r\n self.draw_button['state'] = 'disabled'", "def run_button(self):\n if self.run.label == 'Run':\n self.run.label = 'Stop'\n self.run.button_type = 'danger'\n self.callback_obj = self.doc.add_periodic_callback(self.unlocked_task, 1000)\n\n else:\n self.run.label = 'Run'\n self.run.button_type = 'success'\n self.doc.remove_periodic_callback(self.callback_obj)", "def cycle_but_state(num):\n states = (tk.DISABLED, tk.NORMAL, tk.ACTIVE)\n but = root.buttons[num]\n state = but.cget(state_s)\n index = states.index(state)\n new_state = states[(index + 1) % len(states)]\n but.config(state=new_state)\n print(\"BUT %d STATE is \" % num, new_state.upper(), \", was \", state)\n if isinstance(but, TTButton):\n print(\"BUT %d DUMP():\" % num, but.dump())\n else:\n print(\"BUT %d CONFIG():\" % num, but.config())", "def change_button_state(self, button_state='normal'):\n self.button1.configure(state=button_state)\n self.button2.configure(state=button_state)\n self.button3.configure(state=button_state)\n self.button4.configure(state=button_state)\n self.button5.configure(state=button_state)\n self.button6.configure(state=button_state)\n self.button7.configure(state=button_state)\n self.button8.configure(state=button_state)\n self.button9.configure(state=button_state)", "def update(self):\n for (x, y) in self.board.fields:\n text = self.board.fields[x, y]\n self.buttons[x, y]['text'] = text\n self.buttons[x, y]['disabledforeground'] = 'black'\n if text == self.board.empty:\n self.buttons[x, y]['state'] = 'normal'\n else:\n self.buttons[x, y]['state'] = 'disabled'\n winning = self.board.won()\n if winning:\n for x, y in winning:\n self.buttons[x, y]['disabledforeground'] = 'red'\n for x, y in self.buttons:\n self.buttons[x, y]['state'] = 'disabled'\n for (x, y) in self.board.fields:\n self.buttons[x, y].update()", "def reset_buttons(self):\n global intervalAdd_\n global intervalDel_\n global annotationAdd_\n global annotationDel_\n\n # self.active_mode = 'default', 'intervalAdd', 'intervalDel'\n if self.active_mode != 'intervalAdd':\n self.push2_1.setChecked(False)\n intervalAdd_ = False\n\n if self.active_mode != 'intervalDel':\n self.push2_2.setChecked(False)\n intervalDel_ = False\n\n if self.active_mode != 'annotationAdd':\n self.push1_1.setChecked(False)\n annotationAdd_ = False\n\n if self.active_mode != 'annotationDel':\n self.push1_2.setChecked(False)\n annotationDel_ = False", "def toggle_run_button(self, event):\n if not self.running:\n self.start_thread()\n else:\n self.stop_thread()", "def update_buttons(self):\n # Enable the Add/Remove step buttons if a Generator is loaded\n enable = self.mgr.obj is not None\n self.addButton.setEnabled(enable)\n self.removeButton.setEnabled(enable)\n self.upButton.setEnabled(enable)\n self.downButton.setEnabled(enable)", "def set_controls_state(self, state: bool):\n for 
control in self.controls:\n control.setEnabled(state)\n self.LblDacVal.setText(\"НСизвСстно\")\n self.LblFreqVal.setText(\"-\")\n self.LblStateVal.setText(\"Π ΡƒΡ‡Π½ΠΎΠΉ\")\n self.LblMoveVal.setText(\"0 Π“Ρ†\")\n self.LblResVal.setText(\"НСизвСстно\")\n self.LblAttVal.setText(\"НСизвСстно\")\n self.SpinRough.setValue(0)\n self.SpinFine.setValue(0)\n self.SpinDACValue.setValue(0)\n self.SpinAttenuate.setValue(30)\n self.TxtLog.clear()\n if not self.calibr_table:\n self.SpinFine.setEnabled(False)\n self.BtnSetFine.setEnabled(False)\n for btn in self.btns.keys():\n btn.setStyleSheet(\"\")\n self.btns[btn].setStyleSheet(\"font: 16px\")", "def continue_button(self):\r\n self.update_settings()\r\n self.is_pause = False\r\n self.is_step = False\r\n if self.continue_call is not None:\r\n self.wm.after(1, self.continue_call)", "def paint_project_button(self, running):\r\n if running:\r\n self.btn_start.setIcon(QIcon(\r\n os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"red_btn.png\"))))\r\n else:\r\n self.btn_start.setIcon(QIcon(\r\n os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"green_btn.png\"))))\r\n self.btn_start.setIconSize(QSize(\r\n self.btn_start.width(), self.btn_start.height()))", "def controls(self):\n\n framecrtl = tk.Frame(self)\n framecrtl.pack()\n\n run = ModernButton(framecrtl, text='Run', command=self.start_threading, width=5)\n run.pack(side='left', padx=5, pady=5)\n\n cancel = ModernButton(framecrtl, text='Cancel', command=self.destroy, width=5)\n cancel.pack(side='left', padx=5, pady=5)\n\n abort = ModernButton(framecrtl, text='Abort', command=self.root.destroy, width=5)\n abort.pack(side='left', padx=5, pady=5)", "def set_but_ctrls(self, s_state) :\n logger.debug('In %s.set_but_ctrls received state: %s' % (self._name, s_state))\n state = s_state.lower()\n\n if state == self.s_running :\n self.but_play.setIcon(icon.icon_playback_pause_sym)\n self.but_play.setAccessibleName(self.s_play_pause)\n\n else : # elif state == self.s_paused :\n self.but_play.setIcon(icon.icon_playback_start_sym)\n self.but_play.setAccessibleName(self.s_play_start)\n\n self.but_play.setIconSize(QSize(48, 48))\n self.set_tool_tips()\n self.set_but_play_enabled(True) # unlock play button", "def OnButton1(self):\n self.start_time = self.start_time.Minutes(DEFAULT_TIMER)\n self.timertext.SetLabel(self.start_time.Format(\"%M:%S\"))\n self.timerNotZero = True\n self.blinkPhase = 0\n self.timertext.SetForegroundColour('black')\n self.button1.SetBackgroundColour('white')", "def start_aco(self, event):\n if not self.running:\n self.start_btn_text.set(\"Start\")\n self.pause_btn_text.set(\"Pause\")\n self.running = True\n self.main_window.after(int(self.speed), self.update_aco)", "def normal_run(self):\n super().events_buttons(back=True)\n self.events_delete_btns()\n self.draw()", "def redrawButtons(self):\n for self.a in range(self.btnNumber): # btnNumber = maximum number of buttons\n self.btns[self.a].configure(text=self.btnList[self.a])\n self.btns[self.a].grid()\n self.keypad = 'KeyPad'\n self.title(self.keypad)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def __update_pause_btn(self, value):\n self.pauseBtn.blockSignals(True)\n self.pauseBtn.setChecked(value)\n self.__update_pause_icon()\n self.pauseBtn.blockSignals(False)", "def drawButtons(self):\n self.__pausedTitle.draw(self.__screen)\n 
self.__exitGameButton.draw(self.__screen)\n self.__resumeButton.draw(self.__screen)\n self.__mainMenuButton.draw(self.__screen)", "def enable_video_buttons(self, state_play, state_pause, state_stop):\n self.play_button.setEnabled(state_play)\n self.pause_button.setEnabled(state_pause)\n self.stop_button.setEnabled(state_stop)", "def buttonStatusChange(self,**kwargs):\n # If the dictionary robot value is 'tb1' then change the button Style\n if kwargs['robot']=='tb1':\n if self.robot_TB1_Viewer.isChecked() is True:\n self.robot_TB1_Status.setStyleSheet(\"background: rgba(25, 27, 33, 0.2);\\n\"\n \"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n else:\n self.robot_TB1_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n # If the dictionary robot value is 'tb2' then change the button Style\n if kwargs['robot']=='tb2':\n if self.robot_TB2_Viewer.isChecked() is True:\n self.robot_TB2_Status.setStyleSheet(\"background: rgba(25, 27, 33, 0.2);\\n\"\n \"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n else:\n self.robot_TB2_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n # If the dictionary robot value is 'tb3' then change the button Style\n if kwargs['robot']=='tb3':\n if self.robot_TB3_Viewer.isChecked() is True:\n self.robot_TB3_Status.setStyleSheet(\"background: rgba(25, 27, 33, 0.2);\\n\"\n \"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n else:\n self.robot_TB3_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n # If the dictionary robot value is 'tb4' then change the button Style\n if kwargs['robot']=='tb4':\n if self.robot_TB4_Viewer.isChecked() is True:\n self.robot_TB4_Status.setStyleSheet(\"background: rgba(25, 27, 33, 0.2);\\n\"\n \"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n else:\n self.robot_TB4_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")", "def turn_on_buttons(self):\n self.edit_button.setEnabled(True)\n self.delete_button.setEnabled(True)" ]
[ "0.69890445", "0.64806527", "0.6479749", "0.64516985", "0.62913656", "0.6268325", "0.62614596", "0.61545545", "0.612091", "0.6110985", "0.6019341", "0.60112214", "0.5980356", "0.5966523", "0.5930903", "0.58914804", "0.58716834", "0.5860595", "0.58567125", "0.5829467", "0.5811157", "0.5799874", "0.57991314", "0.5774741", "0.5774741", "0.57668024", "0.576466", "0.57565993", "0.5734935", "0.5722836" ]
0.7214415
0
Set buttons to paused state.
def _set_mode_paused(cls): cls._disconnect_buttons() cls.btn_startpause.setText("Resume") cls.btn_startpause.setIcon(QIcon.fromTheme("media-playback-start")) cls.btn_startpause.setWhatsThis("Resume timer from current time.") cls.btn_startpause.clicked.connect(cls.resume) cls.btn_stopsave.setText("Stop") cls.btn_stopsave.setIcon(QIcon.fromTheme("media-playback-stop")) cls.btn_stopsave.setWhatsThis( "Stop timer. Timer must be stopped " "before you can save." ) cls.btn_stopsave.clicked.connect(cls.prompt_stop) cls.btn_stopsave.setEnabled(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pauseRunning(self):\r\n self.start_button['state'] = 'normal'\r\n self.pause_button['state'] = 'disabled'\r\n self.running = False", "def set_pause(self, pause):\n\n game_status = self.game.get_game_status();\n if(game_status == GameStatus.NotStarted or game_status == GameStatus.Finished):\n return;\n\n if(pause == True):\n self.game.set_game_status(GameStatus.Paused);\n self.bttn_pause.set_text(\"Reprendre la partie\");\n\n self.game.stop_timer();\n\n elif(pause == False):\n self.game.set_game_status(GameStatus.InProgress);\n self.bttn_pause.set_text(\"Mettre en pause\");\n\n self.game.start_timer();", "def pause_button(self):\r\n self.is_action = True\r\n self.update_settings()\r\n self.is_pause = True\r\n if self.pause_call is not None:\r\n self.wm.after(1, self.pause_call)", "def toggle_pause(self):\n self.m_btn_pause = not self.m_btn_pause", "def _toggle_paused(self, paused=None):\n #automatically start the first wave\n if self._wave == 0:\n self.next_wave()\n\n if paused is None:\n paused = not self._paused\n\n #Task 1.5 (Play Controls): Reconfigure the pause button here\n \n if paused:\n self.pause()\n self._play_button_text.set(\"play\")\n else:\n self.start()\n self._play_button_text.set(\"pause\")\n\n self._paused = paused", "def __update_pause_btn(self, value):\n self.pauseBtn.blockSignals(True)\n self.pauseBtn.setChecked(value)\n self.__update_pause_icon()\n self.pauseBtn.blockSignals(False)", "def pause(self):\n self.paused_time = time.time()\n self.paused = True", "def toggle_pause(self, sender):\n if self.pause:\n self.app.title = 'checking'\n sender.title = 'pause'\n self.pause = False\n else:\n self.app.title = 'paused'\n sender.title = 'start'\n self.pause = True", "def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()", "def on_pushButton_toggled(self, checked):\n self.isPause = checked", "def _set_mode_running(cls):\n\n cls._disconnect_buttons()\n\n cls.btn_startpause.setText(\"Pause\")\n cls.btn_startpause.setIcon(QIcon.fromTheme(\"media-playback-pause\"))\n cls.btn_startpause.setWhatsThis(\"Pause timer.\")\n cls.btn_startpause.clicked.connect(cls.pause)\n\n cls.btn_stopsave.setText(\"Stop\")\n cls.btn_stopsave.setIcon(QIcon.fromTheme(\"media-playback-stop\"))\n cls.btn_stopsave.setWhatsThis(\n \"Stop timer. 
Timer must be stopped \" \"before you can save.\"\n )\n cls.btn_stopsave.clicked.connect(cls.prompt_stop)\n cls.btn_stopsave.setEnabled(True)", "def enable_video_buttons(self, state_play, state_pause, state_stop):\n self.play_button.setEnabled(state_play)\n self.pause_button.setEnabled(state_pause)\n self.stop_button.setEnabled(state_stop)", "def _control_pause(self):\n self.player.pause()", "def pause(cls):\n\n cls._set_mode_paused()\n TimeDisplay.stop_time()\n for callback in cls.pause_callback:\n callback()", "def pause(self):\n self.block.mobile = not self.block.mobile\n if not self.paused:\n self.paused = True\n # Also print paused message\n self.screen.print(\"PAUSED\")\n else:\n self.paused = False\n self.screen.print(\"\")\n # Also reset tick time\n self.t = time.time()", "def PAUSED(self):\n self.pause_state = self.get_state() # the state FSM was in before 'op-pause' was called\n self.continue_state = self.pause_state\n self.update_status(self.STATES.PAUSED)", "def grab_paused(self):\n\t\tif time.time() - self.time_paused > PAUSE_INTERVAL:\n\t\t\tself.status.state = 'stop'", "def pause_update(self):\n if self.pause_event:\n self.pause_event.activate()", "def on_worker_paused(self):\n self.playing = False\n self.pausing = False\n self.enable_video_buttons(True, False, True)", "def unpause(self):\n self.paused = False", "def pause(self):\n \n self.pause = True", "def start_pause(self, **kwargs):\n if self.is_on:\n self.turn_off()\n else:\n self.turn_on()", "def paused(self) -> bool:", "def pause(self):\n pass", "def pause(self):\n pass", "def togglePause(self):\n self.model.paused = not self.model.paused\n self.proc.send_signal(signal.SIGUSR1)", "def _paused(self):\n self._last = self._touch\n m = 'You have '+`self._game.getPlayerLives()`+' lives remaining.\\nClick to continue'\n f = 25\n self._click()\n if self._last is None and self._touch is not None:\n self._state = STATE_COUNTDOWN\n self._game.resetBall()\n self._game.resetPaddle()\n m = ''\n self._last = self._touch\n self._countdownTime = 0\n self._countdownMessage = GLabel(text='3', font_size=40,x=GAME_WIDTH / 2.0,\n y=GAME_HEIGHT*(2.0/3.0), halign='center',\n valign='middle', linecolor=colormodel.WHITE)\n self._pausedMessage = GLabel(text=m,font_size=f,x=GAME_WIDTH / 2.0,\n y=GAME_HEIGHT*(2.0/3.0), halign='center',\n valign='middle', linecolor=colormodel.WHITE)", "def pause_videos(self):\n if (not self.playing) or (self.pausing) or (self.shutdown):\n return\n self.enable_video_buttons(False, False, False)\n self.pausing = True\n\n # Pause the background worker\n self.worker.force_pause()", "def pause(self):\n while 1:\n if self.is_paused:\n time.sleep(1)\n else:\n break", "def setup_button_pause(self):\n pause_icon = tk.PhotoImage(file = self.pause_icon)\n self.button_pause = tk.Button(\n self.toolbar,\n width = 24,\n height = 24,\n image = pause_icon,\n command = self.pause_world)\n self.button_pause.image = pause_icon\n self.button_pause.grid(row = 0, column = 3, sticky = tk.W)" ]
[ "0.75215256", "0.7443539", "0.7351155", "0.73039067", "0.72980815", "0.72020006", "0.71098685", "0.7038338", "0.70222014", "0.6805279", "0.67249453", "0.6716562", "0.6697442", "0.6694803", "0.66640294", "0.66182137", "0.6597947", "0.6588095", "0.6575589", "0.65361243", "0.64974135", "0.6423966", "0.6418535", "0.6398062", "0.6398062", "0.6395473", "0.63951695", "0.6382411", "0.63633925", "0.63618433" ]
0.7912897
0
Testing Crofton Method on data from Crofton 1971
def test_crofton_method(self): chi_sq = lambda pred, obs: np.sum((obs - pred)**2 / pred) para_crof = np.arange(0, 9) crof = PIHM() # Data from Crofton 1971 st1_obs = np.array([161, 111, 67, 65, 50, 30, 33, 13, 8]) st2_obs = np.array([189, 129, 86, 51, 27, 14, 8, 1, 2]) st3_obs = np.array([458, 81, 40, 22, 19, 4, 6, 3, 0]) st4_obs = np.array([164, 147, 92, 43, 25, 11, 3, 0, 1]) st5_obs = np.array([140, 77, 30, 14, 10, 3, 2, 0, 0]) st6_obs = np.array([153, 29, 6, 2, 1, 0, 0, 0, 0]) # Raw data st1_raw = np.repeat(para_crof, st1_obs) st2_raw = np.repeat(para_crof, st2_obs) st3_raw = np.repeat(para_crof, st3_obs) st4_raw = np.repeat(para_crof, st4_obs) st5_raw = np.repeat(para_crof, st5_obs) st6_raw = np.repeat(para_crof, st6_obs) # Crofton predicted with truncations! st1_pred = { 3: np.array([162.2, 103.9, 77.4, 60.4, 48.2, 38.9, 31.7, 26.0, 21.4]), 4: np.array([162.4, 103.4, 77.6, 61.2, 49.4, 40.5, 33.5, 27.9, 23.3]), 5: np.array([161.2, 107.0, 78.0, 58.7, 44.7, 34.4, 26.6, 20.7, 16.1]), 6: np.array([162.1, 104.6, 77.3, 59.5, 46.7, 37.1, 29.7, 23.9, 19.3]), 7: np.array([160.2, 108.8, 79.2, 58.9, 44.3, 33.6, 25.5, 19.5, 14.9]), 8: np.array([158.4, 111.7, 80.8, 59.0, 43.2, 31.7, 23.3, 17.2, 12.7])} st2_pred = { 3: np.array([188.6, 130.7, 83.6 , 52.0, 31.9, 19.4, 11.7, 7.0, 4.2]), 4: np.array([187.9, 133.0, 83.0, 49.4, 28.7, 16.5, 9.3, 5.3, 2.9,]), 5: np.array([187.3, 134.5, 83.0, 48.4, 27.4, 15.3, 8.4, 4.6, 2.5]), 6: np.array([187.2, 134.8, 83.1, 48.3, 27.3, 15.1, 8.3, 4.5, 2.4]), 7: np.array([185.3, 138.1, 84.2, 47.5, 25.7, 13.6, 7.0, 3.6, 1.8]), 8: np.array([185.5, 137.9, 84.1, 47.5, 25.8, 13.7, 7.1, 3.7, 1.9])} st3_pred = { 3: np.array([457.9, 81.8, 39.0, 22.5, 14.1, 9.2, 6.3, 4.3, 3.0]), 4: np.array([458.1, 79.5, 40.3, 25.0, 16.9, 12.0, 8.8, 6.5, 5.0]), 5: np.array([457.5, 83.7, 39.0, 21.9, 13.3, 8.5, 5.5, 3.7, 2.5]), 6: np.array([457.0, 83.4, 39.1, 22.0, 13.5, 8.6, 5.7, 3.8, 2.2]), 7: np.array([457.4, 84.1, 39.1, 21.8, 13.2, 8.3, 5.4, 3.6, 2.4])} # Station 1 comparison trunc = np.arange(3, 9) crof_st1_ks = [0.754, 0.736, 0.834, 0.776, 0.874, 0.951] crof_st1_N = [677, 708, 606, 646, 594, 573] our_st1_N = [] our_st1_ks = [] crof_st1_compare_ss = [] crof.data = st1_raw for i in trunc: bin_edges = list(np.arange(0, i + 1)) + [i + 0.9] N, mu, k = crof.crofton_method(bin_edges) our_st1_ks.append(k) our_st1_N.append(N) assert_array_almost_equal(crof_st1_ks, our_st1_ks, decimal=1) # This will fail, but it is close # Uncomment and run nosetests to see results # assert_array_equal(crof_st1_N, our_st1_N) # Station 2 comparison crof_st2_ks = [1.182, 1.313, 1.392, 1.407, 1.574, 1.563] crof_st2_N = [535, 520, 514, 513, 508, 509] our_st2_N = [] our_st2_ks = [] crof_st2_compare_ss = [] crof.data = st2_raw for i in trunc: bin_edges = list(np.arange(0, i + 1)) + [i + 0.9] N, mu, k = crof.crofton_method(bin_edges) our_st2_ks.append(k) our_st2_N.append(N) assert_array_almost_equal(crof_st2_ks, our_st2_ks, decimal=1) assert_array_equal(np.round(crof_st2_N, -1), np.round(our_st2_N, -1)) # Station 3 comparison trunc = np.arange(3, 8) crof_st3_ks = [.229, .206, .244, .242, .247] crof_st3_N = [646, 670, 642, 643, 641] our_st3_N = [] our_st3_ks = [] crof_st3_compare_ss = [] crof.data = st3_raw for i in trunc: bin_edges = list(np.arange(0, i + 1)) + [i + 0.9] N, mu, k = crof.crofton_method(bin_edges) our_st3_ks.append(k) our_st3_N.append(N) assert_array_almost_equal(crof_st3_ks, our_st3_ks, decimal=1) # Close to equal but not quite. Which is ok. 
We don't expect them # to be exactly equal # Uncomment and run nosetests to see # assert_array_equal(crof_st3_N, our_st3_N)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cockpit_captur_ii() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/cockpit.captur_ii.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\n \"fuelAutonomy\": 35.0,\n \"fuelQuantity\": 3.0,\n \"totalMileage\": 5566.78,\n }\n\n vehicle_data = cast(\n models.KamereonVehicleCockpitData,\n response.get_attributes(schemas.KamereonVehicleCockpitDataSchema),\n )\n\n assert vehicle_data.totalMileage == 5566.78\n assert vehicle_data.fuelAutonomy == 35.0\n assert vehicle_data.fuelQuantity == 3.0", "def test_cny(self):\n cash_accounts = self.port_values['cash_accounts']\n cash_account = self.extract_cash_account(cash_accounts, 'CNY')\n self.assertNotEqual(cash_account, {})\n\n self.assertEqual(cash_account['account_num'], '012-875-0-603962-0')\n self.assertEqual(cash_account['account_type'], 'Current Account')\n self.assertEqual(cash_account['bank'], 'Bank of China (Hong Kong) Ltd')\n self.assertEqual(cash_account['date'], datetime.datetime(2015,12,10))\n self.assertAlmostEqual(cash_account['balance'], 386920)\n self.assertAlmostEqual(cash_account['fx_rate'], 1.2037)\n self.assertAlmostEqual(cash_account['local_currency_equivalent'], 465735.604)", "def test_ccds(self):\n #TODO write ccds tests", "def test_fetch_crime_sedf(self):\n assert isinstance(_vector.fetch_beach_access_data(f='arcgis'), \n pd.DataFrame)", "def test_cockpit_zoe() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/cockpit.zoe.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\"totalMileage\": 49114.27}\n\n vehicle_data = cast(\n models.KamereonVehicleCockpitData,\n response.get_attributes(schemas.KamereonVehicleCockpitDataSchema),\n )\n\n assert vehicle_data.totalMileage == 49114.27\n assert vehicle_data.fuelAutonomy is None\n assert vehicle_data.fuelQuantity is None", "def covid_data()->str:# make it so json file can change days number\n event_log(\"retrieve covid data...\",\"\")\n c = 0\n covid_info= (\n 'https://api.coronavirus.data.gov.uk/v1/data?'\n 'filters=areaType=nation;areaName=england&'\n 'structure={\"date\":\"date\",\"newCases\":\"newCasesByPublishDate\"}'\n )\n response = get(covid_info, timeout=10)\n result = response.json()\n cases_list=[]\n for x in result['data']:\n cases_list.append((str(x['date']) + \" Cases in the country on that day: \" + str(x['newCases'])))\n if c == 6:#displays covid cases for past 7 days\n break\n c+=1\n return cases_list", "def test_cambridge_rent_price_per_sqft():\n dataframe = get_final_zillow_dataframe()\n cambridge = get_city_state_row(dataframe, 'cambridge', 'massachusetts')\n assert round(cambridge.iloc[0].get('ZRIFAH'), 1) == 2.9", "def test_rr_cineg(results):\n try:\n results.params_ci(cilevel=-50)\n except ValueError:\n pass\n else:\n raise AssertionError", "def test_coden(self):\n inv_search = \"journal:aphys\"\n spi_search = \"find coden aphys\"\n self._compare_searches(inv_search, spi_search)", "def get_cases():\n # Deprecated warning\n url = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/\"\n warnings.warn(\"This function is deprecated. 
Use get_data_jhu instead; see tutorials at <https://github.com/PayneLab/covid19pandas/tree/master/docs/>.\", DeprecatedWarning, stacklevel=2)\n print(\"These data were obtained from Johns Hopkins University (https://github.com/CSSEGISandData/COVID-19).\")\n return _get_table(url, \"time_series_covid19_confirmed_global.csv\", source=\"jhu\", update=True)", "def test_cc(self):\n result = self.test_client.cc\n\n assert result == \"Russia\"", "def fit_covid_function(self):\r\n return", "def usa_covid_cases():\n output_df = pd.DataFrame()\n covid_df = pd.read_csv(r'https://covid.ourworldindata.org/data/owid-covid-data.csv')\n usa_df = covid_df[covid_df[\"iso_code\"] == \"USA\"]\n # print(usa_df.columns)\n output_df[\"Date\"] = usa_df[\"date\"]\n output_df[\"New Cases\"] = usa_df[\"new_cases\"]\n output_df[\"Fully Vaccinated / 100\"] = usa_df[\"people_fully_vaccinated_per_hundred\"]\n print(output_df)\n output_df.to_csv(\"database/usa_covid.csv\", index=False)", "def test_ctcpQuery_DCC(self):\n self.client.ctcpQuery_DCC(self.user, self.channel, \"data\")\n self.assertEqual(\n self.client.methods,\n [\n (\n \"ctcpMakeReply\",\n (\"Wolf\", [(\"ERRMSG\", \"DCC data :Unknown DCC type 'DATA'\")]),\n )\n ],\n )", "def test_codon_usage_ecoli(self):\n CAI = CodonAdaptationIndex()\n self.assertEqual(\"%0.5f\" % CAI.cai_for_gene(\"ATGCGTATCGATCGCGATACGATTAGGCGGATG\"),\n \"0.09978\")", "def test_cochranebot(self):\n ec = Client()\n with open('data/trialpubs_rtrial.csv', 'rb') as csvfile:\n spamreader = csv.reader(csvfile)\n for row in spamreader:\n article = ec.efetch(db='pubmed', id=row[1])\n for a in article:\n crud.pubmedarticle_to_db(a, 'trial_publications')\n crud.publication_trial(row[1], row[0], 1)\n ec = Client()\n id = 28453187\n doi = '10.1002/14651858.CD011748.pub2'\n article = ec.efetch(db='pubmed', id=id)\n for i, a in enumerate(article):\n crud.pubmedarticle_to_db(a, 'systematic_reviews')\n bot.cochranebot(doi, id)\n conn = psycopg2.connect(**self.postgresql.dsn())\n cursor = conn.cursor()\n cursor.execute(\"SELECT nct_id from review_rtrial where relationship = 'included' and review_id = %s;\", (id,))\n ncts = set(zip(*cursor.fetchall())[0])\n self.assertEqual(ncts,\n {'NCT01516879', 'NCT01644188', 'NCT01507831', 'NCT01439880', 'NCT01854918', 'NCT01592240',\n 'NCT01644175',\n 'NCT01730040', 'NCT01623115', 'NCT01709500', 'NCT01709513'})\n bot.cochrane_ongoing_excluded(doi, id)\n cursor.execute(\"SELECT nct_id from review_rtrial where relationship = 'relevant' and review_id = %s;\", (id,))\n ncts = set(zip(*cursor.fetchall())[0])\n self.assertEqual(ncts,\n {'NCT02729025', 'NCT02207634', 'NCT02392559', 'NCT02833844', 'NCT02642159', 'NCT01663402',\n 'NCT01624142'})", "def test_coherence_regularized():\r\n for method in methods:\r\n f, c = tsa.coherence_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])", "def test_simple_autocorr():\n\n time_point = datetime(2012, 12, 31)\n period = 25\n spy = DEFAULT_ASSET_FACTORY.make_asset(\"SPY\")\n\n test_weatherman = weathermen.simple_linear(CALENDAR, spy)\n forecast = test_weatherman(DEFAULT_ASSET_FACTORY, time_point, period)\n\n assert is_close(forecast.cagr(spy), 0.0958)", "def test_census_county_population():\n dataframe = get_county_population_dataframe()\n boulder_county_row = dataframe.loc[dataframe['county_fips'] == 8013]\n boulder_county_population = boulder_county_row.get('county_population')\n assert float(boulder_county_population) == 326196", "def creep_data(data_set='creep_rupture'):\r\n 
if not data_available(data_set):\r\n download_data(data_set)\r\n path = os.path.join(data_path, data_set)\r\n tar_file = os.path.join(path, 'creeprupt.tar')\r\n tar = tarfile.open(tar_file)\r\n print('Extracting file.')\r\n tar.extractall(path=path)\r\n tar.close()\r\n all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka'))\r\n y = all_data[:, 1:2].copy()\r\n features = [0]\r\n features.extend(range(2, 31))\r\n X = all_data[:, features].copy()\r\n return data_details_return({'X': X, 'y': y}, data_set)", "def test_get_book_number(self):\n\t\t\n\t\tself.assertTrue(data.get_book_number('[1 corinthians 1:1]') == 46)", "def pull_nchs_mortality_data(token: str, test_file: Optional[str]=None):\n # Constants\n keep_columns = METRICS.copy()\n type_dict = {key: float for key in keep_columns}\n type_dict[\"timestamp\"] = 'datetime64[ns]'\n\n if test_file:\n df = pd.read_csv(\"./test_data/%s\"%test_file)\n else:\n # Pull data from Socrata API\n client = Socrata(\"data.cdc.gov\", token)\n results = client.get(\"r8kw-7aab\", limit=10**10)\n df = pd.DataFrame.from_records(results)\n # drop \"By Total\" rows\n df = df[df[\"group\"].transform(str.lower) == \"by week\"]\n\n df = standardize_columns(df)\n\n if \"end_date\" in df.columns:\n # Check missing week_ending_date == end_date\n try:\n assert all(df[\"week_ending_date\"] == df[\"end_date\"])\n except AssertionError as exc:\n raise ValueError(\n \"week_ending_date is not always the same as end_date, check the raw file\"\n ) from exc\n else:\n # Check missing start_week == end_week\n try:\n assert all(df[\"timestamp\"] == df[\"end_week\"])\n except AssertionError as exc:\n raise ValueError(\n \"end_week is not always the same as start_week, check the raw file\"\n ) from exc\n\n try:\n df = df.astype(type_dict)\n except KeyError as exc:\n raise ValueError(f\"\"\"\nExpected column(s) missed, The dataset schema may\nhave changed. Please investigate and amend the code.\n\nColumns needed:\n{NEWLINE.join(type_dict.keys())}\n\nColumns available:\n{NEWLINE.join(df.columns)}\n\"\"\") from exc\n\n # Drop rows for locations outside US\n df = df[df[\"state\"] != \"United States\"]\n df = df.loc[:, keep_columns + [\"timestamp\", \"state\"]].set_index(\"timestamp\")\n\n # NCHS considers NYC as an individual state, however, we want it included\n # in NY. 
If values are nan for both NYC and NY, the aggreagtion should\n # also have NAN.\n df_ny = df.loc[df[\"state\"] == \"New York\", :].drop(\"state\", axis=1)\n df_nyc = df.loc[df[\"state\"] == \"New York City\", :].drop(\"state\", axis=1)\n # Get mask df to ignore cells where both of them have NAN values\n mask = (df_ny[keep_columns].isnull().values \\\n & df_nyc[keep_columns].isnull().values)\n df_ny = df_ny.append(df_nyc).groupby(\"timestamp\").sum().where(~mask, np.nan)\n df_ny[\"state\"] = \"New York\"\n # Drop NYC and NY in the full dataset\n df = df.loc[~df[\"state\"].isin([\"New York\", \"New York City\"]), :]\n df = df.append(df_ny).reset_index().sort_values([\"state\", \"timestamp\"])\n # Add population info\n keep_columns.extend([\"timestamp\", \"geo_id\", \"population\"])\n gmpr = GeoMapper()\n df = gmpr.add_population_column(df, \"state_name\", geocode_col=\"state\")\n df = gmpr.add_geocode(df, \"state_name\", \"state_id\", from_col=\"state\", new_col=\"geo_id\")\n return df[keep_columns]", "def test_codon_usage_custom(self):\n # We need a FASTA file of CDS sequences to count the codon usage...\n dna_fasta_filename = \"fasta.tmp\"\n dna_genbank_filename = \"GenBank/NC_005816.gb\"\n record = SeqIO.read(dna_genbank_filename, \"genbank\")\n records = []\n for feature in record.features:\n if feature.type == \"CDS\" and len(feature.location.parts) == 1:\n start = feature.location.start.position\n end = feature.location.end.position\n table = int(feature.qualifiers[\"transl_table\"][0])\n if feature.strand == -1:\n seq = record.seq[start:end].reverse_complement()\n else:\n seq = record.seq[start:end]\n # Double check we have the CDS sequence expected\n # TODO - Use any cds_start option if/when added to deal with the met\n a = \"M\" + str(seq[3:].translate(table))\n b = feature.qualifiers[\"translation\"][0] + \"*\"\n self.assertEqual(a, b, \"%r vs %r\" % (a, b))\n records.append(SeqRecord(seq, id=feature.qualifiers[\"protein_id\"][0],\n description=feature.qualifiers[\"product\"][0]))\n\n with open(dna_fasta_filename, \"w\") as handle:\n SeqIO.write(records, handle, \"fasta\")\n\n CAI = CodonAdaptationIndex()\n # Note - this needs a FASTA file which containing non-ambiguous DNA coding\n # sequences - which should each be a whole number of codons.\n CAI.generate_index(dna_fasta_filename)\n # Now check codon usage index (CAI) using this species\n self.assertEqual(record.annotations[\"source\"],\n \"Yersinia pestis biovar Microtus str. 
91001\")\n self.assertEqual(\"%0.5f\" % CAI.cai_for_gene(\"ATGCGTATCGATCGCGATACGATTAGGCGGATG\"),\n \"0.67213\")\n os.remove(dna_fasta_filename)", "def testhospital_vs_confirmed(self):\n data = load_covid_data(file)\n aim_day = data['evolution']['2020-03-16']\n # Artificial cut one value , it supposed to be 4 number\n aim_day['epidemiology']['confirmed']['total']['age'] = [10, 11, 12]\n try:\n cases_population = cases_per_population_by_age(data)\n except Exception as e:\n raise Exception", "def test_change_of_year(self):\n\n input_ = [\n self.indicator_record(date=datetime.date(2006, 11, 1), value=0.31),\n self.indicator_record(date=datetime.date(2006, 12, 1), value=0.48),\n ]\n output = self.expander._ipca_from_15_expander(input_)\n expected = self.indicator_record(date=datetime.date(2007, 1, 1), value=0.35)\n actual = output[-1]\n\n self.assertEqual(expected, actual)", "def test_cci(self):\n q = qufilab.cci(self.close, self.high, self.low, 200)\n t = talib.CCI(self.high, self.low, self.close, 200)\n np.testing.assert_allclose(q, t, rtol = self.tolerance)", "def test_taxa_chao1(self):\n table_factory = DataTableFactory(PACKET_DIR)\n chao1 = table_factory.taxa_alpha_diversity(metric='chao1', rarefy=1000 * 1000)\n self.assertTrue((chao1 > 0).all())", "def test_overall_report_banner_ecpm():\n assert (overall_data['banner_report']['data'][6][0] == 'eCPM')\n for num in overall_data['banner_report']['data'][6][1:]:\n assert (num == 4000)", "def setUp(self):\n params = {\"date\": CONSTITUENCY_YEAR.strftime(\"%Y\")}\n url = twfy.svcurl(\"getConstituencies\", params)\n data = pickle.load(open(\"signup/unit_tests/twfy.getConstituencies\"))\n twfy.fetch.prime(url, data)", "def test_get_crime_description(self):\n\n # can use the sample crime reports data\n self.data = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/report_2_counts.csv'))\n\n self.descriptions = utils.populate_offence(self.data)\n\n self.assertTrue(isinstance(self.descriptions, pd.DataFrame))\n\n # check anti-social behaviour match is anti-social behaviour and is lowercase\n self.assertEqual(self.descriptions.Crime_description[0], 'anti-social behaviour')\n\n # check value is contained in list of crime descriptions for Violence and sexual offences\n self.assertTrue(self.descriptions.Crime_description[7] in ['abuse of children through sexual exploitation',\n 'abuse of position of trust of a sexual nature',\n 'assault with injury', 'assault with injury on a constable',\n 'assault with intent to cause serious harm',\n 'assault without injury', 'assault without injury on a constable',\n 'attempted murder', 'causing death by aggravated vehicle taking',\n 'causing death by careless driving under influence of drink or drugs',\n 'causing death by careless or inconsiderate driving',\n 'causing death by driving: unlicensed or disqualified or uninsured drivers',\n 'causing death or serious injury by dangerous driving',\n 'causing or allowing death of child or vulnerable person',\n 'causing sexual activity without consent', 'child abduction',\n 'conspiracy to murder', 'cruelty to children/young persons',\n 'endangering life', 'exposure and voyeurism', 'harassment',\n 'homicide', 'incest or familial sexual offences',\n 'intentional destruction of a viable unborn child', 'kidnapping',\n 'malicious communications', 'modern slavery',\n 'other miscellaneous sexual offences',\n 'procuring illegal abortion',\n 'racially or religiously aggravated assault with injury',\n 'racially or religiously aggravated assault without 
injury',\n 'racially or religiously aggravated harassment',\n 'rape of a female aged 16 and over',\n 'rape of a female child under 13',\n 'rape of a female child under 16',\n 'rape of a male aged 16 and over', 'rape of a male child under 13',\n 'rape of a male child under 16',\n 'sexual activity etc with a person with a mental disorder',\n 'sexual activity involving a child under 13',\n 'sexual activity involving child under 16',\n 'sexual assault on a female aged 13 and over',\n 'sexual assault on a female child under 13',\n 'sexual assault on a male aged 13 and over',\n 'sexual assault on a male child under 13', 'sexual grooming',\n 'stalking', 'threats to kill',\n 'trafficking for sexual exploitation', 'unnatural sexual offences']\n )\n\n #self.assertEqual(self.descriptions.columns.tolist(), ['UID','datetime','Crime_description','Crime_type','LSOA_code','Police_force'])" ]
[ "0.591036", "0.59009093", "0.5780899", "0.5772635", "0.5762341", "0.572158", "0.5605917", "0.5557134", "0.5451257", "0.5441077", "0.5409767", "0.537112", "0.5370728", "0.533519", "0.5321917", "0.5318306", "0.52777225", "0.5264037", "0.5263327", "0.52472895", "0.524456", "0.5229085", "0.5209243", "0.5204605", "0.51926315", "0.51871383", "0.51808965", "0.51741576", "0.51688457", "0.5152745" ]
0.6492486
0
Description Testing that the Adjei method that we coded gives the same answer as the Adjei method in the book. We know that the Crofton Method works pretty well. These results show that, as far as I can tell, our implementation of the Adjei method is returning pretty similar results to what Adjei is showing for St males, SU males and SU females. We are getting very different answers with ST females and looking at their answers, it doesn't seem like they should be getting what they are getting. They calculate the LD50 for the ST females to be around 5.7
def test_adjei_method(self): # Specify the Adjei Method Data st_females = np.repeat((0, 1, 2, 3, 4, 5, 6, 7), (201, 114, 63, 37, 19, 5, 3, 4)) st_males = np.repeat((0, 1, 2, 3, 4, 5), (226, 128, 62, 30, 3, 3)) su_females = np.repeat(np.arange(0, 8), (2311, 180, 66, 8, 5, 2, 0, 1)) su_males = np.repeat((0, 1, 2, 3, 4), (2257, 146, 29, 7, 1)) # Fit the adjei method st_fem = PIHM(st_females) st_fem_fit = st_fem.adjei_method([], [0, 1, 2, 2.9], no_bins=True, run_crof=True) # Fit males using females st_male = PIHM(st_males) st_male.set_premort_params(st_fem_fit[0], st_fem_fit[1], st_fem_fit[2]) st_male_fit = st_male.adjei_method([], [], no_bins=True, run_crof=False) # Fit the females su_fem = PIHM(su_females) su_fem_fit = su_fem.adjei_method([], [0, 1, 2, 2.9], no_bins=True, run_crof=True) su_male = PIHM(su_males) su_male.set_premort_params(su_fem_fit[0], su_fem_fit[1], su_fem_fit[2]) su_male_fit = su_male.adjei_method([], [], no_bins=True, run_crof=False) fits = [st_fem_fit, st_male_fit, su_fem_fit, su_male_fit] ld50 = lambda x: np.exp(x[3] / np.abs(x[4])) pred_ld50 = np.round([ld50(f) for f in fits], decimals=1) adjei_ld50 = [5.7, 3.4, 3.2, 1.8] exp_ld50 = [6.4, 3.7, 3.1, 1.8] assert_array_equal(exp_ld50, pred_ld50) print(zip(pred_ld50, adjei_ld50))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_bias(ds: List[str], predicted: List[GENDER]) -> Dict:\r\n assert (len(ds) == len(predicted)) # must have same length to create tuples\r\n\r\n conf_dict = defaultdict(lambda: defaultdict(lambda: 0))\r\n total = defaultdict(lambda: 0) # increment values if we have any gender\r\n pred_cnt = defaultdict(lambda: 0)\r\n correct_cnt = defaultdict(lambda: 0) # increment values if true_gender == predicted_gender\r\n\r\n count_unknowns = defaultdict(lambda: 0)\r\n\r\n for (gold_gender, word_ind, sent, profession), pred_gender in zip(ds, predicted):\r\n # # IMPORTANTE NOTE :\r\n # need to works with .name of GENDER object for an unknown reason\r\n\r\n if isinstance(pred_gender, str): # can happen in spacy languages 'fr', 'es' or 'it\r\n pred_gender = SPACY_GENDER_TYPES[pred_gender]\r\n\r\n # tuples of values in ds and values in predicted\r\n if pred_gender.name == GENDER.ignore.name:\r\n continue # skip analysis of ignored words\r\n\r\n gold_gender = WB_GENDER_TYPES[gold_gender] # allows Winobias gender type conversion\r\n\r\n if pred_gender.name == GENDER.unknown.name:\r\n count_unknowns[gold_gender] += 1 # increment values for any unknown pred_gender\r\n\r\n profession = profession.lower()\r\n\r\n total[gold_gender] += 1\r\n\r\n if pred_gender.name == gold_gender.name:\r\n correct_cnt[gold_gender] += 1\r\n\r\n pred_cnt[pred_gender.name] += 1\r\n\r\n conf_dict[gold_gender][pred_gender] += 1\r\n\r\n all_total = sum(total.values())\r\n\r\n output_dict = {} # init output dictionnary\r\n # Compute metrics\r\n accuracy = round((sum(correct_cnt.values()) / all_total) * 100, 1) # compute accuracy\r\n output_dict['acc'] = accuracy\r\n\r\n if (total[GENDER.male] == 0) | (pred_cnt[GENDER.male.name] == 0): # Avoid ZeroDivisionError\r\n output_dict['f1_male'] = None\r\n else:\r\n recall_male = round((correct_cnt[GENDER.male] / total[GENDER.male]) * 100, 1) # compute metrics for male\r\n prec_male = round((correct_cnt[GENDER.male] / pred_cnt[GENDER.male.name]) * 100, 1)\r\n f1_male = round(calc_f1(prec_male, recall_male), 1)\r\n output_dict['f1_male'] = f1_male\r\n\r\n if (total[GENDER.female] == 0) | (pred_cnt[GENDER.female.name] == 0): # Avoid ZeroDivisionError\r\n output_dict['f1_female'] = None\r\n else:\r\n recall_female = round((correct_cnt[GENDER.female] / total[GENDER.female]) * 100, 1) # calcul metrics for female\r\n prec_female = round((correct_cnt[GENDER.female] / pred_cnt[GENDER.female.name]) * 100, 1)\r\n f1_female = round(calc_f1(prec_female, recall_female), 1)\r\n output_dict['f1_female'] = f1_female\r\n\r\n output_dict['unk_male'] = count_unknowns[GENDER.male]\r\n output_dict['unk_female'] = count_unknowns[GENDER.female]\r\n output_dict['unk_neutral'] = count_unknowns[GENDER.neutral]\r\n\r\n return output_dict", "def ACE(count, rare_threshold=10):\n \n def frequency_counter(count):\n \"\"\"Creates a frequency count array to beused by every other function.\"\"\"\n return counts(count)\n \n def species_rare(freq_counts, rare_threshold):\n \"\"\"freq_counts number of rare species. Default value of rare is 10 or\n fewer individuals. Based on Chao 2000 in Statistica Sinica pg. 229 \n citing empirical observations by Chao, Ma and Yang in 1993.\"\"\"\n return freq_counts[1:rare_threshold+1].sum()\n \n def species_abundant(freq_counts, rare_threshold):\n \"\"\"freq_counts number of abundant species. Default value of abundant is\n greater than 10 individuals. 
Based on Chao 2000 in Statistica Sinica \n pg.229 citing observations by Chao, Ma and Yang in 1993.\"\"\"\n return freq_counts[rare_threshold+1:].sum()\n\n def number_rare(freq_counts, gamma=False):\n \"\"\"Number of individuals in rare species. gamma=True generates the\n n_rare used for the variation coefficient.\"\"\"\n \n n_rare=0 \n if gamma == True:\n for i, j in enumerate(freq_counts[:rare_threshold+1]):\n n_rare = n_rare + (i*j)*(i-1)\n return n_rare\n \n for i, j in enumerate(freq_counts[:rare_threshold+1]):\n n_rare = n_rare + (i*j)\n return n_rare\n \n # calculations begin\n \n freq_counts = frequency_counter(count)\n \n if freq_counts[1:rare_threshold].sum() == 0:\n return species_abundant(freq_counts, rare_threshold)\n\n if freq_counts[1] == freq_counts[1:rare_threshold].sum():\n raise ValueError(\"only rare species are singletons, ACE \"+\\\n \"metric is undefined. EstimateS suggests using bias corrected Chao1\")\n \n s_abun = species_abundant(freq_counts, rare_threshold) \n\n \n s_rare = species_rare(freq_counts, rare_threshold)\n\n n_rare = number_rare(freq_counts)\n\n c_ace = 1 - (freq_counts[1]).sum()/float(n_rare)\n\n top = s_rare*number_rare(freq_counts, gamma=True)\n bottom = c_ace*n_rare*(n_rare-1.0)\n \n gamma_ace = (top/bottom) - 1.0\n \n if 0 > gamma_ace:\n gamma_ace = 0\n\n return s_abun + (s_rare/c_ace) + ((freq_counts[1]/c_ace)*gamma_ace)", "def affichage(sequenceRules,sequenceData,Paires,ecart,tolerance) :\n print(\"-------------sequenceData---------------\")\n print(sequenceData)\n #groupPaires\n groupPairesResult=groupPaires(Paires,10)\n #print(\"--------Group Paires: --------------\")\n #print(groupPairesResult)\n \n #biggerGroup\n #biggerGroupResult=biggerGroup(sequence,groupPairesResult,2)\n #print(\"--------Bigger group : --------------\")\n #print(biggerGroupResult)\n\n #winepi\n winepiResultGroupPaire=winepi(groupPairesResult,0.6)\n #print(\"--------Winepi group paire : --------------\")\n #print_big_rules(winepiResultGroupPaire)\n\n #winepiResult=winepi(biggerGroupResult,0.8)\n #print(\"--------Winepi bigger Group : --------------\")\n #print_rules(winepiResult)\n \n \n print(\"\\n\")\n print(\"-------event->paire----------\")\n winepiResult=winepi2(sequenceRules,Paires,0.6)\n predict_e=prediction2(sequenceData,Paires,winepiResult)\n #for p in predict_e :\n # print(p)\n #print(\"\\n\")\n rules=accuracy(predict_e,sequenceData,winepiResult)\n n_ok,n_ko,n_bet,avg_gap =analyse_result(rules,tolerance)\n #print_event_rules(rules)\n print(\"rules ok : \",n_ok,\" rules under : \",n_ko,\" rules over : \",n_bet,\" average gap : \",avg_gap)\n \n \n \n #print(\"--------Winepi group paire : --------------\")\n #print_rules(winepiResult)\n\n #winepiResult=winepi(biggerGroupResult,0.8)\n print(\"--------Winepi bigger Group : --------------\")\n #print_rules(winepiResult)\n print(\"\\n\")\n print(\"--------PrΓ©diction : paire-> triplet --------------\")\n predict_p=prediction(sequenceData,Paires,winepiResultGroupPaire,ecart)\n #for p in predict_p :\n # print(p)\n #print(\"\\n\")\n rules=accuracy(predict_p,sequenceData,winepiResultGroupPaire)\n #print_big_rules(rules)\n print(\"\\n\")\n n_ok,n_ko,n_bet,avg_gap =analyse_result(rules,tolerance)\n print(\"rules ok : \",n_ok,\" rules under : \",n_ko,\" rules over : \",n_bet,\" average gap : \",avg_gap)\n print(\"\\n\\n\")\n\n return None", "def cost(period_exams):\n\n student_periods = {}\n\n for aperiod in period_exams:\n for e in period_exams[aperiod]:\n for s in es.exam_students[e]:\n if s not in student_periods:\n 
student_periods[s] = [aperiod]\n else:\n student_periods[s].append(aperiod)\n\n numofstudents = len(student_periods)\n\n cost = 0\n d = 0\n cost_value = [16, 8, 4, 2, 1]\n for s in student_periods:\n mycal = sorted(student_periods[s])\n for (i, eachexam) in enumerate(sorted(mycal)):\n for j in range(i+1, i+6):\n if j < len(mycal):\n d = mycal[j] - mycal[i]\n if d > 5:\n cost += 0\n else:\n cost += cost_value[d-1]\n\n print(\"-\"*15)\n print(f'Total cost of this problem is {cost/numofstudents:.4f}')\n print(\"-\"*15)\n return(cost/numofstudents)", "def evaluate(gold_seg, pred_seg):\n ### Exercise 6.3\n BP_total_pred, BP_correct_pred, BP_gold = 0, 0, 0\n WP_total_pred, WP_correct_pred, WP_gold = 0, 0, 0\n LP_total_pred, LP_correct_pred, LP_gold = 0, 0, 0\n\n gold_lexicon = []\n pred_lexicon = []\n\n # Cycle through each utterence, tallying True positives, total predictions, and gold standards\n for gold_list, pred_list in zip(gold_seg, pred_seg):\n cur_gold_utter = []\n cur_pred_utter = []\n gold_bounds = []\n pred_bounds = []\n gold_pos = 0\n pred_pos = 0\n # Find the bounds of each segment\n for g in gold_list:\n gold_bounds.append(gold_pos)\n cur_gold_utter.append((g, gold_pos))\n gold_pos += len(g)\n for p in pred_list:\n pred_bounds.append(pred_pos)\n cur_pred_utter.append((p, pred_pos))\n pred_pos += len(p)\n # Check if bounds are and tally\n for bound in pred_bounds:\n if bound == 0:\n continue\n if bound in gold_bounds:\n BP_correct_pred += 1\n BP_gold = BP_gold + len(gold_bounds) - 1\n BP_total_pred = BP_total_pred + len(pred_bounds) - 1\n # Check if predicted words are correct and tally. Also check if word is in lexicon\n for segment in cur_pred_utter:\n if segment[0] == \"#\":\n continue\n if segment in cur_gold_utter:\n WP_correct_pred += 1\n WP_total_pred += 1\n if segment[0] not in pred_lexicon:\n LP_correct_pred += 1\n LP_total_pred += 1\n pred_lexicon.append(segment[0])\n else:\n WP_total_pred += 1\n if segment[0] not in pred_lexicon:\n LP_total_pred += 1\n pred_lexicon.append(segment[0])\n # Tally gold standard words and lexicon\n for segment in cur_gold_utter:\n if segment[0] is not \"#\":\n if segment[0] not in gold_lexicon:\n gold_lexicon.append(segment[0])\n LP_gold += 1\n WP_gold += 1\n # Calculate stats\n BP = BP_correct_pred / BP_total_pred\n BR = BP_correct_pred / BP_gold\n BF1 = 2 * ((BP * BR) / (BP + BR))\n\n WP = WP_correct_pred / WP_total_pred\n WR = WP_correct_pred / WP_gold\n WF1 = 2 * ((WP * WR) / (WP + WR))\n\n LP = LP_correct_pred / LP_total_pred\n LR = LP_correct_pred / LP_gold\n LF1 = 2 * ((LP * LR) / (LP + LR))\n\n print(\"Boundary Precision: {}\\n Boundary Recall: {}\\n Boundary F1: {}\"\n .format(BP, BR, BF1))\n print(\"Word Precision: {}\\n Word Recall: {}\\n Word F1: {}\"\n .format(WP, WR, WF1))\n print(\"Lexicon Precision: {}\\n Lexicon Recall: {}\\n Lexicon F1: {}\"\n .format(LP, LR, LF1))", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n # Useful information you can extract from a GameState (pacman.py)\n \"\"\n foodPos = currentGameState.getFood().asList() \n foodDist = [] \n ghostStates = currentGameState.getGhostStates() \n capPos = currentGameState.getCapsules() \n currentPos = list(currentGameState.getPacmanPosition()) \n \n for food in foodPos:\n food2pacmanDist = manhattanDistance(food, currentPos)\n foodDist.append(-1*food2pacmanDist)\n \n if not foodDist:\n foodDist.append(0)\n\n return max(foodDist) + currentGameState.getScore()", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE 
***\"\n pos = currentGameState.getPacmanPosition()\n score1 = currentGameState.getScore()\n \n\n Food = currentGameState.getFood()\n foodlist = list(Food.asList())\n dist = []\n score2 = 0\n if len(foodlist) != 0:\n for food in foodlist:\n dist.append(manhattanDistance(pos, food))\n mindist = min(dist)\n score2 = 10/mindist + len(foodlist)\n\n score3 = 0\n for capsule in currentGameState.getCapsules():\n if manhattanDistance(pos, capsule) <= 3:\n score3 = 100\n\n score4 = 0\n GhostStates = currentGameState.getGhostStates()\n \n for ghostState in GhostStates:\n ScaredTimes = ghostState.scaredTimer\n\n gdist = manhattanDistance(pos, ghostState.getPosition())\n if ScaredTimes <= 1 and gdist <= 2:\n score4 -= 1000/(gdist+1)\n elif ScaredTimes > 2:\n if gdist <= 20:\n score4 += 500/(gdist+1) \n\n return 1.5 * score1 + score2 + score3 + 3 * score4", "def englishtest(result, etaoin_shrdlu=[12.02,9.1,8.12,7.68,7.31,6.95,6.28,6.02,5.92,4.32,3.98,2.88]):\n \n a = len(result)\n single = []\n for i in range(12):\n single.append(9999)\n total = 0\n single[0]= result.count(b'e') + result.count(b'E')\n single[1]= result.count(b't') + result.count(b'T')\n single[2]= result.count(b'a') + result.count(b'A')\n single[3]= result.count(b'o') + result.count(b'O')\n single[4]= result.count(b'i') + result.count(b'I')\n single[5]= result.count(b'n') + result.count(b'N')\n single[6]= result.count(b's') + result.count(b'S')\n single[7]= result.count(b'h') + result.count(b'H')\n single[8]= result.count(b'r') + result.count(b'R')\n single[9]= result.count(b'd') + result.count(b'D')\n single[10]= result.count(b'l') + result.count(b'L')\n single[11]= result.count(b'u') + result.count(b'U')\n\n for i in range(12):\n if single[i] == 0:\n single[i] =100\n else:\n single[i] = single[i]/a\n for i in single:\n total = total + i\n \n return total, single", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n pacmanPos = currentGameState.getPacmanPosition()\n\n food = currentGameState.getFood()\n capsules = currentGameState.getCapsules()\n return currentGameState.getScore() - 10 * capsuleDistancePlan(pacmanPos, capsules) - foodDistPlan(pacmanPos, food)", "def calories_alcohol(og, fg):\n\n return 25.2 * fg * abw(og, fg)", "def test_ford_fulkerson_algo() -> np.ndarray:\r\n res = ford_fulkerson_algorithm(np.array(ex_sample_graph), 0, 10)\r\n print(\"Result: \")\r\n print(res)\r\n return res", "def AmOppCr(_cmp, e87482, e87487, e87492, e87497):\n\n \"\"\"\n This function calculates American Opportunity Credit\n for up to four eligible students\n\n \"\"\"\n\n # Expense should not exceed the cap of $4000.\n if _cmp == 1:\n\n c87482 = max(0., min(e87482, 4000.))\n c87487 = max(0., min(e87487, 4000.))\n c87492 = max(0., min(e87492, 4000.))\n c87497 = max(0., min(e87497, 4000.))\n else:\n c87482, c87487, c87492, c87497 = 0., 0., 0., 0.\n\n # Credit calculated as 100% of the first $2000 expense plus\n # 25% of amount exceeding $2000.\n if max(0, c87482 - 2000) == 0:\n c87483 = c87482\n else:\n c87483 = 2000 + 0.25 * max(0, c87482 - 2000)\n\n if max(0, c87487 - 2000) == 0:\n c87488 = c87487\n else:\n c87488 = 2000 + 0.25 * max(0, c87487 - 2000)\n\n if max(0, c87492 - 2000) == 0:\n c87493 = c87492\n else:\n c87493 = 2000 + 0.25 * max(0, c87492 - 2000)\n\n if max(0, c87497 - 2000) == 0:\n c87498 = c87497\n else:\n c87498 = 2000 + 0.25 * max(0, c87497 - 2000)\n\n # Sum of credits of all four students.\n c87521 = c87483 + c87488 + c87493 + c87498\n\n return (c87482, c87487, c87492, c87497, c87483, c87488, c87493, 
c87498,\n c87521)", "def test_success(database):\n # Create a 12 character random fain\n fain_1 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_2 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_3 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_4 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n\n # Just some basic sums to make sure it works\n af_1_row_1 = AwardFinancialFactory(transaction_obligated_amou=1100, fain=fain_1, allocation_transfer_agency=None)\n af_1_row_2 = AwardFinancialFactory(transaction_obligated_amou=11, fain=fain_1.lower(),\n allocation_transfer_agency=None)\n # Non-ignored rows with a matching ATA/AID\n af_2_row_1 = AwardFinancialFactory(transaction_obligated_amou=9900, fain=fain_2, allocation_transfer_agency=None)\n af_2_row_2 = AwardFinancialFactory(transaction_obligated_amou=99, fain=fain_2, allocation_transfer_agency='good',\n agency_identifier='good')\n # Ignored row with non-matching ATA/AID\n af_3 = AwardFinancialFactory(transaction_obligated_amou=8888, fain=fain_3, allocation_transfer_agency='good',\n agency_identifier='bad')\n # No TOA in File C, ignored\n af_4 = AwardFinancialFactory(transaction_obligated_amou=None, piid=fain_4.lower(),\n allocation_transfer_agency='good', agency_identifier='good')\n\n # Fain sums for AFA\n afa_1_row_1 = AwardFinancialAssistanceFactory(fain=fain_1, federal_action_obligation=-1100,\n original_loan_subsidy_cost=None, record_type='2')\n afa_1_row_2 = AwardFinancialAssistanceFactory(fain=fain_1.lower(), federal_action_obligation=-10,\n original_loan_subsidy_cost=None, record_type='3')\n # original loan subsidy cost used in this row because assistance type is '08'\n afa_1_row_3 = AwardFinancialAssistanceFactory(fain=fain_1, original_loan_subsidy_cost=-1, assistance_type='08',\n federal_action_obligation=None, record_type='2')\n # federal action obligation used in this row (it's 0), because assistance type is not 07 and 08\n afa_1_row_4 = AwardFinancialAssistanceFactory(fain=fain_1, original_loan_subsidy_cost=-2222, assistance_type='09',\n federal_action_obligation=None, record_type='3')\n # Ignored because record type 1\n afa_1_row_5 = AwardFinancialAssistanceFactory(fain=fain_1, federal_action_obligation=-1100,\n original_loan_subsidy_cost=None, record_type='1')\n # Fain 2 Test for non-ignored ATA\n afa_2 = AwardFinancialAssistanceFactory(fain=fain_2, federal_action_obligation=-9999,\n original_loan_subsidy_cost=None, record_type='2')\n # Fain 3 test for ignoring a non-matching ATA/AID\n afa_3 = AwardFinancialAssistanceFactory(fain=fain_3, federal_action_obligation=-9999, record_type='3')\n\n # This one matches but will be ignored\n afa_4 = AwardFinancialAssistanceFactory(fain=fain_4, federal_action_obligation=-9999)\n\n errors = number_of_errors(_FILE, database, models=[af_1_row_1, af_1_row_2, af_2_row_1, af_2_row_2, af_3, af_4,\n afa_1_row_1, afa_1_row_2, afa_1_row_3, afa_1_row_4, afa_1_row_5,\n afa_2, afa_3, afa_4])\n assert errors == 0", "def americanprice(self):\n self.americanpay = np.zeros((self.steps+1,self.steps+1))\n self.optionvalue = np.zeros((self.steps+1,self.steps+1))\n self.exercisevalue = np.zeros((self.steps+1,self.steps+1))\n self.americanpay[-1,:] = np.array( list( map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n self.optionvalue[i-1][j] = 
(self.americanpay[i][j]*self.upprob + self.americanpay[i][j+1]*(1-self.upprob))/discount\n self.exercisevalue[i-1][j] = max(self.pricetree[i-1][j]-self.s,0.0)\n self.americanpay[i-1][j] = max(self.optionvalue[i-1][j],self.exercisevalue[i-1][j])\n return self.americanpay[0][0]", "def get_accuracy(self, gold, predicted):\n # Exercise 3: calculate accuracy\n i = 0\n j = 0\n for labels in gold:\n if labels == predicted[i]:\n j +=1\n i +=1\n return j / i * 100", "def f1(predictions, gold):\n if len(gold) == 0:\n return 1. if len(predictions) == 0 else 0.\n if len(predictions) == 0:\n return 0.\n predictions_set = set(predictions)\n gold_set = set(gold)\n nom = 2 * len(predictions_set.intersection(gold_set))\n denom = len(predictions_set) + len(gold_set)\n return float(nom)/float(denom)", "def betterEvaluationFunction(currentGameState):\n\n pacman_pos = currentGameState.getPacmanPosition()\n food = currentGameState.getFood()\n ghost_states = currentGameState.getGhostStates()\n scared_times = [g.scaredTimer for g in ghost_states]\n\n anyfood = searchAgents.AnyFoodSearchProblem(currentGameState)\n food_distance = search.bfs(anyfood)\n food_distance = 1 / len(food_distance) if food_distance else 0\n\n enemy_dist = [manhattanDistance(pacman_pos, g.configuration.pos)\n for g in ghost_states]\n for i, d in enumerate(enemy_dist):\n d = d if d != 0 else 0.00001\n d = 1 / d if d < 5 or scared_times[i] != 0 else 0\n d = 0 if scared_times[i] == 0 else -d\n enemy_dist[i] = d\n enemy_dist = sum(enemy_dist)\n\n score = currentGameState.getScore()\n return score + 0.1*food_distance - enemy_dist", "def calculateMarriedTax(husbandIncome, wifeIncome):\r\n pass\r\n a = husbandIncome + wifeIncome\r\n if a == 0:\r\n return 0 \r\n elif 1 <= a <= 19050:\r\n return 10\r\n elif 19051 <= a <= 77400:\r\n return 12\r\n elif 77401 <= a <= 165000:\r\n return 22\r\n elif 165001 <= a <= 315000:\r\n return 24\r\n elif 315001 <= a <= 400000:\r\n return 32\r\n elif 400001 <= a <= 600000:\r\n return 35\r\n else:\r\n return 37", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n if currentGameState.isWin():\n return float(\"inf\")\n elif currentGameState.isLose():\n return - float(\"inf\")\n\n\n score = scoreEvaluationFunction(currentGameState) \n current_state = currentGameState.getPacmanPosition()\n\n #for food\n \n Food= currentGameState.getFood() \n food_list = Food.asList()\n food_pos = []\n for k in food_list:\n a=manhattanDistance(k,current_state)\n food_pos.append(a)\n for i in food_pos:\n if(i==0):\n score=score + 100\n else:\n score=score + (1.0/(i**2))\n\n #for ghosts\n ghost_list = currentGameState.getGhostStates() \n ghost_distance = []\n scared_ghost = [] \n distance=[]\n for ghost in ghost_list:\n ghost_position= ghost.getPosition()\n pos = manhattanDistance(current_state, ghost_position)\n distance.append(pos)\n if ghost.scaredTimer == 0:\n ghost_distance += distance\n elif ghost.scaredTimer > 0:\n scared_ghost += distance\n min_ghost_dist = -1\n min_scared_ghost_dist = -1\n if len(ghost_distance) > 0:\n min_ghost_dist = min(ghost_distance)\n \n elif len(scared_ghost) > 0:\n min_scared_ghost_dist = min(scared_ghost)\n \n score = score - (2 / min_ghost_dist)\n score = score - (2 * min_scared_ghost_dist)\n\n\n #For capsules\n capsules = currentGameState.getCapsules()\n capsule_len = len(capsules) \n score = score - (15 * capsule_len)\n return score\n util.raiseNotDefined()", "def overall_performance_prf(articles, skip_nils=True, skip_nonnils=False):\n tp=0\n fn=0\n fp=0\n for article in 
articles:\n for entity in article.entity_mentions:\n if skip_nils and entity.gold_link=='--NME--':\n continue\n if skip_nonnils and entity.gold_link!='--NME--':\n continue\n if entity.gold_link==entity.sys_link:\n tp+=1\n else:\n if entity.sys_link!='--NME--':\n fp+=1\n if entity.gold_link!='--NME--':\n fn+=1\n print(tp, fp, fn)\n p=tp/(tp+fp) \n r=tp/(tp+fn) \n f1=2*p*r/(p+r)\n print(p,r,f1)\n return f1", "def test_anglicize():\n print('Testing anglicize')\n result = funcs.anglicize(1)\n introcs.assert_equals(\"one\", result)\n\n result = funcs.anglicize(19)\n introcs.assert_equals(\"nineteen\", result)\n\n result = funcs.anglicize(20)\n introcs.assert_equals(\"twenty\", result)\n\n result = funcs.anglicize(35)\n introcs.assert_equals(\"thirty five\", result)\n\n result = funcs.anglicize(50)\n introcs.assert_equals(\"fifty\", result)\n\n result = funcs.anglicize(99)\n introcs.assert_equals(\"ninety nine\", result)\n\n result = funcs.anglicize(100)\n introcs.assert_equals(\"one hundred\", result)\n\n result = funcs.anglicize(301)\n introcs.assert_equals(\"three hundred one\", result)\n\n result = funcs.anglicize(999)\n introcs.assert_equals(\"nine hundred ninety nine\", result)\n\n result = funcs.anglicize(1000)\n introcs.assert_equals(\"one thousand\", result)\n\n result = funcs.anglicize(1009)\n introcs.assert_equals(\"one thousand nine\", result)\n\n result = funcs.anglicize(900000)\n introcs.assert_equals(\"nine hundred thousand\", result)\n\n result = funcs.anglicize(789436)\n introcs.assert_equals(\"seven hundred eighty nine thousand four hundred thirty six\",\n result)", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newFood = newFood.asList()\n min_distance_f = -1\n for food in newFood:\n distance = util.manhattanDistance(newPos, food)\n if min_distance_f >= distance or min_distance_f == -1:\n min_distance_f = distance\n\n g_distance = 1\n prox_ghost = 0\n for g_state in currentGameState.getGhostPositions():\n distance = util.manhattanDistance(newPos, g_state)\n g_distance += distance\n if distance <= 1:\n prox_ghost += 1\n newCapsule = currentGameState.getCapsules()\n numCapsules = len(newCapsule)\n\n newScore = currentGameState.getScore() + (1 / float(min_distance_f)) - (1 / float(g_distance)) - prox_ghost - numCapsules\n return newScore", "def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de soundexes van de 
achternaam worden meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, 
ratio_geslachtsnaam2 *weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return s\n return final_ratio", "def betterEvaluationFunction(currentGameState):\n if currentGameState.isWin():\n return float(\"inf\")\n elif currentGameState.isLose():\n return -float(\"inf\")\n\n pacman_pos = currentGameState.getPacmanPosition()\n capsules_left = len(currentGameState.getCapsules())\n all_food = currentGameState.getFood().asList()\n food_left = len(all_food)\n md_closest_food = min([util.manhattanDistance(pacman_pos, food) for food in all_food])\n\n scared_ghost, active_ghost =[], []\n for ghost in currentGameState.getGhostStates():\n if ghost.scaredTimer:\n scared_ghost.append(ghost)\n else:\n active_ghost.append(ghost)\n\n dist_nearest_scaredghost = dist_nearest_activeghost = 0\n\n if not len(scared_ghost):\n dist_nearest_scaredghost = 0\n\n if not len(active_ghost):\n dist_nearest_activeghost = float(\"inf\")\n\n if active_ghost:\n dist_nearest_activeghost = min([util.manhattanDistance(pacman_pos, ghost.getPosition()) for ghost in active_ghost])\n if dist_nearest_activeghost > 10:\n dist_nearest_activeghost = 10\n if scared_ghost:\n dist_nearest_scaredghost = min([util.manhattanDistance(pacman_pos, ghost.getPosition()) for ghost in scared_ghost])\n ans = currentGameState.getScore() + -1*md_closest_food + 2*(1.0/dist_nearest_activeghost) + 3*dist_nearest_scaredghost+ -4*capsules_left + -5*food_left\n \"\"\"\n This below line was used to collect those 2700 samples\n \"\"\"\n # ans = a1*currentGameState.getScore() + a2*md_closest_food + a3*(1.0/dist_nearest_activeghost) + a4*dist_nearest_scaredghost+ a5*capsules_left + a6*food_left\n\n return ans\n\n\n util.raiseNotDefined()", "def compute_acc_on_selection(arts, forms_set):\n correct=0\n total=0\n for article in arts:\n for entity in article.entity_mentions:\n if entity.mention in forms_set:\n total+=1\n if entity.gold_link==entity.sys_link:\n correct+=1\n print(correct, total)\n return correct/total", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n if currentGameState.isLose(): return -float(\"inf\")\n elif currentGameState.isWin(): return float(\"inf\")\n\n position = currentGameState.getPacmanPosition()\n score = scoreEvaluationFunction(currentGameState)\n\n foods = currentGameState.getFood().asList()\n food_distance = min(util.manhattanDistance(position, food) for food in foods)\n\n ghosts = currentGameState.getGhostStates()\n ghost_distance = max(5, min(util.manhattanDistance(position, ghost.getPosition()) for ghost in ghosts))\n\n return score - 1.5 * food_distance - 4 * len(foods) - 2.0 / ghost_distance\\\n - 20 * len(currentGameState.getCapsules())", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n \n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n GhostLocs = currentGameState.getGhostPositions()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n capsuleLocations = currentGameState.getCapsules()\n Hueristic = 0.0\n \n if currentGameState.isWin():\n return 10000\n if currentGameState.isLose():\n return -10000\n\n FoodDistances = []\n foodLocations = newFood.asList()\n for food in foodLocations:\n 
FoodDistances.append(manhattanDistance(newPos,food))\n closestFood = min(FoodDistances)\n closestFoodLocation = foodLocations[FoodDistances.index(closestFood)]\n\n GhostsToMe = []\n GhostsToFood = []\n for ghost in GhostLocs:\n GhostsToMe.append(manhattanDistance(newPos,ghost))\n GhostsToFood.append(manhattanDistance(closestFoodLocation,ghost))\n closestGhostToMe = min(GhostsToMe)\n closestGhostToClosestFood = min(GhostsToFood)\n closestGhostLocation = GhostLocs[GhostsToMe.index(closestGhostToMe)]\n\n if newPos in currentGameState.getCapsules():\n capsule = 100\n else: \n capsule = 0\n \n if closestGhostToClosestFood < closestFood:\n if closestGhostToMe > 4:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*50 - (1/closestGhostToMe)*5\n else:\n Hueristic = (-1/closestGhostToMe)*50\n else:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*50 - (1/closestGhostToMe)*5\n return Hueristic", "def find_female_adj(novel):\n return find_gender_adj(novel, True)", "def betterEvaluationFunction(currentGameState: GameState):\n \"*** YOUR CODE HERE ***\"\n ghostScore : float = 1\n nearGhosts : float = 0\n foodScore : float = 0\n curScore = currentGameState.getScore()\n\n nearestFood = [(0, 0), float('inf')]\n pacPos = currentGameState.getPacmanPosition()\n foodPoss= currentGameState.getFood().asList()\n capsulePoss = currentGameState.getCapsules()\n ghostPoss = currentGameState.getGhostPositions()\n\n for foodPos in foodPoss:\n val = manhattanDistance(foodPos, pacPos)\n if val < nearestFood[1]:\n nearestFood[1] = val\n nearestFood[0] = foodPos\n foodScore = nearestFood[1]\n \n for gpos in ghostPoss:\n val = manhattanDistance(pacPos, gpos)\n if val <= 1:\n nearGhosts += (1-val)\n ghostScore += val\n\n return curScore - (1/ghostScore) + (1/foodScore) - nearGhosts - len(capsulePoss)", "def eval_paradigm(gold, guess):\n correct, total = 0., 0.\n for lemma, D in gold.items():\n correct += 1\n total += 1\n for tag, str1 in D.items():\n str2 = u\"\" # empty string if no guess\n if lemma in guess and tag in guess[lemma]:\n str2 = guess[lemma][tag]\n if str1 != str2:\n correct -= 1\n break\n return round(correct/total*100, 2)" ]
[ "0.5681299", "0.5680674", "0.5543613", "0.55244184", "0.55180955", "0.5492687", "0.54871005", "0.5482099", "0.54445714", "0.5411524", "0.5401268", "0.53899825", "0.5369389", "0.5358834", "0.5356756", "0.5349101", "0.5346479", "0.5338135", "0.53196657", "0.5302856", "0.5299082", "0.5294665", "0.5291439", "0.5286722", "0.5276904", "0.5270857", "0.5237687", "0.5231684", "0.5226359", "0.5225212" ]
0.6242136
0
Search and return all sub nodes which match the node path. Each element of the list is matched against sub node names of increasing depth. Regular expression pattern matching is used for each name. For example, the list ['sub.', 'level2_name'] will match all nodes whose first level child name matches 'sub.' and whose second level child matches 'level2_name'.
def matchNodes(self, node_path_list):
    if type(node_path_list) != type([]):
        raise TypeError( 'argument not a list: "'+str(node_path_list)+'"' )
    nodes = [self,]
    for pat in node_path_list:
        cpat = re.compile(pat)
        new_nodes = []
        for nd in nodes:
            for kid in nd.getSubNodes():
                if cpat.match(kid.name):
                    new_nodes.append(kid)
        nodes = new_nodes
        if len(nodes) == 0:
            break
    return nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_node_paths_by_full_object(self, name):\n components = re.split('[\\.:]', name)\n cur_node = self.top\n paths = []\n\n # Handle a case where we may have split things up by wildcard\n if '_' in components[0]:\n (left, right) = components[0].rsplit('_', 1)\n test_name = '{}_*'.format(left.lower())\n if test_name in cur_node.children:\n cur_node = cur_node.children[test_name]\n paths.append(cur_node)\n if len(components) == 1 and components[0][-1] == '*':\n return paths\n\n # Now iterate\n for component in components:\n cur_node = cur_node.children[component.lower()]\n paths.append(cur_node)\n\n # Return the list\n return paths", "def get_subnode_names(self) -> List[str]:\n\t\t# Variables\n\t\tnames: List[str] = []\n\n\t\t# Iterate over nodes\n\t\tfor subnode in self.subnodes:\n\t\t\tnames.append(subnode.name)\n\t\t# Return Names\n\t\treturn sorted(names, key=str.lower)", "def get_subnodes(self) -> List[SubNode]:\n\t\treturn sorted(self.subnodes, key=lambda x: attrib_key(x.name))", "def find_sub_node(tree, keys, limit_tags=['part', 'subpart'], force_tag=None):\n \"\"\" limit tags exists to prevent ambiguity between parts and section labels. however, sometimes we must treat\n parts etc like sections, for ranges etc \"\"\"\n node = tree\n xpath_query = \".//%s[%s]\"\n depth = lambda x: len(list(x.iterancestors()))\n shallowest = lambda nodes: nodes[0] if len(node) == 1 else sorted(map(lambda x: (x, depth(x)), nodes), key=itemgetter(1))[0][0]\n\n def get_closest(node, label):\n \"\"\" note: this is split between xpath and python for performance reasons (xpath too slow on ancestors) \"\"\"\n while True:\n try:\n tag = force_tag if force_tag else '*'\n nodes = node.xpath(xpath_query % (tag, labelize(label)))\n nodes = filter(lambda x: x.tag not in limit_tags and\n not len(set(map(lambda t: t.tag, x.iterancestors())).intersection(IGNORE_TRAVERSAL_TAGS)), nodes)\n return shallowest(nodes)\n\n except IndexError:\n node = node.getparent()\n if node is None or not len(node):\n raise StopIteration('no more parents')\n\n nodes = []\n\n try:\n for i, a in enumerate(keys):\n if a:\n adds = a.split('+')\n for add in adds:\n add = add.strip()\n if not add:\n continue\n elif '-' in add:\n # we can't assume any reasonable lexicographical ordering of labels, so instead\n # find first match and continue until last\n labels = [x.strip() for x in add.split('-')]\n # get first node\n start = get_closest(node, labels[0])\n last = get_closest(node, labels[1])\n # this sucks, having to start at start,\n tag = start.tag\n nodes.append(start)\n # try to find way to start iter at arbitrary node\n tree_iter = tree.iter(tag)\n current = None\n while True:\n current = next(tree_iter)\n if current == start:\n break\n while True:\n current = next(tree_iter)\n nodes.append(current)\n if current == last:\n break\n # find every tag that matches depth, until we match last\n else:\n nodes.append(get_closest(node, add.strip()))\n node = nodes\n if i < len(keys) - 1:\n node = nodes[-1]\n if not len(keys):\n nodes = [node]\n if not len(nodes):\n raise CustomException(\"Empty\")\n # remove ancestors\n ancestors = []\n for n in nodes:\n ancestors.extend(list(n.iterancestors()))\n nodes = [n for n in nodes if n not in ancestors]\n return nodes\n except (IndexError, StopIteration, AttributeError), e:\n raise CustomException(\"Path not found\")", "def search_nodes_by_pattern(self, pattern):\n searched_nodes = []\n if pattern and pattern != '/':\n pattern = pattern.lower()\n for name, node in self._normal_node_map.items():\n name 
= name.lower()\n pattern_index = name.rfind(pattern)\n if pattern_index >= 0 and name.find('/', pattern_index + len(pattern)) == -1:\n searched_nodes.append(node)\n return searched_nodes", "def get_subnodes(self) -> List[SubNode]:\n\t\t# Get all SubNodes\n\t\tsubnode_holder = NodeSubNodeHolder()\n\t\tfor rootnode in self.root_nodes:\n\t\t\tfor subnode in rootnode.get_subnodes():\n\t\t\t\tsubnode_holder.add_subnode(subnode)\n\n\t\t# Return\n\t\treturn subnode_holder.get_subnodes()", "def search_leaf_nodes_by_pattern(self, pattern, scope_pattern=False):\n is_match = lambda x, y: x.lower().startswith(y) if scope_pattern else y in x.lower()\n if pattern is not None:\n pattern = pattern.lower()\n searched_nodes = [\n node for name, node in self._leaf_nodes.items()\n if is_match(name, pattern)\n ]\n else:\n searched_nodes = [node for node in self._leaf_nodes.values()]\n return searched_nodes", "def node_find_by_name( fdt, node_name, starting_node = 0, multi_match=False ):\n\n matching_nodes = []\n matching_node = None\n\n search_active = False\n if starting_node == \"/\" or starting_node == 0:\n search_active = True\n\n for node in fdt.node_iter():\n if not search_active:\n if node.path == starting_node:\n search_active = True\n\n if search_active:\n if node.name == node_name:\n if not matching_nodes:\n matching_node = node\n matching_nodes.append( node )\n\n return matching_node, matching_nodes", "def get_nested_xml(xml, node_path):\n node_path = node_path.split('/')\n if len(node_path) > 2:\n raise ValueError(f\"Length of nodepath '{node_path}' >2 \") # can only handle parent/child path length\n\n if len(node_path) == 1:\n # if multiple matches for parent node, look for child node in both and return list of parent nodes\n # containing child node\n node_matches = [(x := get_xml_value(node_path[0], node))[0] for node in xml\n if (x := get_xml_value(node_path[0], node))]\n\n # # if only one node match, return it outside list, otherwise return all matches\n # if len(node_matches) == 1:\n # return node_matches[0]\n\n return node_matches\n\n return get_nested_xml(get_xml_value(node_path[0], xml), *node_path[1:])", "def find_all_subgraphs(graph, match):\n if not match:\n return []\n\n return _find(graph, match, {}, match)", "def _refind_nodes(self, reSearchItems, root=None, sortByDepth=False):\n\n reListOfSearchItems = list(reSearchItems)\n\n if root == None:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for x in self.ParentMap.keys() if ReParent.match(x.tag)]\n\n else:\n Out = [root]\n\n\n while len(reListOfSearchItems) > 0:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for root in Out for x in root.iter() if ReParent.match(x.tag)]\n\n if sortByDepth == False: return Out\n\n TDict = dict((x, len(self.get_path_to_node(x))) for x in Out)\n return [o[0] for o in sorted(TDict.items(),key=lambda x:x[1])]", "def __dfs(self, subtree, path):\n if isinstance(subtree, list):\n for node in subtree:\n for child in self.__dfs(node, path + \"[\" + str(subtree.index(node)) + \"]\"):\n yield child\n elif isinstance(subtree, dict):\n for node in subtree:\n for child in self.__dfs(subtree[node], path + \"/\" + node):\n yield child\n else: # Leaf node\n yield (subtree, path)", "def find_all(st, sub):\n\n if not sub: return None\n if sub[0] not in st.root.trans: return None\n \n found, i, s = False, 0, st.root\n scaned = 0 # length of the scaned\n while True:\n k, p, s = s.trans[sub[i]]\n len1, len2 = p-k+1, len(sub)-i\n if len1 >= len2:\n if st.text[k:k+len2] == sub[i:]:\n found, scaned = True, scaned+len1\n 
break\n else:\n if st.text[k:k+len1] == sub[i:i+len1]:\n i, scaned = i+len1, scaned+len1\n else: break\n if found:\n # shift_of_suffix = len(st.text) - len(suffix)\n leaf_depthes = get_leaf_depthes(s)\n return [len(st.text)-x-scaned for x in leaf_depthes]\n\n return None", "def test_search_subnode_2_value(self):\n\n lista = []\n for (n, _) in self.parser.search(self.xml, 'subnodes@ref=2'):\n for (_, v2) in self.parser.search(n, 'subnode'):\n lista.append(v2)\n self.assertEqual(lista, ['S2.1', 'S2.2'])", "def s_level_n_descendants(node, n):\r\n if n==0:\r\n return [node]\r\n else:\r\n children = []\r\n for child in node.children:\r\n children.extend(Node.s_level_n_descendants(child, n-1))\r\n return children", "def getLevelNames(names):\n topNames = []\n deeperNames = []\n for item in names:\n if isinstance(item, str):\n topNames.append(item)\n else:\n topNames.append(item[0])\n # Names immediately under the current level must be\n # qualified with the current level full name\n for j in item[1]:\n if isinstance(j, str):\n subname = '%s/%s' % (item[0], j)\n else: # j is a 2-tuple\n jlist = list(j)\n jlist[0] = '%s/%s' % (item[0], jlist[0])\n subname = tuple(jlist)\n deeperNames.append( subname)\n return topNames, deeperNames", "def search(self, word):\n def _subSearch(node, word):\n if not word:\n return node.isWord\n\n contains = False\n if word[0] == '.':\n for c in node.children:\n contains |= _subSearch(node.children[c], word[1:])\n if contains:\n return True\n elif word[0] in node.children:\n contains |= _subSearch(node.children[word[0]], word[1:])\n\n return contains\n\n return _subSearch(self.root, word)\n\n\n # cur = self.root\n # nodes = []\n # nodes.append(cur)\n\n # for c in word:\n # # new_nodes = []\n # # for node in nodes\n # # if c == '.':\n # if c not in cur.children:\n # return False\n \n # cur = cur.children[c]", "def dfs(node, all_nodes, depth):\r\n node.depth = depth\r\n to_return = [node,]\r\n for subnode in all_nodes:\r\n if subnode.parent and subnode.parent.id == node.id:\r\n to_return.extend(dfs(subnode, all_nodes, depth+1))\r\n return to_return", "def search(line, pattern_tree, pattern_path, result_tree, result_path):\n node = (node for node in pattern_path[:])\n pattern_path[:] = [] # Start search at root\n while not search_down(line, pattern_tree, pattern_path, result_tree, result_path):\n try:\n pattern_path.append(node.next())\n except StopIteration:\n break", "def SearchRe(context, pattern, arg=None):\n if not arg:\n arg = context.node\n arg = Conversions.StringValue(arg)\n matches = re.findall(pattern, arg)\n proc = context.processor\n matches_nodeset = []\n for groups in matches:\n proc.pushResult()\n proc.writers[-1].startElement('Match', EMPTY_NAMESPACE)\n if type(groups) != type(()):\n groups = (groups,)\n for group in groups:\n proc.writers[-1].startElement('Group', EMPTY_NAMESPACE)\n proc.writers[-1].text(group)\n proc.writers[-1].endElement('Group')\n proc.writers[-1].endElement('Match')\n frag = proc.popResult()\n context.rtfs.append(frag)\n matches_nodeset.append(frag.childNodes[0])\n return matches_nodeset", "def _return_string_all_descendants_rec(self, node, string, level):\n if len(node.get_children()) == 0:\n return string\n else:\n level += 1\n for child in node.get_children():\n string += \"| \"*level\n string += \"|---\" + str(child) + \"\\n\"\n string = self._return_string_all_descendants_rec(child, string, level)\n return string", "def subtrees(self):\n yield from subtrees(self)", "def subtrees(self):\n return list(iter(self))", "def 
get_subdiagrams_grouped_by_level(self):\n subds = []\n\n def get_subds_gbl_rec(node, level):\n \"\"\"\n The recursive call\n \"\"\"\n try:\n subds[level] = subds[level].union({node})\n except IndexError:\n subds.append({node})\n if not isinstance(node, Leaf):\n for child in node.child_nodes:\n get_subds_gbl_rec(node.child_nodes[child][0], level+1)\n\n get_subds_gbl_rec(self, 0)\n return subds", "def get_subfiles(self) -> Set[str]:\n\t\tself.subfiles.clear()\n\t\t# Iterate over Nodes\n\t\tfor node in self.nodes:\n\t\t\tself.subfiles.update(node.get_subfiles())\n\t\t# Iterate over SubNodes\n\t\tfor subnode in self.subnodes:\n\t\t\tself.subfiles.update(subnode.filenames)\n\t\t# Return\n\t\treturn self.subfiles", "def find_step(tree: Tree, template: Tree) -> list:\n if template == \"@\":\n if type(tree) == str:\n return [tree]\n return tree.leaves()\n\n if template == '*':\n return []\n\n if type(template) != str and template.label() == '*':\n res_star = []\n for temp_node in template:\n res_star.extend(find_star_step(tree, temp_node))\n return res_star\n\n if type(tree) == str or type(template) == str:\n if tree == template:\n return []\n return []\n\n if tree.label() != template.label():\n return []\n else:\n\n res = []\n for t_node in template:\n for node in get_node_by_label(tree, t_node):\n res.extend(find_step(node, t_node))\n return res", "def get_effective_children(self, recursive=False):\n if not self.has_key('name'):\n return []\n name = self['name']\n children = self.objects.filter(use__has_field=name)\n if recursive == True:\n grandchildren = []\n for i in children:\n grandchildren += i.get_effective_children(recursive)\n children += grandchildren\n return children", "def tokenize_recursively(text, re_list, depth=0):\n if depth >= len(re_list):\n return [text]\n tokens = []\n pos = 0\n regex, typ = re_list[depth]\n while pos < len(text):\n m = regex.search(text, pos)\n if not m:\n tokens.extend(tokenize_recursively(text[pos:], re_list, depth+1))\n break\n else:\n startpos, endpos = m.span()\n if startpos > pos:\n tokens.extend(tokenize_recursively(text[pos:startpos],\n re_list, depth+1))\n tokens.append((text[startpos:endpos], typ))\n pos = endpos\n return tokens", "def search(self, word):\n nodes = [self.root]\n print self.root.children\n i = 0\n while i < len(word):\n tmp = []\n for node in nodes:\n if word[i] in node.children:\n tmp.append(node.children[word[i]])\n elif word[i] == '.':\n tmp.extend(node.children.values())\n if not tmp:\n return False\n nodes = tmp\n i += 1\n return True", "def get_subdiagrams(self, depth):\n subdiagrams = set()\n\n def get_sds_rec(node, sds, level, cur_level):\n if level == cur_level:\n return sds.union({node})\n else:\n # the children have to be sorted for some usages\n children = set()\n for child in node.child_nodes:\n children = children.union(get_sds_rec(node.child_nodes[child], sds, level, cur_level+1))\n return children\n subdiagrams = get_sds_rec(self, subdiagrams, depth, 0)\n return subdiagrams" ]
[ "0.63235563", "0.60124", "0.58496034", "0.57926613", "0.5770309", "0.57200235", "0.569196", "0.56852955", "0.53977126", "0.5383091", "0.53375274", "0.5329562", "0.53087634", "0.5284985", "0.52425534", "0.5237866", "0.5201636", "0.51965714", "0.51948684", "0.5189998", "0.5178208", "0.5177541", "0.51733094", "0.51647985", "0.5151495", "0.51255506", "0.5123509", "0.50933135", "0.50851524", "0.5084643" ]
0.62397116
1
Writes this XML node into string form. If the recursive flag is true, writes all subnodes recursively too. The indent string is prepended to each line. Returns the total string.
def toString(self, recursive=True, indent=""):
    s = indent + '<' + self.name
    for (n,v) in self.attrs.items():
        s = s + ' ' + n + '="' + v + '"'
    c = self.content.strip()
    if c or len(self.kids) > 0:
        s = s + '>\n'
        if c:
            s = s + indent + " " + c + '\n'
        if recursive:
            for nd in self.kids:
                s = s + nd.toString(recursive,indent=indent+" ")
        s = s + indent + '</' + self.name + '>\n'
    else:
        s = s + '/>\n'
    return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tree_to_string(self, indent):\n\t\ts = self.indent_string(indent) + str(self)\n\t\tfor c in self.child_nodes:\n\t\t\ts += c.tree_to_string(indent + 1)\n\t\treturn s", "def tree_string(self, indent=0): # pragma: no cover\r\n return \"\"", "def dump_internal(self, indent):\n XML_INDENT = 4 # Number if chars to indent children.\n text = ' ' * indent + self.__repr__() + '\\n'\n\n for el in self.children:\n text += el.dump_internal(indent + XML_INDENT)\n\n text += ' ' * indent + '</%s>\\n' % self.tag\n\n return text", "def _str(indent, root):\n if root is None:\n return \"\"\n else:\n return (BST._str(indent + \"\\t\", root.right) +\n indent + repr(root.item) + \"\\n\" +\n BST._str(indent + \"\\t\", root.left))", "def __str__(self):\n\t\tself._synchronize_attributes()\n\t\ts = \"\"\n\t\tqueue = c3.Queue()\n\t\tlevel = 0\n\t\tqueue.enqueue((1, self._root))\n\t\twhile queue.peek():\n\t\t\tnodelev, node = queue.dequeue()._data\n\t\t\tif (not node):\n\n\t\t\t\t#NODE IS NOT THERE - just a placeholder\n\t\t\t\t#print spacing and enqueue fake left and right children\n\t\t\t\t#but stops if they would be past the max depth of the tree\n\t\t\t\tif ((self._depth - nodelev + 1) <= 0):\n\t\t\t\t\tcontinue\n\n\t\t\t\tif (nodelev != level):\n\t\t\t\t\ts += \"\\n\"\n\t\t\t\t\t#PRINT THE INDENT\n\t\t\t\t\tindent = \" \"*int((self._max_chars)*(2**(self._depth - nodelev) - 1))\n\t\t\t\t\ts += indent\n\t\t\t\t\tlevel = nodelev\n\n\t\t\t\t#PRINT THE SPACING\n\t\t\t\ts += \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\n\t\t\t\t#PRINT SPACES TO REPLACE DATA\n\t\t\t\ts += \" \"*self._max_chars\n\n\t\t\t\t#Enqueue fake children\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tcontinue\n\n\t\t\tif (nodelev != level):\n\t\t\t\ts += \"\\n\"\n\t\t\t\t#PRINT THE INDENT\n\t\t\t\tindent = \" \"*(self._max_chars)*(2**(self._depth - nodelev) - 1)\n\t\t\t\ts += indent\n\t\t\t\tlevel = nodelev\n\n\t\t\t#adds preceding \"|\"s if the str length of the data is smaller than the max\n\t\t\tfor i in range(int(self._max_chars - len(str(node.value())))):\n\t\t\t\ts += \"|\"\n\t\t\ts += str(node.value()) \n\n\t\t\t#PRINT THE SPACING\n\t\t\tspacing = \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\t\t\ts += spacing\n\n\t\t\t#Enqueues\n\t\t\tif node.lchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.lchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\tif node.rchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.rchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\ts += \"\\n\"\n\t\treturn s", "def __str__(self):\n outstr = ''\n if self._leftchild:\n outstr = outstr + str(self._leftchild)\n outstr = outstr + ' ' + str(self._element)\n if self._rightchild:\n outstr = outstr + str(self._rightchild)\n return outstr", "def __str__(self):\n string = ''\n\n # gets the nodes at each level and puts the values into a string\n for i in range(self.get_height()+1):\n nodes = self.get_nodes_on_level(i)\n level = [str(node.value) if node else '-' for node in nodes]\n string += '{}\\n'.format(' '.join(level))\n\n return string", "def pretty_string(self):\n return self._tree.toStringPrettyPrint()", "def __str__(self):\n def recurse(node, level):\n s = \"\"\n if type(node) == LeafNode:\n return (\"| \" * level) + str(node) + \"\\n\"\n if node != None:\n s += 
recurse(node.rightOperand, level + 1)\n s += \"| \" * level\n s += str(node.operator) + \"\\n\"\n s += recurse(node.leftOperand, level + 1)\n return s\n return recurse(self, 0)", "def __str__(self):\n current = self.root\n nodes = [self.root]\n final = str(self.root) + \"\\n\"\n count = 0\n while len(nodes) != 0:\n count += 1\n if count == 10:\n return \"\"\n temp = []\n for node in nodes:\n if node.left != None:\n temp.append(node.left)\n final += str(node.left) + \" \"\n else:\n final += \"_ \"\n if node.right != None:\n temp.append(node.right)\n final += str(node.right) + \" \"\n else:\n final += \"_ \"\n if temp == []:\n if node == nodes[len(nodes) - 1]:\n break\n final += \"\\n\"\n nodes = temp\n self.in_order_traversal()\n for item in self.traverse:\n final += str(item.key) + \" \"\n final += \"\\n\"\n return final", "def __str__(self):\n # Tricky to do iteratively so we do it recursively.\n return BST._str(\"\", self.root)", "def to_string(self):\n tree_structure_str = self.node_to_string(self.root, 0, is_add_children=True).rstrip()\n return tree_structure_str", "def toXML( self, indent = '', **kwargs ) :\n\n return( '\\n'.join( self.toXMLList( **kwargs ) ) )", "def str_recursive(node):\n\n if node == None:\n return \"\"\n else:\n return str(node.item) + \" \" + LinkedList.str_recursive(node.next)", "def pprint(self, indent: str = \"\"):\n\n from os import linesep\n\n res = self.__str__() + linesep\n child_indent = f\"{indent} \"\n\n pos = -1\n for x in self.children:\n pos += 1\n if pos == len(self.children) - 1:\n res += f\"{child_indent}└── {x.pprint(child_indent)}\"\n else:\n res += f\"{child_indent}β”œβ”€β”€ {x.pprint(child_indent)}\"\n return res", "def prettify(self):\n reparsed = minidom.parseString(self.tostring())\n return reparsed.toprettyxml(indent=' ', encoding='utf-8')", "def __str__(self):\n s = \"--\\n\"\n for node in self:\n s += node.__str__() + \"\\n\"\n return s + \"--\"", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def dump_tree(self) -> str:\n return utils.dump_tree(self._tree)", "def node_to_string(self, node, tab_count=0, is_add_children=False):\n tabs_str = ''\n for i in range(0, tab_count):\n tabs_str += '\\t'\n\n node_str = tabs_str + str(node.value) + ': ' + str(node.count)\n\n children_str = ''\n if is_add_children:\n for child_node in node.children:\n children_str += '\\n\\t' + tabs_str + self.node_to_string(child_node, tab_count+1, True)\n\n return node_str + children_str", "def pretty_print_content(self):\n\n return lxml.etree.tostring(self.get_content(),\n pretty_print = True,\n encoding = self.encoding,\n xml_declaration = True)", "def serialize(self, root):\n if not root: return ''\n\n result = str(root.val) + '['\n\n for i in range(len(root.children)):\n result += self.serialize(root.children[i]) + ' '\n\n result = result[0 : len(result) - 1]\n\n result += ']'\n \n return str(root.val) if not root.children else result", "def __repr__(self):\n lines = []\n nodes = [(self.root, 0)]\n while nodes:\n node, indent = nodes.pop()\n name = str(node) if node else 'None'\n lines.append(' ' * indent + name)\n if node:\n nodes.append((node.child[True], indent + 1))\n nodes.append((node.child[False], indent + 1))\n\n return os.linesep.join(lines)", "def get_tree_str(self, depth: int = 0) -> str:\n temp = 
\" \" * depth + str(self.head) + \"\\n\"\n for son in self.sons:\n temp += son.get_tree_str(depth + 1)\n return temp", "def pretty_print(self):\n return self.tree.pretty_print()", "def print_tree(self):\n recur_print = self.recur_print(tree.root, '')[:-1]\n return recur_print", "def prettify(self):\n rough_string = ET.tostring(self.root, encoding='utf-8', method='xml').decode('utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\"\\t\")", "def __str__(self):\n result = xml.dom.minidom.parseString(\n xml.etree.ElementTree.tostring(\n self.ToXMLElement(), encoding='utf-8')).toprettyxml(indent=' ')\n\n return result", "def __str__(self, indent: int=0) -> str:\n root_str = indent * \" \" + str(self.value)\n mid = len(self.non_none_kids()) // 2\n left_str = [c.__str__(indent + 3)\n for c in self.non_none_kids()][: mid]\n right_str = [c.__str__(indent + 3)\n for c in self.non_none_kids()][mid:]\n return '\\n'.join(right_str + [root_str] + left_str)" ]
[ "0.7190528", "0.6967208", "0.68142414", "0.6669996", "0.65078336", "0.64969987", "0.63850766", "0.6360391", "0.63594466", "0.63138396", "0.63080406", "0.6300737", "0.6280438", "0.624446", "0.6210749", "0.61899465", "0.61862755", "0.61584604", "0.61571324", "0.61510396", "0.6142981", "0.6111116", "0.6110152", "0.6109846", "0.6030543", "0.598212", "0.59786516", "0.5976549", "0.59684587", "0.5960899" ]
0.71017915
1
The constructor determines the error class used by ElementTree.parse().
def __init__(self):
    try:
        # this succeeds with python 2
        import StringIO
        class_StringIO = StringIO.StringIO
    except Exception:
        # this succeeds with python 3
        import io
        class_StringIO = io.StringIO
    # create some XML with an error
    sio = class_StringIO( "<foo> <bar> </foo>\n" )
    try:
        ET.parse( sio )
    except Exception:
        self.ET_exc_class = sys.exc_info()[0]
    else:
        # something is wrong; the drawback to this fallback is that you
        # cannot distinguish an XML error from other errors
        self.ET_exc_class = Exception
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,xmlnode_or_cond,error_type=None,copy=1,parent=None):\n if type(xmlnode_or_cond) is str:\n xmlnode_or_cond=unicode(xmlnode_or_cond,\"utf-8\")\n if type(xmlnode_or_cond) is unicode:\n if not stanza_errors.has_key(xmlnode_or_cond):\n raise ValueError, \"Bad error condition\"\n\n ErrorNode.__init__(self,xmlnode_or_cond,STANZA_ERROR_NS,copy=copy,parent=parent)\n\n if type(xmlnode_or_cond) is unicode:\n if error_type is None:\n error_type=stanza_errors[xmlnode_or_cond][1]\n self.xmlnode.setProp(\"type\",to_utf8(error_type))", "def __init__(self,xmlnode_or_cond,copy=1,parent=None):\n if type(xmlnode_or_cond) is str:\n xmlnode_or_cond = xmlnode_or_cond.decode(\"utf-8\")\n if type(xmlnode_or_cond) is unicode:\n if not stream_errors.has_key(xmlnode_or_cond):\n raise ValueError, \"Bad error condition\"\n ErrorNode.__init__(self,xmlnode_or_cond,STREAM_ERROR_NS,copy=copy,parent=parent)", "def __init__(self, *args):\n this = _libsbml.new_XMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _libsbml.new_SBMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def parse_error(self, error: Union[str, Exception],\n elem: Optional[ElementType] = None,\n validation: Optional[str] = None) -> None:\n if validation is not None:\n check_validation_mode(validation)\n else:\n validation = self.validation\n\n if validation == 'skip':\n return\n elif elem is None:\n elem = self.elem\n elif not is_etree_element(elem):\n msg = \"the argument 'elem' must be an Element instance, not {!r}.\"\n raise XMLSchemaTypeError(msg.format(elem))\n\n if isinstance(error, XMLSchemaParseError):\n error.validator = self\n error.namespaces = getattr(self, 'namespaces', None)\n error.elem = elem\n error.source = getattr(self, 'source', None)\n elif isinstance(error, Exception):\n message = str(error).strip()\n if message[0] in '\\'\"' and message[0] == message[-1]:\n message = message.strip('\\'\"')\n error = XMLSchemaParseError(self, message, elem)\n elif isinstance(error, str):\n error = XMLSchemaParseError(self, error, elem)\n else:\n msg = \"'error' argument must be an exception or a string, not {!r}.\"\n raise XMLSchemaTypeError(msg.format(error))\n\n if validation == 'lax':\n self.errors.append(error)\n else:\n raise error", "def __init__(self, *args):\n this = _libsbml.new_XMLErrorLog(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def __init__( self, xml_raw = None ):\n # Instantiates class attributes\n self.xml_root = None\n self.error_message = None\n self.validated = True\n\n # Loads raw xml\n if xml_raw is not None:\n self.validated = self.validate_and_load( xml_raw )", "def error(self, code, message=None):\n return xpath_error(code, message, self, self.error_prefix)", "def __init__(self, error_search=\"error\"):\n self.error_search = error_search", "def __init__(self, value, line=None):\n\n\t\tLOGGER.debug(\"> Initializing '{0}()' class.\".format(self.__class__.__name__))\n\n\t\tAbstractParsingError.__init__(self, value)\n\n\t\t# --- Setting class attributes. 
---\n\t\tself.__line = None\n\t\tself.line = line", "def __init__(self, node):\n\t\t\tself.name = node.get('name')\n\t\t\tif not re.match(\"^[0-9A-Z_]*$\", self.name):\n\t\t\t\traise ParserException(\"Attribute name of element in enum has to be UPPER_UNDERSCORE_STYLE (found: '%s')\" % (self.name))\n\t\t\t\n\t\t\tself.string = node.get('string')\n\t\t\tif self.string is None:\n\t\t\t\tself.string = self.name\n\t\t\t\t\n\t\t\tself.description = xml_utils.get_description(node)\n\t\t\tself.string = xml_utils.get_string(node)\n\t\t\t\n\t\t\tvalue = node.get('value')\n\t\t\tself.value = None if (value is None) else int(value, 0)", "def __init__(self, file):\n\n try:\n self.tree = etree.parse(file)\n except Exception as lxml_ex:\n raise TypeError(\"File could not be opened: Must be .xml or encoded\"\n \" like .xml. LXML ERROR: {}\".format(str(lxml_ex)))\n self.root = None\n self.base = None", "def __init__(self, source_text, syntax_error_ctor):\n self.src = source_text\n self.syntax_error_ctor = syntax_error_ctor", "def __init__(self, node):\n from aiida.common import exceptions\n super(BigDFTParser, self).__init__(node)\n if not issubclass(node.process_class, BigDFTCalculation):\n raise exceptions.ParsingError(\"Can only parse BigDFTCalculation\")", "def __init__(self, *args):\n this = _libsbml.new_SBMLErrorLog(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, source):\n self.tree = ET.parse(source)\n self.root = self.tree.getroot()", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def __init__(self, node_def, op, message, error_code):\n ...", "def __init__(self, message, fatal, error_num=None):\n Exception.__init__(self, message)\n self.fatal = fatal\n self.errno = error_num", "def __init__(self, schemas):\n # Prepend \"string\" to handle system errors\n schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas)\n super(ErrorUnionSchema, self).__init__(schemas=schemas)", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def __init__(self, from_email):\n self.code = 400\n self.from_email = from_email\n Error.__init__(self)", "def test_invalid_xml(self):\r\n with self.assertRaises(Exception):\r\n self.build_problem(type=\"invalidtextgroup\")", "def __init__(self, node):\n super().__init__(node)\n if not issubclass(node.process_class, Psi4Calculation):\n raise exceptions.ParsingError('Can only parse Psi4Calculation')", "def __init__(self, message=\"\"):\n super(ElementNotFoundError, self).__init__(message)", "def __init__(self, elem_type):\n if not isinstance(elem_type, ResultType):\n raise TypeError('%s is not a type' % str(elem_type))\n self._elem_type = elem_type" ]
[ "0.71514964", "0.6995184", "0.67318547", "0.6395666", "0.6299641", "0.6046157", "0.60218483", "0.60218483", "0.60218483", "0.59919393", "0.59288156", "0.5885075", "0.58716553", "0.5833256", "0.5825548", "0.5782933", "0.5762098", "0.57318366", "0.57283765", "0.5712078", "0.5699688", "0.56843", "0.56417555", "0.5637586", "0.56348515", "0.56252486", "0.5616368", "0.56040454", "0.5589514", "0.5584974" ]
0.75455344
0
Open the XML file and read its contents into a tree of XmlNode objects. XML errors raise an XmlError exception.
def readDoc(self, filename):
    try:
        doc = ET.parse( filename, parser=LineNumberingParser() )
    except self.ET_exc_class:
        raise XmlError( str(sys.exc_info()[1]) )
    rootnode = recurse_construct_ET_to_XmlNode( None, doc.getroot() )
    return rootnode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_xml_file(filename):\n###############################################################################\n with __FILE_OPEN(filename) as file_:\n tree = ET.parse(file_)\n root = tree.getroot()\n # End with\n return tree, root", "def read_xml(file_dir):\n tree = ElementTree()\n tree.parse(file_dir)\n return tree", "def parse_xml(file: Path):\n check_file(file) # Check the existency of the file\n\n doc = ET.parse(file)\n data = parse_xml_tree(doc.getroot())\n return data", "def parse_file(self, filename):\n \n # create DOM tree\n try:\n file_object = self.auto_open(filename)\n except IOError as e:\n self.my_print('Error opening file ' + filename + str(e), msg_types.ERROR)\n raise\n \n try:\n self.begin('building DOM tree from file ' + filename)\n dom = ET.XML(file_object.read())\n self.end()\n file_object.close()\n\n except SyntaxError as e:\n self.my_print(\"XML parsing error: \" + str(e), msg_types.ERROR)\n raise\n \n # create lexical resource\n try:\n self.begin('Extracting data from DOM of file ' + filename)\n lr = lexical_resource.LexicalResource(dom)\n self.end()\n return lr\n except FatalError as e:\n self.my_print(str(e), msg_types.ERROR)\n self.my_print('Processing stopped. Please fix the previous error(s) and try it again', msg_types.ERROR)\n raise", "def parse_xml_file(self, filename):\n try:\n dom = parse(filename)\n process_includes(dom)\n except ExpatError, x:\n raise EzXMLError(\"Failed to parse: %s\" % x)\n return self.walkdom(dom.documentElement)", "def parse_xml(xmlfile):\n # create element tree object\n root = ET.parse(xmlfile).getroot()\n return root", "def file_parser(inputfile):\n s_xml = None\n root = None\n try:\n with open(inputfile, 'r') as fd:\n s_xml = fd.read()\n except (IOError,) as e:\n tracker()\n return None\n try:\n root = ET.fromstring(s_xml)\n except (ET.ParseError,) as e:\n tracker()\n return None\n return root", "def readFile(filename, encoding, options):\n ret = libxml2mod.xmlReadFile(filename, encoding, options)\n if ret is None:raise treeError('xmlReadFile() failed')\n return xmlDoc(_obj=ret)", "def openFile(self, filename):\n self._tree = ET.parse(filename)\n self._root = self._tree.getroot()\n self._groups = self._root.findall('group')", "def from_file(self, xml_filepath: str) -> None:\n\t\t# Set filename and get root element of the tree\n\t\txml_filelist = xml_filepath.split(\"/\")\n\t\tself.xml_dir = xml_filelist[0]\n\t\tself.xml_name = xml_filelist[1]\n\t\troot: ET.Element = get_xml_file(xml_filepath)\n\t\t# Set name\n\t\tself.name = root.tag\n\n\t\t# Iterate over and add child nodes\n\t\tchild: ET.Element\n\t\tfor child in root:\n\t\t\t# Determine if child is a SubNode or a Node\n\t\t\t# If child has children or attributes it is a Node\n\t\t\tif len(child) or len(child.attrib):\n\t\t\t\t# Add Node\n\t\t\t\tself.add_node(Node(child))\n\t\t\telse:\n\t\t\t\tself.add_subnode(SubNode(child))", "def _xmlRead(self):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n \r\n logger.debug(\"{0:s}xmlFile: {1:s} parse Xml ...\".format(logStr,self.xmlFile)) \r\n tree = ET.parse(self.xmlFile) # ElementTree \r\n root = tree.getroot() # Element\r\n\r\n self.dataFrames=Xm._xmlRoot2Dfs(root)\r\n\r\n #fixes and conversions\r\n self._convertAndFix()\r\n\r\n #Views\r\n self._vXXXX()\r\n \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n 
logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def get_root_node(filename):\n try:\n tree = etree.parse(filename, etree.XMLParser(resolve_entities=False))\n return tree.getroot()\n except XMLSyntaxError as xse:\n logging.error(\"ERROR : error parsing XML file {filename}\".format(filename=filename))\n raise xse\n except IOError as ioe:\n logging.error(\"ERROR : xml file {filename} does't exist.\".format(filename=filename))\n raise ioe\n except Exception as e:\n logging.error(\"ERROR : exception while processing XML file {filename}\".format(filename=filename))\n raise e", "def parse_file(self, filename):\n\n root_name_checked = False\n\n xml_text = self._make_string(filename)\n\n for token, content, loc in XML(xml_text):\n\n if token == \"START\":\n\n name = content[0]\n attr = content[1]\n\n # We are building a tree of Element objects. The issue with\n # parsing multiple files is that each XML file needs a common\n # root Element to form the top of the tree. Therefore, the\n # requirement for parsing a second file is that it has a same\n # named root Element.\n\n if self.__root is not None and not root_name_checked:\n\n if self.__root.getName() == name:\n\n # We already have a root element, and that root element\n # has the same name as the root element in this second\n # file. Continue with the parse.\n\n # Potential issue is that doing this will not call the\n # registered start visitor. Is that okay since we are\n # treating it as a common node.\n\n self.__node_stack.append(self.__root)\n\n else:\n\n # We already have a root element, but that root element\n # name does not match the root element name from the\n # previously parsed file. Stop since this will result\n # in an orphaned tree branch.\n\n print(\n \"XML file (%s) has invalid root name: %s (expected: %s).\"\n % (filename, name, self.__root.getName())\n )\n return\n\n else:\n self._startElement(name, attr)\n\n root_name_checked = True\n\n elif token == \"TEXT\":\n self._cData(content)\n\n elif token == \"END\":\n name = content[0]\n self._endElement(name)\n\n return self.__root", "def read_xml(self):\n pass", "def readXML(xmlfile, expectedRootTag=None):\n\n # Read in XML\n try:\n etree = XML.ElementTree(file=xmlfile)\n except XMLParseError, e:\n raise ValueError(\"Unable to parse file '%s' because: %s\" % (xmlfile, e,))\n\n if expectedRootTag:\n root = etree.getroot()\n if root.tag != expectedRootTag:\n raise ValueError(\"Ignoring file '%s' because it is not a %s file\" % (xmlfile, expectedRootTag,))\n\n return etree, etree.getroot()", "def load_xml(self, filepath=None, escapeNewline=True, maxSize=0, createMap=True):\n\n if filepath != None:\n self.mFilePath = filepath\n self.mReplaceNewline = escapeNewline\n\n if not os.path.exists(str(self.mFilePath)):\n print \"Warning: The filepath '%s' does not exist. 
Please make sure to pass the right path as load_xml('foo/bar')\" %filepath\n return False\n\n if not escapeNewline:\n try:\n input = StringIO(gzip.open(self.mFilePath, \"r\").read())\n except IOError:\n input = StringIO(open(self.mFilePath, \"r\").read())\n\n\n else:\n # replace Live's newline string with a dummy ###newline_escape###\n # we will revert this back on writing.\n # using the escapeNewline is slow on large documents\n\n try:\n file = gzip.open(self.mFilePath, \"r\").read()\n except IOError:\n file = open(self.mFilePath, \"r\").read()\n\n input = StringIO(re.sub(r\"&#x0[DA];\", \"###newline_escape###\", file))\n\n del(file) # save memory\n\n if maxSize:\n maxSize = maxSize*1048576 # in MB\n if len(input.getvalue()) > maxSize:\n print \"Warning: Large Document - skipping %s\" %filepath\n return False\n\n self.tree = ET.ElementTree(file=input)\n\n input.close()\n\n if createMap:\n self.child_to_parent_dict()\n\n return True", "def _parse_xml(self, hero_file):\n filepath = os.path.join(self._hero_folder, hero_file)\n\n tree = xml.etree.ElementTree.parse(filepath)\n root = tree.getroot()\n return root", "def open(self, infile):\n if infile.endswith('.xml'):\n self.proj = None\n try:\n self.tree = ET.parse(infile)\n return self.tree\n except Exception, e:\n print \"Error opening file\",e\n usage()\n else:\n try:\n self.mpp = win32com.client.Dispatch(\"MSProject.Application\")\n self.mpp.Visible = False\n self.mpp.FileOpen(infile)\n self.proj = self.mpp.ActiveProject\n return self.proj\n except Exception, e:\n print \"Error opening file\",e\n usage()", "def load(filename):\n return XMLReader().from_file(filename)", "def LoadXML(NAME):\r\n # Basics snaged from https://docs.python.org/2/library/xml.etree.elementtree.html\r\n Tree = parse(NAME) # opens and turns the xml file into a tree\r\n Root = Tree.getroot()\r\n return(Root)", "def parseFile(filename):\n ret = libxml2mod.xmlParseFile(filename)\n if ret is None:raise parserError('xmlParseFile() failed')\n return xmlDoc(_obj=ret)", "def __init__(self, file):\n\n try:\n self.tree = etree.parse(file)\n except Exception as lxml_ex:\n raise TypeError(\"File could not be opened: Must be .xml or encoded\"\n \" like .xml. 
LXML ERROR: {}\".format(str(lxml_ex)))\n self.root = None\n self.base = None", "def load_xml(filename):\n path = dirname(__file__)\n with open(join(path, 'data', filename)) as file:\n content = file.read()\n return content", "def get_xml(xml_file_path: str):\n root = et.parse(xml_file_path).getroot()\n\n return root", "def parse_xml(xml_file):\n logging.info(str(xml_file))\n global nipper_xml\n xml_tree = ElementTree.parse(xml_file)\n\n nipper_xml = xml_tree.getroot()", "def __parse(self):\n\t\tparser=xml.sax.make_parser()\n\t\tparser.setContentHandler(OSMXMLFileParser(self))\n\t\tparser.parse(self.filename)\n\t\n\t\t# convert them back to lists\n\t\tself.nodes = self.nodes.values()\n\t\tself.ways = self.ways.values()\n\t\tself.relations = self.relations.values()", "def parse(xmlfile):\r\n g_logger.info('Starting parsing ' + xmlfile)\r\n parser = xml.sax.make_parser()\r\n try:\r\n handler = StructureHandler()\r\n except ParsingException as e:\r\n g_logger.warning(e.message)\r\n return None # TODO critical?\r\n parser.setContentHandler(handler)\r\n parser.parse(xmlfile)\r\n return handler.structures", "def readXML(self, xmlNode, variableGroups=None, globalAttributes=None):\n self.variableGroups = variableGroups if variableGroups is not None else {}\n xmlUtils.replaceVariableGroups(xmlNode, self.variableGroups)\n if 'name' in xmlNode.attrib.keys():\n self.name = xmlNode.attrib['name']\n else:\n self.raiseAnError(IOError,'not found name for a '+self.__class__.__name__)\n self.type = xmlNode.tag\n if globalAttributes is not None:\n self.globalAttributes = globalAttributes\n if 'verbosity' in xmlNode.attrib.keys() or 'verbosity' in self.globalAttributes:\n verbGlobal = None if self.globalAttributes is None else self.globalAttributes.get('verbosity')\n verbLocal = xmlNode.attrib.get('verbosity')\n self.verbosity = verbLocal if verbLocal is not None else verbGlobal\n self.raiseADebug('Set verbosity for '+str(self)+' to '+str(self.verbosity))\n self._readMoreXML(xmlNode)\n self.raiseADebug('------Reading Completed for:')\n self.printMe()", "def example_xml(example_xml_file):\n return etree.fromstring(example_xml_file.encode('utf-8'))", "def parseXML(xmlFile):\n\n tree = etree.parse(xmlFile)\n root = tree.getroot() \n transitionTable = dict()\n transitionTable = getTransitions(tree, root, transitionTable)\n return tree, root, transitionTable" ]
[ "0.7217277", "0.6876924", "0.6835852", "0.6671871", "0.6551055", "0.6519831", "0.64808804", "0.64725703", "0.63675314", "0.63521695", "0.634969", "0.63359207", "0.6323065", "0.6228589", "0.6217454", "0.61495256", "0.6140192", "0.61397547", "0.6138288", "0.61241144", "0.6115091", "0.5982609", "0.597239", "0.59639674", "0.59340245", "0.5923145", "0.591101", "0.59084153", "0.58705425", "0.5833679" ]
0.7357698
0