Dataset fields, in the order they appear in each record below:
query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: list (length 30)
negative_scores: list (length 30)
document_score: string (lengths 4 to 10)
document_rank: string (2 distinct values)
Sets the next node
def setNext(self, nextNode): self.__next = nextNode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_next(self, node):\r\n self.__next = node", "def set_next(self, node):\n self.__next = node", "def set_next(node, value):\n node['next'] = value", "def setNext(self, next_node):\n self.__nextListNode = next_node", "def setNext(self, next):\n\t\t\tself.next = next", "def set_next(self, new_next):\n self.next = new_next", "def next(self, next):\n\n self._next = next", "def next(self, next):\n\n self._next = next", "def next_node(self, value):\n if not isinstance(value, Node) and value is not None:\n raise TypeError(\"next_node must be a Node object\")\n else:\n self.__next_node = value", "def next_node(self, value):\n if isinstance(value, Node) is False:\n raise TypeError(\"next_node must be a Node object\")\n self.__next_node = value", "def next_node(self, value):\n if type(value) is not Node and type(value) is not None:\n raise TypeError(\"next_node must be a Node object\")\n else:\n self.__next_node = value", "def set_next(self, next_layer):\n self.next_layer = next_layer", "def next_node(self, value):\n if (type(value) != Node and value is not None):\n raise TypeError(\"next_node must be a Node object\")\n else:\n self.__position = value", "def linked_node(self, value):\n self._linked_node = value", "def __setattr__(self, key, value):\n if key == \"next\" and value:\n if value is not None:\n if not isinstance(value, Node):\n raise TypeError\n\n if Node.strict and value.next:\n # If we are in strict mode we check to make sure this\n # modification to `next` will not create a cycle.\n node = value.next\n while node:\n if node == self:\n raise ValueError(\"Cannot insert %s cycle detected\" \\\n % (value.data))\n node = node.next\n\n super(Node, self).__setattr__(key, value)", "def set_next_child(self, child, next_child):\n if child is not None and not isinstance(child, str):\n child._visit_meta['nxt'] = next_child\n if next_child is not None and not isinstance(next_child, str):\n next_child._visit_meta['prv'] = child", "def set_next(self, key: str):\n if not self.next:\n self._next = [key]\n elif key not in self.next:\n self._next.append(key)\n return self", "def setCurrentNode(self, newNode):\r\n\t\tself.currentNode = newNode", "def set_next(self, cell_id, color):\n self.next[cell_id] = color", "def set_next(self, handler):\n self.next = handler\n return handler", "def set_next_state(self, state):\n self.next_state = state", "def next(self, node):\n if node is None:\n self.my_next = None\n else:\n if type(node) == type(_QueueNode(0)):\n self.my_next = node\n else:\n raise ValueError(\"Invalid type\")", "def set_node(self, node):\n self.__node = node", "def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value", "def next_node(self):\n return self.__next_node", "def next_node(self):\n return self.__next_node", "def next_node(self):\n return self.__next_node", "def next_node(self):\n\n return self.__next_node", "def set(self, value, index=0):\n\n # Error case: Index out of acceptable range\n if index < 0 or index >= self._size:\n raise RangeError(\"index out of range.\")\n\n i = 0\n current_node = self._head\n\n while(i < index):\n current_node = current_node.next\n i += 1\n\n current_node.value = value", "def set_head(self, new_head: Node):\n pointer = self.head\n self.head = new_head\n self.head.next_node = pointer\n return self" ]
[ "0.8843889", "0.8778931", "0.8473048", "0.84528697", "0.83532506", "0.8179302", "0.7554658", "0.7554658", "0.7426825", "0.7420493", "0.72977376", "0.7261368", "0.722911", "0.6976248", "0.6957795", "0.6923199", "0.6881684", "0.67661965", "0.67643106", "0.66787094", "0.6670903", "0.66284174", "0.6572652", "0.6524823", "0.65168583", "0.65168583", "0.65168583", "0.64279264", "0.6410571", "0.6367803" ]
0.88359255
1
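The metadata field attached to each record declares a triplet objective over (query, document, negatives). As a minimal sketch of how one record could be unpacked for that objective, assuming the record is available as a plain Python dict keyed by the field names listed at the top (the function and variable names here are illustrative, not part of the dataset):

def to_triplets(record):
    # Assumes `record` carries the fields shown above:
    # 'query', 'document', and 'negatives' (a list of 30 strings).
    query = record["query"]
    positive = record["document"]
    return [(query, positive, neg) for neg in record["negatives"]]

# Applied to the record above, this would yield 30 (query, positive, negative)
# tuples, e.g. ("Sets the next node", "def setNext(...)", "def set_next(...)").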
Create a new CommandManager with the specified commands. Each argument is a (name, attribs) pair; the __setitem__ method is called on each element.
def __init__(self, *commands):
    self.cmds = dict()
    for nm, attr in commands:
        self[nm] = attr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, command_list: list = None) -> None:\n if command_list is None:\n command_list = implemented_commands\n for command in command_list:\n setattr(self, command.get(\"name\").replace(\" \", \"_\"), self._SingleCommand(command))", "def set_commands(self, commands, append=False):\n if append:\n self.commands.extend(commands)\n else:\n self.commands = commands", "def command(command_list):\n def add_attribute(func):\n if not hasattr(func, \"command\"):\n func.command = []\n func.command.append(command_list)\n return func\n return add_attribute", "def __init__(self, command_list, ):\n self.command_list = [] # all addition via function below\n self.add_command( command_list )", "def __init__(self, **manager_commands):\n self.package = manager_commands", "def commands(self, commands):\n\n self._commands = commands", "def __init__(self, commands=None):\n self.commands = {}\n self.context = None", "def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The command to get help for. Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). 
Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 'Core.Backup.Filename'. If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. 
Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)", "def get_commands(bot):\n new_commands = []\n\n new_commands.append(Command(\n 'mycommand', subcommands=[\n SubCommand(\n Opt('myoption'),\n doc='This is a simple command with a single required option.'),\n SubCommand(\n Opt('custom', optional=True),\n Opt('attached', optional=True, attached='attached argument'),\n doc='This has two different optional options, one without an attached '\n 'parameter, and the other requiring an attached parameter.'),\n SubCommand(\n Opt('trailing'),\n Arg('arg 1'),\n Arg('arg 2'),\n Arg('arg 3', argtype=ArgTypes.SPLIT, additional='more args'),\n doc='This command requires a lot of trailing arguments.'),\n SubCommand(\n Opt('grouped'),\n Arg('grouped arguments', argtype=ArgTypes.MERGED),\n doc='This will group all given arguments as a single string.'),\n SubCommand(\n Opt('complex', attached='attached'),\n Opt('other', optional=True, attached='also required'),\n Arg('arg 1'),\n Arg('arg 2', argtype=ArgTypes.SPLIT_OPTIONAL, additional='more args'),\n doc='The complex option has a required attached parameter, and the '\n '\\'other\\' option also has a required attached parameter if '\n '\\'other\\' is included. Additionally, there will be a requirement '\n 'of at least 1 trailing argument.'),\n SubCommand(\n Opt('marquee'),\n Arg('text', argtype=ArgTypes.MERGED,\n check=lambda b, m, v, *a: len(v) <= 100,\n check_error=\"Marquee message must be less than 100 characters long.\"),\n doc='Creates a marquee that loops 3 times.')],\n shortcuts=[\n Shortcut(\n 'complex', 'complex {attached} other {other} {arg 1} {arg 2}',\n Arg('attached'), Arg('other'), Arg('arg 1'),\n Arg('arg 2', argtype=ArgTypes.SPLIT_OPTIONAL)),\n Shortcut(\n 'marquee', 'marquee {text}', Arg('text', argtype=ArgTypes.MERGED))],\n description='Your command description here.',\n other='This text is optional - it just shows up after everything '\n 'else. Quick note, all of the commands here can only be used by '\n 'bot moderators or above, as indicated by elevated_level. A '\n 'level of 2 would mean only server owners or above can use the '\n 'command, and a level of 3 would restrict the command to only '\n 'the bot owners.',\n elevated_level=1, category='demo'))\n\n new_commands.append(Command(\n 'myothercommand', subcommands=[\n SubCommand(\n Arg('text', argtype=ArgTypes.MERGED_OPTIONAL),\n doc='This traps all further commands from being executed.'),\n SubCommand(\n Opt('order'), Opt('matters'),\n doc='It is impossible to access this command because the first '\n 'subcommand will always be satisfied first. Order of the '\n 'subcommand matters!'),\n SubCommand(\n Opt('sample'), Opt('foo'), Opt('bar'),\n doc='Also impossible to access. 
This subcommand just adds some '\n 'keywords to the command.')],\n description='Only bot owners can see this text!',\n other='Note that no shortcuts were defined. They, too, are optional. '\n 'Also, this command is hidden, which means that only the bot '\n 'owners can see this command listed from the help command. '\n 'However, unless the command is configured with an elevated '\n 'permissions level, any user can still execute the command. '\n 'Users still will not be able to see the specific help for this '\n 'command, though. Lastly, this command is disabled in DMs.',\n hidden=True, allow_direct=False, category='demo'))\n\n new_commands.append(Command(\n 'notify', subcommands=[\n SubCommand(\n Arg('text', argtype=ArgTypes.MERGED),\n doc='Notify the owners with some text!')],\n other='This command uses a custom function. It is called with the '\n 'same arguments as get_response. The command will show up to '\n 'all users in the help command, but can only be used by server '\n 'owners, as it is disallowed in direct messages.',\n elevated_level=2, allow_direct=False, function=custom_notify,\n category='demo'))\n\n new_commands.append(Command(\n 'wait', other='Use this command to demo the wait_for functionality', category='demo'))\n\n return new_commands", "def at_cmdset_creation(self):\n self.add(Command())", "def create_command(text, commands):\n\n class CustomCommand(BaseCommand):\n description = text\n\n def run(self):\n for cmd in commands:\n subprocess.check_call(cmd)\n\n return CustomCommand", "def __setitem__(self, name, attribs):\n \n assert(type(attribs) is list)\n \n self.register(Command(*([name] + attribs)))", "def _AddCmdInstance(self, command_name, cmd, command_aliases=None):\n for name in [command_name] + (command_aliases or []):\n self._cmd_alias_list[name] = command_name\n self._cmd_list[command_name] = cmd", "def create_command_list(device):\n command = XmlApiObject({})\n command.name = \"test\"\n device.commands[command.name] = command", "def parse_commands(self, commands):\n\n for command_str in commands:\n command_parts = command_str.split(' ')\n\n # Check if command string has at least 2 parts: '--cmd' and 'command_type'\n if len(command_parts) <= 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n # Extract command and parameters\n command_type = command_parts[1].lower()\n command_parameters = command_parts[2:len(command_parts)]\n\n # Form a command to be added to the command queue\n command = {}\n if command_type == 'load':\n # Check number of parameters\n if len(command_parameters) != 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n folder_path = command_parameters[0].replace('\"', '').strip()\n\n command['method'] = self.app_instance.image_source.load_images\n command['parameters'] = {\n 'folder_path': folder_path\n }\n\n elif command_type == 'align':\n # Check number of parameters\n if len(command_parameters) != 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n secondary_image_index = int(command_parameters[0])\n\n command['method'] = self.app_instance.align_nth_secondary_image\n command['parameters'] = {\n 'secondary_image_index': secondary_image_index\n }\n\n elif command_type == 'blend':\n # Check number of parameters\n if len(command_parameters) != 5:\n print \"[ERROR] CommandLineExecutor::parse_commands() - 
Command not properly formatted: (\" + command_str + \")\"\n continue\n\n secondary_image_index = int(command_parameters[0])\n x = int(command_parameters[1])\n y = int(command_parameters[2])\n width = int(command_parameters[3])\n height = int(command_parameters[4])\n\n command['method'] = self.app_instance.blend_nth_secondary_image\n command['parameters'] = {\n 'secondary_image_index': secondary_image_index,\n 'x': x,\n 'y': y,\n 'width': width,\n 'height': height\n }\n\n elif command_type == 'save':\n # Check number of parameters\n if len(command_parameters) != 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n filename = command_parameters[0].replace('\"', '').strip()\n\n command['method'] = self.app_instance.save_result\n command['parameters'] = {\n 'filename': filename\n }\n\n else:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n print \"[INFO] Queuing command: \" + command_str\n\n self.command_queue.append(command)", "def command(self, *commands):\n def decorator(function):\n for command in commands:\n self.functions[command] = function\n return function\n return decorator", "def _init_commands(self):\n\t\tself.commands = {}\n\t\tself.log.info(\"Initializing commands...\")\n\t\t# Get all the commands and iterate over them\n\t\tfor command in self.conf_commands:\n\t\t\t\n\t\t\t# Verify the necessary config elements exist at all\n\t\t\tdisabled = command.get('disabled', False) # Disabled is optional, defaults to False\n\t\t\tif(disabled == True):\n\t\t\t\tcontinue;\n\t\t\tcommand_name = command.get('name', \"unknown\").lower()\n\t\t\tdescription = command.get('description', \"\")\n\t\t\tpermission_str = command.get('permission', None)\n\t\t\taction = command.get('action', None)\n\t\t\tmin_votes = command.get('min_votes', None)\n\t\t\targs = command.get('args', None)\n\t\t\taliases = command.get('aliases', None)\n\t\t\tif(command_name is None \n\t\t\t\tor permission_str is None \n\t\t\t\tor action is None \n\t\t\t\tor min_votes is None \n\t\t\t\tor args is None):\n\t\t\t\tself.log.warn(\"Command '{}': Error, missing 'permission', 'action', 'min_votes', or 'args' elements for command \".format(command_name))\n\t\t\t\tcontinue\n\n\t\t\t# Verify the votes and permission string are valid\n\t\t\tif(min_votes < 0):\n\t\t\t\tself.log.warn(\"Command '{}': Error, min_votes cannot be less than zero for command {}\".format(command_name, min_votes))\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tself.log.debug(\"Command '{}': minimum votes is {}\".format(command_name, min_votes))\n\n\t\t\ttry:\n\t\t\t\tpermission = Permission[permission_str]\n\t\t\t\tself.log.debug(\"Command '{}': permission is {}\".format(command_name, permission))\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.warn(\"Command '{}': Error, permission string '{}' is invalid, must be one of: {}\".format(command_name, permission_str, Permission.__members__))\n\t\t\t\tcontinue\n\n\t\t\t# Try to get the corresponding action class\n\t\t\ttry:\n\t\t\t\tmodule = import_module(\"obs.actions.\"+action)\n\t\t\t\tclass_ = getattr(module, action)\n\t\t\t\tself.log.debug(\"Command {}: action is {}\".format(command_name, class_))\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.warn(\"Command '{}': Error, no such action {} is defined. 
Full error: {}\".format(command_name, action, e))\n\t\t\t\tcontinue\n\n\t\t\t# Try to instantiate the action class\n\t\t\ttry:\n\t\t\t\tself.log.debug(\"Command {}: args are: {}\".format(command_name, args))\n\t\t\t\tcommand_obj = class_(self, command_name, aliases, description, permission, min_votes, args)\n\t\t\texcept ValueError as e:\n\t\t\t\tself.log.warn(e)\n\t\t\t\tcontinue\n\n\t\t\t# Add command_obj to internal reference\n\t\t\tself.commands[command_name] = command_obj\n\n\t\t\t# If there are aliases, add them too\n\t\t\t\n\t\t\tif(not aliases is None and isinstance(aliases, (list,) )):\n\t\t\t\tself.log.debug(\"Command '{}': Found aliases {}\".format(command_name, aliases))\n\t\t\t\tfor alias in aliases:\n\t\t\t\t\tself.commands[alias] = command_obj\n\t\t\telse:\n\t\t\t\tself.log.debug(\"Command '{}': No aliases\".format(command_name, aliases))\n\n\t\t# Finally after all commands have been initialized then add the help command\n\t\t#self.commands['help'] = Help(self)\n\n\t\t# Done initializing\n\t\tself.log.info(\"...Commands initialized: {}\".format(\n\t\t\t\tlist( self.commands.keys()) \n\t\t\t)\n\t\t)", "def init_command_objects(self):\n super().init_command_objects()\n device_args = (self, self.state_model, self.logger)\n # resource_args = (self.resource_manager, self.state_model, self.logger) \n # only use resource_args if we want to have separate resource_manager object\n\n self.register_command_object(\n \"Configure\",\n self.ConfigureCommand(*device_args)\n ) \n self.register_command_object(\n \"AddReceptors\",\n self.AddReceptorsCommand(*device_args)\n )\n self.register_command_object(\n \"RemoveReceptors\",\n self.RemoveReceptorsCommand(*device_args)\n )\n self.register_command_object(\n \"RemoveAllReceptors\",\n self.RemoveAllReceptorsCommand(*device_args)\n )\n self.register_command_object(\n \"ConfigureScan\",\n self.ConfigureScanCommand(*device_args)\n )\n self.register_command_object(\n \"StartScan\",\n self.ScanCommand(*device_args)\n )\n self.register_command_object(\n \"GoToIdle\",\n self.GoToIdleCommand(*device_args)\n )", "def at_cmdset_creation(self):\n self.add(power.CmdPower())\n self.add(CmdCursedBone())\n # self.add(CmdDeathSpike())\n \"\"\"\n self.add(CmdAnchor())\n self.add(CmdBloodCloak())\n self.add(CmdBloodShield())\n self.add(CmdBloodWard())\n self.add(CmdBodyToMind())\n self.add(CmdBoneScythe())\n self.add(CmdCircleDeath())\n self.add(CmdCorpseBurst())\n self.add(CmdCorpseDrain())\n self.add(CmdCreateBloodGem())\n self.add(CmdCurseDeathLink())\n self.add(CmdDeathRain())\n self.add(CmdDeathWard())\n self.add(CmdDisease())\n self.add(CmdBoneDust())\n self.add(CmdGloom())\n self.add(CmdImbueBlood())\n self.add(CmdImbueDeath())\n self.add(CmdMassSilence())\n self.add(CmdMassSleep())\n self.add(CmdMassAnchor())\n self.add(CmdMassWeakness())\n self.add(CmdPlague())\n self.add(CmdPoison())\n self.add(CmdPoisonCloud())\n self.add(CmdSilence())\n self.add(CmdSleep())\n self.add(CmdSpectralHunter())\n self.add(CmdSummon())\n self.add(CmdSummonCorruptedMan())\n self.add(CmdSummonCursedArmy())\n self.add(CmdSummonCursedMan())\n self.add(CmdSummonReanimatedMan())\n self.add(CmdTeleport())\n self.add(CmdTeleportOther())\n self.add(CmdTransferPain())\n self.add(CmdVampiricClaw())\n self.add(CmdVampiricTouch())\n self.add(CmdWeakness())\n \"\"\"", "def set_command_list(self):\n self.commands = dict( \\\n BTN_POWER_OFF = 2, \\\n BTN_TV = 27, \\\n BTN_1 = 4, \\\n BTN_2 = 5, \\\n BTN_3 = 6, \\\n BTN_4 = 8, \\\n BTN_5 = 9, \\\n BTN_6 = 10, \\\n BTN_7 = 12, \\\n BTN_8 = 
13, \\\n BTN_9 = 14, \\\n BTN_0 = 17, \\\n BTN_FAVOURITE_CHANNEL = 68, \\\n BTN_PREVIOUS_CHANNEL = 19, \\\n BTN_VOLUME_UP = 7, \\\n BTN_VOLUME_DOWN = 11, \\\n BTN_CHANNEL_UP = 18, \\\n BTN_CHANNEL_DOWN = 16, \\\n BTN_MUTE = 15, \\\n BTN_SOURCE = 1, \\\n BTN_INFO = 31, \\\n BTN_TOOLS = 75, \\\n BTN_GUIDE = 79, \\\n BTN_RETURN = 88, \\\n BTN_MENU = 26, \\\n BTN_ENTER = 104, \\\n BTN_UP = 96, \\\n BTN_DOWN = 97, \\\n BTN_LEFT = 101, \\\n BTN_RIGHT = 98, \\\n BTN_INTERNET = 147, \\\n BTN_EXIT = 45, \\\n BTN_RED = 108, \\\n BTN_GREEN = 20, \\\n BTN_YELLOW = 21, \\\n BTN_BLUE = 22, \\\n BTN_TELETEXT = 44, \\\n BTN_MEDIA = 140, \\\n BTN_CONTENT = 121, \\\n BTN_CHANNEL_LIST = 107, \\\n BTN_AD = 0, \\\n BTN_SUBTITLE = 37, \\\n BTN_FORWARD = 69, \\\n BTN_PAUSE = 74, \\\n BTN_BACKWARD = 72, \\\n BTN_RECORD = 73, \\\n BTN_PLAY = 71, \\\n BTN_STOP = 70, \\\n BTN_SLEEP = 3, \\\n BTN_PICTURE_IN_PICTURE = 32, \\\n BTN_PSIZE = 62, \\\n BTN_ENERGY = 119, \\\n BTN_SRS = 110, \\\n BTN_PMODE = 40, \\\n BTN_P_DYNAMIC = 189, \\\n BTN_P_STANDARD = 223, \\\n BTN_P_MOVIE1 = 222, \\\n BTN_P_MOVIE2 = 221, \\\n BTN_P_USER1 = 220, \\\n BTN_P_USER2 = 219, \\\n BTN_P_USER3 = 218, \\\n BTN_ASPECT_43 = 227, \\\n BTN_ASPECT_169 = 228, \\\n BTN_S_SCART1 = 132, \\\n BTN_S_SCART2 = 235, \\\n BTN_S_MODULE = 134, \\\n BTN_S_AV = 236, \\\n BTN_S_VGA = 105, \\\n BTN_S_HDMI1 = 233, \\\n BTN_S_HDMI2 = 190, \\\n BTN_S_HDMI3_DVI = 194, \\\n BTN_S_HDMI4 = 197)", "def parse(self, commands):\n raise NotImplementedError()", "def at_cmdset_creation(self):\n super().at_cmdset_creation()\n #\n # any commands you add below will overload the default ones.\n #", "def at_cmdset_creation(self):\n super().at_cmdset_creation()\n #\n # any commands you add below will overload the default ones.\n #", "def add_command(self, name, cmd):\n if (\n not isinstance(cmd, types.FunctionType) and\n not issubclass(cmd, AbstractCommand)\n ):\n print_failure(\"{}-Command must inherit from AbstractCommand!\".format(name), 1)\n\n # setup command\n cmd = cmd() # type: AbstractCommand\n command = self._subparsers.add_parser(\n name,\n help=cmd.help,\n description=colored(cmd.description, 'yellow'),\n formatter_class=ColoredHelpFormatter,\n add_help=False\n )\n command.add_argument(\n '-h', '--help',\n action='help',\n default=argparse.SUPPRESS,\n help='Show this help message and exit.'\n )\n command.titles('Arguments', 'Options', color='cyan')\n\n # Add arguments and bind command\n for arg, opt in cmd.arguments.items():\n command.add_argument(arg, **opt)\n command.set_defaults(func=cmd.handle)\n self.commands[name] = command", "def at_cmdset_creation(self):\n self.add(default_cmds.CmdLook())\n self.add(default_cmds.CmdSay())", "def __init__(self, cmd):\n # Build command + options \n self.cmd = cmd \n setattr(self, 'command', \"%s\" % (cmd))", "def gen_social_cmds(\n commands, verbs=SOCIAL_VERBS, command_cls=SocialCommand,\n parser=DirectedAction,\n):\n for verb in verbs:\n commands.register(command_cls(verb))", "def process_commands(self, commands: List[str]):", "def get_commands(self, component_loads):\n return {}", "def __getattr__(self, name):\n return Command(self.cmd, name)" ]
[ "0.70388836", "0.6526105", "0.6514899", "0.65065366", "0.6475493", "0.6474638", "0.6471777", "0.6467955", "0.6325269", "0.62973154", "0.6256178", "0.62097096", "0.6202347", "0.6164374", "0.6151688", "0.61034733", "0.60576606", "0.6051253", "0.60477805", "0.60348785", "0.6015943", "0.6010038", "0.6010038", "0.5997713", "0.5978523", "0.59560126", "0.59399086", "0.593246", "0.5883542", "0.58715165" ]
0.7571806
0
Create and register a command with the given name. This is a shortcut for constructing a Command and registering it with the register method. attribs is a list of arguments to be passed to Command's constructor. Raises TypeError if attribs is not a list.
def __setitem__(self, name, attribs):
    assert(type(attribs) is list)
    self.register(Command(*([name] + attribs)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, name, command):", "def register_command(\n self, func, name=None, description=None, show_if=True, args_opts=None\n ):\n name = name or func.__name__\n if name in self._commands:\n raise NameError(\"This command already exists\")\n self._commands[name] = Command(func, name, description, show_if, args_opts)", "def command(command_list):\n def add_attribute(func):\n if not hasattr(func, \"command\"):\n func.command = []\n func.command.append(command_list)\n return func\n return add_attribute", "def add_command(self, name, desc, func=None):\n assert type(name) == str\n assert type(desc) == str\n if func is not None:\n assert callable(func)\n\n def wrap_argparse(parser, args, func):\n \"\"\"Convenience function calls argparse with list of args and calls func with them\"\"\"\n pargs = parser.parse_args(args)\n return func(**vars(pargs))\n\n assert name not in self.cmd2func, \"Command with same name already defined on this level!\"\n\n self.cmd_list.append((name, desc))\n if func is None:\n m = necapy(name=name, desc=desc)\n self.cmd2func[name] = m.parse\n return m\n else:\n ap = argparse.ArgumentParser(description=desc)\n self.cmd2func[name] = lambda args: wrap_argparse(ap, args, func)\n return ap", "def _AddCmdInstance(self, command_name, cmd, command_aliases=None):\n for name in [command_name] + (command_aliases or []):\n self._cmd_alias_list[name] = command_name\n self._cmd_list[command_name] = cmd", "def create(**cmd_opts):\n command_name = cmd_opts.get(CLI_CMDOPT.CMD_NAME, '')\n\n return CMDCONF_TYPES[command_name](**cmd_opts)", "def __init__(self, *commands):\n \n self.cmds = dict()\n \n for nm, attr in commands:\n self[nm] = attr", "def add_command(self, name, fct):\r\n self.cmds[name] = fct", "def add_command(self, name, cmd):\n if (\n not isinstance(cmd, types.FunctionType) and\n not issubclass(cmd, AbstractCommand)\n ):\n print_failure(\"{}-Command must inherit from AbstractCommand!\".format(name), 1)\n\n # setup command\n cmd = cmd() # type: AbstractCommand\n command = self._subparsers.add_parser(\n name,\n help=cmd.help,\n description=colored(cmd.description, 'yellow'),\n formatter_class=ColoredHelpFormatter,\n add_help=False\n )\n command.add_argument(\n '-h', '--help',\n action='help',\n default=argparse.SUPPRESS,\n help='Show this help message and exit.'\n )\n command.titles('Arguments', 'Options', color='cyan')\n\n # Add arguments and bind command\n for arg, opt in cmd.arguments.items():\n command.add_argument(arg, **opt)\n command.set_defaults(func=cmd.handle)\n self.commands[name] = command", "def assignCommand(*args, addDivider: AnyStr=\"\", altModifier: bool=True, annotation:\n Union[AnyStr, bool]=\"\", command: Union[Script, bool]=None, commandModifier:\n bool=True, ctrlModifier: bool=True, data1: Union[AnyStr, bool]=\"\", data2:\n Union[AnyStr, bool]=\"\", data3: Union[AnyStr, bool]=\"\", delete: int=0,\n dividerString: Union[AnyStr, bool]=\"\", enableCommandRepeat: bool=True,\n factorySettings: bool=True, index: int=0, keyArray: bool=True, keyString:\n Union[AnyStr, bool]=\"\", keyUp: bool=True, name: bool=True,\n numDividersPreceding: Union[int, bool]=0, numElements: bool=True,\n optionModifier: bool=True, sortByKey: bool=True, sourceUserCommands:\n bool=True, q=True, query=True, e=True, edit=True, **kwargs)->Union[None,\n Any]:\n pass", "def create_command(cmd, args):\n\n\t\tfor cls in BaseCommand.__subclasses__():\n\t\t\tif cls.cmd() == cmd:\n\t\t\t\treturn cls(args)\n\n\t\treturn None", "def __getattr__(self, name):\n return Command(self.cmd, name)", 
"def add_cmd(self, cmd, name=\"\"):\n if cmd:\n self.cmds.add((cmd, name))", "def __init__(self, command_list: list = None) -> None:\n if command_list is None:\n command_list = implemented_commands\n for command in command_list:\n setattr(self, command.get(\"name\").replace(\" \", \"_\"), self._SingleCommand(command))", "def register_command(name):\n\n def register(cmd):\n Facade().register_command(name, cmd)\n return cmd\n\n return register", "def _register(cls):\r\n command_name = cls.__dict__.get('__command__', None)\r\n if command_name:\r\n Command._commands[command_name] = cls", "def command(*args, **kwargs):\n def deco(fct):\n return Command(fct, **kwargs)\n if args:\n return deco(*args)\n return deco", "def factory(cmd, **default_kwargs):\n cmd = resolve_command(cmd)\n return Command(cmd)", "def command(name):\n def _decoration(fcn):\n fcn.command = name\n return fcn\n return _decoration", "def register(self, cmd):\n\n assert(cmd.name not in self.cmds)\n \n data = cmd.data\n assert(type(data.switches) is dict)\n \n self.cmds[cmd.name] = data", "def register_command(*parse_args, **options):\n def wrapper(function):\n function._is_command = True\n return function\n return wrapper", "def register(self, command_name, command):\n self._commands[command_name] = command", "def add_command(self, command_info):\n self.commands[command_info.name] = command_info", "def register_command(self, func):\n self.commands[func.__name__] = func", "def __init__(\n self,\n name: Optional[str] = None,\n aliases: Iterable[str] = (),\n args: Iterable[Argument] = (),\n ) -> None:\n self.args = Lexicon()\n self.positional_args: List[Argument] = []\n self.flags = Lexicon()\n self.inverse_flags: Dict[str, str] = {} # No need for Lexicon here\n self.name = name\n self.aliases = aliases\n for arg in args:\n self.add_arg(arg)", "def add_command(self, name, command_class, ns=None):\n ep = EntryPointWrapper(name, command_class)\n self.add_command_ep(ep, ns=ns)", "def register_command(func):\n supported_commands.append(func.__name__)\n return func", "def addCommand(self, name, func, resultType=None, globalName=False):\n\n if globalName and \".\" in name:\n raise Exception(\"Invalid global name: %s!\" % name)\n elif not globalName and len(name.split(\".\")) != 2:\n raise Exception(\"Command names should always match namespace.name! Tried with: %s!\" % name)\n\n commands = self.__commands\n\n if name in commands:\n raise Exception(\"Command %s already exists!\" % name)\n\n commands[name] = {\n \"func\" : func,\n \"type\" : resultType\n }", "def __init__(self, name=\"alpha\", attr=None):\n Arg.__init__(self, name, attr)", "def __init__(self, cmd):\n # Build command + options \n self.cmd = cmd \n setattr(self, 'command', \"%s\" % (cmd))" ]
[ "0.60257137", "0.5921839", "0.5862189", "0.58104455", "0.5772847", "0.5719808", "0.56287426", "0.5623136", "0.56083953", "0.555473", "0.5527089", "0.54834545", "0.54792297", "0.5476723", "0.5469289", "0.5455345", "0.5449943", "0.5447708", "0.54236454", "0.53810966", "0.5364887", "0.53569734", "0.5353157", "0.53310525", "0.52879477", "0.52859026", "0.5277435", "0.5264919", "0.52575636", "0.5249646" ]
0.76530105
0
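The documents in the two records above look like methods of the same CommandManager class: __init__ stores commands by assigning self[nm] = attr, which goes through __setitem__, which in turn builds a Command and hands it to a register method. A minimal self-contained sketch of how those pieces could fit together; the Command stand-in and the body of register are assumptions added only to make the example runnable:

class Command:
    # Stand-in for the real Command class (a fuller constructor appears in a
    # later record); it only keeps the name and extra attributes it was given.
    def __init__(self, name, *attribs):
        self.name = name
        self.attribs = attribs

class CommandManager:
    def __init__(self, *commands):
        self.cmds = dict()
        for nm, attr in commands:
            self[nm] = attr

    def __setitem__(self, name, attribs):
        assert(type(attribs) is list)
        self.register(Command(*([name] + attribs)))

    def register(self, cmd):
        # Assumed registration step: index the command under its name.
        self.cmds[cmd.name] = cmd

# Usage: each positional argument is a (name, attribs) pair.
mgr = CommandManager(("quit", []), ("copy", [{"--dest": str}]))
assert set(mgr.cmds) == {"quit", "copy"}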
Create a new command description. name is the name of the command that the user must type at the prompt. switches is a dictionary that maps from the allowed command line switch keys to the type of their values.
def __init__(self, name, switches = dict()):
    self.name = name
    self.data = CmdData()
    self.data.switches = switches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generic(self, switches=[\"--help\"]):\n return self._command_template(switches)", "def add_command(self, name, command):\n if command['type'] == 'topic':\n if 'deadman_buttons' not in command:\n command['deadman_buttons'] = []\n command['buttons'] = command['deadman_buttons']\n if 'deadman_axes' not in command:\n command['deadman_axes'] = []\n command['axes'] = command['deadman_axes']\n elif command['type'] == 'action':\n if 'action_goal' not in command:\n command['action_goal'] = {}\n elif command['type'] == 'service':\n if 'service_request' not in command:\n command['service_request'] = {}\n self.command_list[name] = command", "def create(**cmd_opts):\n command_name = cmd_opts.get(CLI_CMDOPT.CMD_NAME, '')\n\n return CMDCONF_TYPES[command_name](**cmd_opts)", "def add_cli(self, subparser):\n new_parser = subparser.add_parser('create', help='create new scratch file')\n new_parser.add_argument('name', nargs='?', default=None, help=\"Optional Name to be given to the file, \"\n \"default name is an increment of 'scratch##'\")\n new_parser.set_defaults(func=self.action)\n return subparser", "def add(self, name, command):", "def buildCommandModel ( switchSpecs, posSpecs ):\n\n #-- 1 --\n result = []\n\n #-- 2 --\n # [ result +:= strings representing the options in switchSpecs ]\n for switch in switchSpecs:\n result.append ( \"-%s\" % switch.letter )\n\n #-- 3 --\n # [ result +:= strings representing the keys in posSpecs ]\n for pos in posSpecs:\n if pos.optional:\n result.append ( \"[%s]\" % pos.key )\n else:\n result.append ( pos.key )\n if pos.repeated:\n result.append ( \"...\" )\n\n #-- 4 --\n # [ return the concatenation of the strings in result with single\n # spaces between them ]\n return \" \".join ( result )", "def new_entry(path, name):\n\n default_config = {'prompt': \"Select command to run:\", 'choices': {}}\n with open(path, 'w') as f:\n json.dump(default_config, f)\n\n add_entry_to_database(path, name)", "def makecmd(self, options):", "def __init__(self, name, description, cli_opts):\n self.name = name\n self.description = description\n self.cli_opts = cli_opts", "def add_command(self, name, cmd):\n if (\n not isinstance(cmd, types.FunctionType) and\n not issubclass(cmd, AbstractCommand)\n ):\n print_failure(\"{}-Command must inherit from AbstractCommand!\".format(name), 1)\n\n # setup command\n cmd = cmd() # type: AbstractCommand\n command = self._subparsers.add_parser(\n name,\n help=cmd.help,\n description=colored(cmd.description, 'yellow'),\n formatter_class=ColoredHelpFormatter,\n add_help=False\n )\n command.add_argument(\n '-h', '--help',\n action='help',\n default=argparse.SUPPRESS,\n help='Show this help message and exit.'\n )\n command.titles('Arguments', 'Options', color='cyan')\n\n # Add arguments and bind command\n for arg, opt in cmd.arguments.items():\n command.add_argument(arg, **opt)\n command.set_defaults(func=cmd.handle)\n self.commands[name] = command", "def create_command_line(clt_desc, input_dict):\n args = []\n if 'arguments' in clt_desc:\n for argument in clt_desc['arguments']:\n if not isinstance(argument, str):\n exit_system_error('Sorry: I only understand strings for arguments.'\n 'Please use the inputs to pass arguments from input parameters.')\n args.append((-1, clt_desc['arguments']))\n\n for parameter in clt_desc['inputs']:\n args.append(create_argument(parameter, input_dict))\n\n args.sort(key=lambda arg: arg[0])\n\n # drop keys and flatten\n command_line = []\n for _, items in args:\n if items is not None:\n command_line.extend(items)\n 
return command_line", "def __init__(self, name='', instructions='', *prompts):\n self.name = name\n self.instructions = instructions\n self.prompts = []\n for x in prompts:\n if isinstance(x, string_types):\n self.add_prompt(x)\n else:\n self.add_prompt(x[0], x[1])", "def add_command(self, name, fct):\r\n self.cmds[name] = fct", "def command_create(self):\n command = []\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['pre_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n command.extend(self.pre_chth)\n command.append(Template('@CMD_BEGIN@ $short_name').substitute(self.shell_dict))\n command.extend(self.tool_chth)\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['post_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n\n return '\\n'.join(command)", "def genCmd(self, cmdinfo, name, alias):\n OutputGenerator.genCmd(self, cmdinfo, name, alias)\n\n # Add a typeCategory{} entry for the category of this type.\n self.addName(self.typeCategory, name, 'protos')\n\n if alias:\n # Add name -> alias mapping\n self.addName(self.alias, name, alias)\n else:\n # May want to only emit definition on this branch\n True\n\n params = [param.text for param in cmdinfo.elem.findall('param/name')]\n self.protos[name] = params\n paramTypes = [param.text for param in cmdinfo.elem.findall('param/type')]\n for param_type in paramTypes:\n self.addMapping(name, param_type)", "def _add_switch(self, switchdesc):\n # Check switch definition parameters\n switch_attributes = list(switchdesc.keys())\n if not set(switch_attributes).issubset(self.switch_attributes):\n raise ValueError(\n \"Switch definition: '{0}' defined in '{1}' is not supported. \"\n \"Supported switch parameters are '{2}'.\".format(\n json.dumps(switchdesc, indent=2), self._xmlfile,\n self.switch_attributes))\n for mandatory_parameter in self.switch_attributes[:2]:\n if mandatory_parameter not in switch_attributes:\n raise ValueError(\n \"A '{0}' parameter is required in switch definition: \"\n \"'{1}' defined in '{2}'.\".format(\n mandatory_parameter, json.dumps(switchdesc, indent=2),\n self._xmlfile))\n\n # Check the name of the switch is not already reserved\n switch_name = switchdesc[self.switch_attributes[0]][0]\n if switch_name in self._switches:\n raise ValueError(\n \"The switch name '{0}' defined in '{1}' is \"\n \"already used.\".format(switch_name, self._xmlfile))\n\n # Create the switch control\n switch_paths = {}\n for pathdesc in switchdesc[self.switch_attributes[1]]:\n path_name = pathdesc[self.switch_path[0]][0]\n path_boxes = [box[self.unit_attributes[0]]\n for box in pathdesc[self.switch_path[1]]]\n switch_paths[path_name] = path_boxes\n switch_keys = list(switch_paths.keys())\n control = controls[\"Enum\"](\n choices=tuple(switch_keys),\n switch_name=switch_name,\n desc=(\"Switch between paths '{0}:{1}' defined in pipeline '{2}'\"\n \".\".format(switch_name, \"-\".join(switch_keys), self.id)))\n setattr(self.inputs, switch_name, control)\n self._switches[switch_name] = switch_paths\n control.add_observer(\"value\", self._update_activation)\n control.value = switch_keys[0]", "def main(verbose, debug, names):\n initialize(debug)\n\n echome(names)\n # click.echo(\"hello\")\n # see\n # https://www.brianthicks.com/post/2014/11/03/build-modular-command-line-tools-with-click/", "def get_description(name=None, defaults=None):\n new_description = dict()\n # Variable name is required (obviously)\n new_description['Variable'] = defaults['Variable']\n # Variable 
type is required (obviously)\n original_type = defaults.get('Type', None)\n type_hint = \"\" if original_type is None else f\" [was '{original_type}']\"\n new_description['Type'] = prompt_options_list(\n prompt=f\"Variable type{type_hint}: \",\n default=original_type,\n options=[item['Type'] for item in SECRET_TYPES]\n )\n # Prompt (also serves as description) is required\n prompt = (\"Descriptive string to prompt user when \"\n \"setting value: \")\n cli = Input(prompt,\n default=defaults.get('Prompt'),\n word_color=colors.foreground[\"yellow\"])\n result = cli.launch()\n new_description['Prompt'] = result\n # Alternative option set is (no pun intended) optional\n if new_description['Type'] in ['string']:\n prompt = \"Acceptable options from which to chose: \"\n cli = Input(prompt,\n default=defaults.get('Options'),\n word_color=colors.foreground[\"yellow\"])\n result = cli.launch()\n # TODO(dittrich): BUG or ISSUE in waiting.\n # Items in an Options list can't end in '.*' without\n # causing confusion with ',*' wildcard feature.\n # Maybe switch to using '|' for alternaives instead?\n if '.*' in result:\n if result == '.*':\n msg = \"[-] '.*' is not valid: did you mean '*'?\"\n else:\n msg = (\"[-] options list items can't have '.*' \"\n \"wildcards: did you mean to end with ',*'?\")\n raise RuntimeError(msg)\n new_description['Options'] = result\n # Environment variable export alternative optional\n prompt = \"Environment variable to export: \"\n cli = Input(prompt,\n default=defaults.get('Export', ' '),\n word_color=colors.foreground[\"yellow\"])\n result = cli.launch()\n if result not in [' ', '', None]:\n new_description['Export'] = result\n print('')\n return new_description", "def _build_command(self, command_name, hardware_address = '', comp_var_dict = None):\n # Start command adn set name\n command = \"<Command><Name>{command_name}</Name>\".format(command_name=command_name)\n\n if hardware_address:\n command += \"<DeviceDetails><HardwareAddress>{hardware_address}</HardwareAddress></DeviceDetails>\".format(hardware_address=hardware_address)\n\n if comp_var_dict is not None:\n comp_keys = comp_var_dict.keys()\n if len(comp_keys) > 0:\n for comp_key in comp_keys:\n # Build requested variable list\n command += \"<Components><Component><Name>{comp_key}</Name><Variables>\".format(comp_key=comp_key)\n variables = comp_var_dict[comp_key]\n for var in variables:\n command += \"<Variable><Name>{var}</Name></Variable>\".format(var=var)\n command += \"</Variables></Component></Components>\"\n else:\n # Request all variables from all components\n command += \"<Components><All>Y</All></Components>\"\n\n # Close command\n command += \"</Command>\"\n \n return command", "def command(name):\n def _decoration(fcn):\n fcn.command = name\n return fcn\n return _decoration", "def select_cmd():\r\n help_dict = {'1': \"Create LZ, GMA/TPL, \"\r\n \"replace stage files in <ISO path>//stage directory, rebuild ISO\",\r\n '2': \"Create LZ, GMA/TPL, \"\r\n \"replace stage files in <ISO path>//stage directory\",\r\n '3': \"Create LZ, GMA/TPL\",\r\n '4': \"Create .lz.raw\",\r\n '5': \"Compress .lz.raw\",\r\n '6': \"Create LZ\",\r\n '7': \"Create GMA/TPL\",\r\n '8': \"Replace stage files in <ISO path>//stage directory, run GCR\",\r\n '9': \"Rebuild ISO\"\r\n }\r\n\r\n for h_key, h_value in help_dict.items():\r\n print(\"{} ----> {}\".format(h_key, h_value))\r\n\r\n while True:\r\n cmd_input = input(\"\\nEnter command: \")\r\n if cmd_input == \"\":\r\n print(\"\\nInvalid command! 
Try again.\")\r\n\r\n elif cmd_input.lower() not in help_dict.keys():\r\n print(\"\\nInvalid command! Try again.\")\r\n\r\n else:\r\n return cmd_input.lower()", "def add_command(self, name, desc, func=None):\n assert type(name) == str\n assert type(desc) == str\n if func is not None:\n assert callable(func)\n\n def wrap_argparse(parser, args, func):\n \"\"\"Convenience function calls argparse with list of args and calls func with them\"\"\"\n pargs = parser.parse_args(args)\n return func(**vars(pargs))\n\n assert name not in self.cmd2func, \"Command with same name already defined on this level!\"\n\n self.cmd_list.append((name, desc))\n if func is None:\n m = necapy(name=name, desc=desc)\n self.cmd2func[name] = m.parse\n return m\n else:\n ap = argparse.ArgumentParser(description=desc)\n self.cmd2func[name] = lambda args: wrap_argparse(ap, args, func)\n return ap", "def __getattr__(self, name):\n return Command(self.cmd, name)", "def add_command(self, cmd: click.Command, name: str = None, \n help_priority: int=DEFAULT_HELP_PRIORITY,\n hidden: bool=False) -> None:\n help_priorities = self.help_priorities\n help_priorities[cmd.name] = help_priority\n if hidden:\n self.hidden_commands.append(cmd.name)\n \n return super().add_command(cmd, name)", "def __init__(self, cmd, **kwargs):\n # Init method - should be subclassed!\n # \n # The subclass methods should look like this:\n # \n # def __init__(self, cmd=\"muscle\", **kwargs):\n # self.parameters = [...]\n # AbstractCommandline.__init__(self, cmd, **kwargs)\n # \n # i.e. There should have an optional argument \"cmd\" to set the location\n # of the executable (with a sensible default which should work if the\n # command is on the path on Unix), and keyword arguments. It should\n # then define a list of parameters, all objects derived from the base\n # class _AbstractParameter.\n # \n # The keyword arguments should be any valid parameter name, and will\n # be used to set the associated parameter.\n self.program_name = cmd\n try:\n parameters = self.parameters\n except AttributeError:\n raise AttributeError(\"Subclass should have defined self.parameters\")\n #Create properties for each parameter at run time\n aliases = set()\n for p in parameters:\n for name in p.names:\n if name in aliases:\n raise ValueError(\"Parameter alias %s multiply defined\" \\\n % name)\n aliases.add(name)\n name = p.names[-1]\n if _re_prop_name.match(name) is None:\n raise ValueError(\"Final parameter name %s cannot be used as \"\n \"an argument or property name in python\"\n % repr(name))\n if name in _reserved_names:\n raise ValueError(\"Final parameter name %s cannot be used as \"\n \"an argument or property name because it is \"\n \"a reserved word in python\" % repr(name))\n if name in _local_reserved_names:\n raise ValueError(\"Final parameter name %s cannot be used as \"\n \"an argument or property name due to the \"\n \"way the AbstractCommandline class works\"\n % repr(name))\n #Beware of binding-versus-assignment confusion issues\n def getter(name):\n return lambda x : x._get_parameter(name)\n def setter(name):\n return lambda x, value : x.set_parameter(name, value)\n def deleter(name):\n return lambda x : x._clear_parameter(name)\n doc = p.description\n if isinstance(p, _Switch):\n doc += \"\\n\\nThis property controls the addition of the %s \" \\\n \"switch, treat this property as a boolean.\" % p.names[0]\n else:\n doc += \"\\n\\nThis controls the addition of the %s parameter \" \\\n \"and its associated value. 
Set this property to the \" \\\n \"argument value required.\" % p.names[0]\n prop = property(getter(name), setter(name), deleter(name), doc)\n setattr(self.__class__, name, prop) #magic!\n for key, value in kwargs.iteritems():\n self.set_parameter(key, value)", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def at_cmdset_creation(self):\n self.add(default_cmds.CmdLook())\n self.add(default_cmds.CmdSay())", "def add_cmd(self, name: str, help_str: str, cmd_fn: typing.Callable, arg: str = None, arg_help: str = None):\n self.cmd_names.append(name)\n cmd = self.cli_subparsers.add_parser(name, help=help_str)\n cmd.set_defaults(func=cmd_fn)\n if arg is not None:\n cmd.add_argument(arg, help=arg_help)", "def at_cmdset_creation(self):\n self.add(power.CmdPower())\n self.add(CmdCursedBone())\n # self.add(CmdDeathSpike())\n \"\"\"\n self.add(CmdAnchor())\n self.add(CmdBloodCloak())\n self.add(CmdBloodShield())\n self.add(CmdBloodWard())\n self.add(CmdBodyToMind())\n self.add(CmdBoneScythe())\n self.add(CmdCircleDeath())\n self.add(CmdCorpseBurst())\n self.add(CmdCorpseDrain())\n self.add(CmdCreateBloodGem())\n self.add(CmdCurseDeathLink())\n self.add(CmdDeathRain())\n self.add(CmdDeathWard())\n self.add(CmdDisease())\n self.add(CmdBoneDust())\n self.add(CmdGloom())\n self.add(CmdImbueBlood())\n self.add(CmdImbueDeath())\n self.add(CmdMassSilence())\n self.add(CmdMassSleep())\n self.add(CmdMassAnchor())\n self.add(CmdMassWeakness())\n self.add(CmdPlague())\n self.add(CmdPoison())\n self.add(CmdPoisonCloud())\n self.add(CmdSilence())\n self.add(CmdSleep())\n self.add(CmdSpectralHunter())\n self.add(CmdSummon())\n self.add(CmdSummonCorruptedMan())\n self.add(CmdSummonCursedArmy())\n self.add(CmdSummonCursedMan())\n self.add(CmdSummonReanimatedMan())\n self.add(CmdTeleport())\n self.add(CmdTeleportOther())\n self.add(CmdTransferPain())\n self.add(CmdVampiricClaw())\n self.add(CmdVampiricTouch())\n self.add(CmdWeakness())\n \"\"\"", "def add_cmd(self, cmd, name=\"\"):\n if cmd:\n self.cmds.add((cmd, name))" ]
[ "0.5894565", "0.5829862", "0.58222437", "0.57666796", "0.57283145", "0.5709916", "0.56761897", "0.56290853", "0.54858685", "0.53589696", "0.535696", "0.5352396", "0.5328115", "0.53232706", "0.53141785", "0.52954423", "0.52918684", "0.52891374", "0.52443874", "0.5243947", "0.5227459", "0.51917166", "0.51894116", "0.51799357", "0.51448506", "0.5144495", "0.51386625", "0.5137742", "0.51215196", "0.5118888" ]
0.7299439
0
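The record above supplies the Command constructor that the manager sketched earlier would instantiate through __setitem__. A minimal sketch with CmdData assumed to be a plain attribute container, which is all the shown code requires:

class CmdData:
    # Assumed: a simple holder for per-command data such as the switch table.
    pass

class Command:
    def __init__(self, name, switches = dict()):
        self.name = name
        self.data = CmdData()
        self.data.switches = switches

# Usage: map each allowed command-line switch key to the type of its value.
copy_cmd = Command("copy", {"--dest": str, "--overwrite": bool})
assert copy_cmd.data.switches["--overwrite"] is bool
# With the manager sketched earlier, the equivalent registration would be
#   manager["copy"] = [{"--dest": str, "--overwrite": bool}]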
Return the point subtraction P - Q.
def sub(self, P, Q):
    if not (isinstance(P, list) and isinstance(Q, list)):
        raise ValueError("point P (resp. Q) must be [px, py] (resp. [qx, qy])")
    #if not (self.whetherOn(P) and self.whetherOn(Q)):
    #    raise ValueError("either points must not be point on curve.")
    if (P != self.infpoint) and (Q == self.infpoint):
        return P
    elif (P == self.infpoint) and (Q == self.infpoint):
        return self.infpoint
    x = Q[0]
    y = -Q[1]-self.a1*Q[0]-self.a3
    R = [x, y]
    if (P == self.infpoint) and (Q != self.infpoint):
        return R
    else:
        return self.add(P, R)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distPlusProche(p,pts):\r\n\tpoints=pts[::]\r\n\r\n\t#on enleve p de la liste des points en cas de répétition\r\n\tif p in points:\r\n\t\tpoints.remove(p)\r\n\t#on initialise mini avec la distance au premier point de la liste des points\r\n\tmini=sqrt((p[0]-points[0][0])**2+(p[1]-points[0][1])**2)\r\n\t#on compare chaque point avec p pour trouver la plus petite distance\r\n\tfor p2 in points:\r\n\t\tdist=sqrt((p2[0]-p[0])**2+(p2[1]-p[1])**2)\r\n\t\tif dist<mini:\r\n\t\t\tmini=dist\r\n\r\n\treturn round(mini)", "def pent( a, b ):\n return P(a) - b", "def _re(self, p):\n return self.edges[:, 0, :] - p # 0 is arbitrary - the other end also works", "def __sub__(self, other: Compound[Scalar]) -> Compound[Scalar]:\n return (self._pack_points(self._points_set - other._points_set\n if isinstance(other, Multipoint)\n else [point\n for point in self._points\n if point not in other])\n if isinstance(other, Compound)\n else NotImplemented)", "def substract(self, point):\n\n return Point(point.x - self.x, point.y - self.y, point.z - self.z)", "def __sub__(self, other):\n return Point(self.x - other[0], self.y - other[1])", "def __sub__(self, other):\n return Point([c1 - c2 for (c1, c2) in zip(self, other)])", "def test__point_subtraction__given_two_points__return_correct_vector():\n assert Point((0, 1, 2)) - Point((3, 4, 5)) == Vector((-3, -3, -3))", "def __sub__(self, other):\n return (self.x - other.x, self.y - other.y)", "def vectorize(point_a:tuple, point_b:tuple)->tuple:\n return (point_b[0] - point_a[0], point_b[1] - point_a[1])", "def resta(x, y):\n return x - y", "def answer():\n for k in range(2,3000):\n for j in range(k-1,0,-1):\n pj, pk = P(j), P(k)\n #print( j, k, pj, pk )\n if isPent(pk-pj):\n #print( j, k, pj, pk, pk+pj, isPent(pk+pj), pk-pj )\n if isPent(pk+pj) and isPent(pk-pj):\n return pk-pj", "def test_add_substract_points(self):\r\n pt1 = Point(x=1, y=2)\r\n pt2 = Point(x=3, y=40)\r\n summed = pt1 + pt2\r\n substracted = pt1 - pt2\r\n\r\n assert summed.x == 4\r\n assert summed.y == 42\r\n assert substracted.x == -2\r\n assert substracted.y == -38", "def __sub__(self, other):\n try:\n return Point(self.row - other.row, self.col - other.col)\n except AttributeError: # Can also take a tuple (row, col)\n return Point(self.row - other[0], self.col - other[1])", "def cp(temp,pres):\n g_tt = liq_g(2,0,temp,pres)\n cp = -temp * g_tt\n return cp", "def add(self, P, Q):\n if not (isinstance(P, list) and isinstance(Q, list)):\n raise ValueError(\"point P (resp. Q) must be [px, py] (resp. 
[qx, qy])\")\n #if not (self.whetherOn(P) and self.whetherOn(Q)):\n # raise ValueError(\"either points must not be point on curve.\")\n\n if (P == self.infpoint) and (Q != self.infpoint):\n return Q\n elif (P != self.infpoint) and (Q == self.infpoint):\n return P\n elif (P == self.infpoint) and (Q == self.infpoint):\n return self.infpoint\n\n if self.ch == 0:\n # FIXME\n if P[0] == Q[0]:\n if P[1]+Q[1]+self.a1*Q[0]+self.a3 == 0:\n return self.infpoint\n else:\n s = (3*P[0]**2+2*self.a2*P[0]+self.a4-self.a1*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n t = (-P[0]**3+self.a4*P[0]+2*self.a6-self.a3*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n else:\n s = (Q[1]-P[1])/(Q[0]-P[0])\n t = (P[1]*Q[0]-Q[1]*P[0])/(Q[0]-P[0])\n x3 = s**2+self.a1*s-self.a2-P[0]-Q[0]\n y3 = -(s+self.a1)*x3-t-self.a3\n R = [x3, y3]\n return R\n else:\n if not (P[0] - Q[0]):\n # FIXME: the condition is P[0] == Q[0] intuitively,\n # but sometimes there are int vs FiniteFieldElement\n # comparisons ...\n if not (P[1]+Q[1]+self.a1*Q[0]+self.a3):\n return self.infpoint\n else:\n s = (3*P[0]**2+2*self.a2*P[0]+self.a4-self.a1*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n t = (-P[0]**3+self.a4*P[0]+2*self.a6-self.a3*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n else:\n s = (Q[1] - P[1]*self.basefield.one) / (Q[0] - P[0])\n t = (P[1]*Q[0] - Q[1]*P[0]*self.basefield.one)/ (Q[0] - P[0])\n x3 = s**2+self.a1*s-self.a2-P[0]-Q[0]\n y3 = -(s+self.a1)*x3-t-self.a3\n R = [x3, y3]\n return R", "def __rsub__(self, other):\n try:\n return Point(other.row - self.row, other.col - self.col)\n except AttributeError: # Can also take a tuple (row, col)\n return Point(other[0] - self.row, other[1] - self.col)", "def minus_priority(self):\n #return (-self.size, self.vec, self.score) # kinda \"depth-first\"\n #return (self.vec, self.score, -self.size) # kinda \"breadth-first\"\n return (self.score, -self.size, self.vec) # kinda \"depth-first with back-tracking\"", "def bk_p(g,p,r,x, counter):\n print(\"counter:\\t\", counter)\n print(\"p:\\t\", p)\n print(\"r:\\t\", r)\n print(\"x:\\t\", x)\n result = []\n pux = set(p).union(set(x))\n if len(pux) == 0:\n print(\"return r: \", r)\n return r\n else:\n pivot = list(pux)[0]\n pN = [n for n in g.neighbors(pivot)]\n p_copy = copy.deepcopy(p)\n print(\"P_COPY\",p_copy)\n print(\"P_N\",pN)\n for n in pN:\n p_copy.remove(n)\n for v in p_copy:\n print(\"v: \", v)\n vNeighbors = [a for a in g.neighbors(v)]\n print(\"vNeighbors: \\t\", vNeighbors)\n # pnnv, ruv, xnnv\n print(\"================================\")\n result.append(bk_p(g, intersection(p,vNeighbors), r+[v], intersection(x, vNeighbors), counter+1))\n print(\"================================\")\n print(\"result:\\t\", result, \"\\tv: \", v)\n p.remove(v)\n x.append(v)\n print(\"fp:\\t\", p)\n print(\"fr:\\t\", r)\n print(\"fx:\\t\", x)\n return result\n\n def bk_p2(g,r,p,x, counter=0):\n \"\"\"\n Bron-Kerbosch algorithm without pivots (implemented with python sets)\n g: an nx graph\n r: disjoint set of vertices of graph g\n p: disjoint set of vertices of graph g\n x: disjoint set of vertices of graph g\n \"\"\"\n pux = p.union(x)\n if not pux:\n print('Maximal clique found: ', r)\n\n # choose an pivot from pux\n pivot = next(iter(pux))\n neighborsP = list(g.neighbors(pivot))\n for v in p.difference(neighborsP):\n neighborsV = list(g.neighbors(v))\n bk_p(g, r.union([v]), p.intersection(neighborsV), x.intersection(neighborsV), counter+1)\n p.remove(v)\n x.add(v)", "def __neg__(self) -> PointType:\n return self * -1", "def getXp(self, Xs_minus, Vs_minus, As_minus):\n return Xs_minus + 
Vs_minus + 0.5*As_minus", "def projectPoint(self,p):\n a,b,c = self.a, self.b, self.c\n x,y = p\n return numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )", "def getProximity(tuples):\n\t\t\tsortedIndices = [indices for indices in tuples]\n\t\t\t#return abs(sortedIndices[0][1] - sortedIndices[-1][0])\n\t\t\treturn sortedIndices[-1][0] - sortedIndices[0][1]", "def test_subtraction__vector_point(self):\n\n a1 = points.Point(3, 2, 1)\n a2 = vectors.Vector(5, 6, 7)\n\n a3 = a1 - a2\n\n self.assertEqual(a3, points.Point(-2, -4, -6))", "def subtract(x, y):\n\n return x - y", "def add_points(self, P, Q):\n xp, yp = P\n xq, yq = Q\n s = ((yq - yp) * self.inv(xq - xp)) % self.fp\n xr = (s ** 2 - xp - xq) % self.fp\n yr = (s * (xp - xr) - yp) % self.fp\n return (xr, yr)", "def inner_point(self, point) -> Vec:\n return self.pos - point", "def fix_point(h, lower, upper):\n return brentq(lambda x: x - h(x), lower, upper)", "def __sub__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.sub)", "def eucdist3d(self,point1,point2):\n#\t\tif not isinstance(point1,np.ndarray):\n#\t\t\tpoint1 = np.array(point1)\n#\t\t\tpoint2 = np.array(point2)\n\t\t\n\t\treturn(((point2[0]-point1[0])**2 + (point2[1]-point1[1])**2 + (point2[2]-point1[2])**2)**0.5)" ]
[ "0.6209783", "0.61296666", "0.6119025", "0.60799325", "0.601719", "0.60124487", "0.5937315", "0.589836", "0.5858705", "0.5838884", "0.58062047", "0.5790279", "0.5783874", "0.5770873", "0.5753462", "0.57291937", "0.56820744", "0.5646534", "0.56439734", "0.5640057", "0.56353885", "0.56164974", "0.5601966", "0.55168587", "0.55000293", "0.54958683", "0.54919064", "0.5491245", "0.5488683", "0.5484912" ]
0.6233835
0
create ECoverGF object. coefficient must be a list of length 5 or 2 (representing a Weierstrass form), and every coefficient must lie in basefield. basefield must be a FiniteField subclass object (i.e. a FinitePrimeField or FiniteExtendedField object).
def __init__(self, coefficient, basefield=None): # parameter parse try: character = basefield.getCharacteristic() field = basefield except AttributeError: # backward compatibility if isinstance(basefield, int): field = finitefield.FinitePrimeField.getInstance(basefield) character = basefield else: raise ValueError("basefield must be FiniteField object.") coeffs_list = [] if isinstance(coefficient, list): for c in coefficient: if isinstance(c, int): coeff = field.createElement(c) elif c in field: coeff = c else: raise ValueError("coefficient not in basefield.") coeffs_list.append(coeff) # general initialize ECGeneric.__init__(self, coeffs_list, field) zero = self.basefield.zero one = self.basefield.one # format attribute if self.ch == 2: if len(self) == 5: # FIXME if coeffs_list[0] % 2 == one and coeffs_list[2] % 2 == coeffs_list[3] % 2 == zero and coeffs_list[4]: self.a1 = one self.a2 = coeffs_list[1] self.a3 = zero self.a4 = zero self.a6 = coeffs_list[4] self.b2 = one self.b4 = zero self.b6 = zero self.b8 = self.a6 self.c4 = one self.c6 = one self.disc = self.a6 self.j = self.disc.inverse() elif coeffs_list[0] % 2 == coeffs_list[1] % 2 == zero and coeffs_list[2]: self.a1 = zero self.a2 = zero self.a3 = coeffs_list[2] self.a4 = coeffs_list[3] self.a6 = coeffs_list[4] self.b2 = zero self.b4 = zero self.b6 = self.a3**2 self.b8 = self.a4**2 self.c4 = zero self.c6 = zero self.disc = self.a3**4 self.j = zero else: raise ValueError("coefficient may be not representation of EC.") else: raise ValueError("coefficient may only use full Weierstrass form for characteristic 2.") elif self.ch == 3: # y^2=x^3+a2*x^2+a6 or y^2=x^3+a4*x+a6 # FIXME if len(self) == 5: if coeffs_list[0] % 3 == coeffs_list[2] % 3 == coeffs_list[3] % 3 == 0 and coeffs_list[1] and coeffs_list[4]: self.a1 = zero self.a2 = coeffs_list[1] self.a3 = zero self.a4 = zero self.a6 = coeffs_list[4] self.b2 = self.a2 self.b4 = zero self.b6 = self.a6 self.b8 = self.a2*self.a6 self.c4 = self.b2**2 self.c6 = 2*self.b2**3 self.disc = -self.a2**3*self.a6 self.j = (-self.a2**3)*self.a6.inverse() elif coeffs_list[0] == coeffs_list[1] == coeffs_list[2] == 0 and coeffs_list[3]: self.a1 = zero self.a2 = zero self.a3 = zero self.a4 = coeffs_list[3] self.a6 = coeffs_list[4] self.b2 = zero self.b4 = 2*self.a4 self.b6 = self.a6 self.b8 = 2*self.a4**2 self.c4 = zero self.c6 = zero self.disc = -self.a4**3 self.j = zero else: raise ValueError("can't defined EC.") if not self.disc: raise ValueError("this curve is singular.") else: raise ValueError("coefficient is less or more, can't defined EC.") else: if len(self) == 5: self.a1 = coeffs_list[0] self.a2 = coeffs_list[1] self.a3 = coeffs_list[2] self.a4 = coeffs_list[3] self.a6 = coeffs_list[4] self.b2 = self.a1**2+4*self.a2 self.b4 = self.a1*self.a3+2*self.a4 self.b6 = self.a3**2+4*self.a6 self.b8 = self.a1**2*self.a6+4*self.a2*self.a6-self.a1*self.a3*self.a4+self.a2*self.a3**2-self.a4**2 self.c4 = self.b2**2-24*self.b4 self.c6 = -self.b2**3+36*self.b2*self.b4-216*self.b6 self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6 if self.disc: self.j = self.c4**3*self.disc.inverse() else: raise ValueError("coefficients creates singular curve.") elif len(self) == 2: self.a = coeffs_list[0] self.b = coeffs_list[1] self.a1 = zero self.a2 = zero self.a3 = zero self.a4 = self.a self.a6 = self.b self.b2 = zero self.b4 = 2*self.a self.b6 = 4*self.b self.b8 = -(self.a**2) self.c4 = -48*self.a self.c6 = -864*self.b self.disc = 
-self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6 if self.disc: self.j = self.c4**3*self.disc.inverse() else: raise ValueError("coefficients creates singular curve.") else: raise ValueError("coefficient is less or more, can't defined EC.") self.ord = None self.abelian = None self.cubic = UniVarPolynomial({0:self.a6, 1:self.a4, 2:self.a2, 3:one}, self.basefield)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, coefficient, basefield=None):\n\n try:\n character = basefield.getCharacteristic()\n self.basefield = basefield\n except:\n # backward compatibility support\n if isinstance(basefield, rational.RationalField) or (not basefield):\n character = 0\n self.basefield = rational.theRationalField\n elif isinstance(basefield, int):\n character = basefield\n if character == 1 or character < 0:\n raise ValueError(\"basefield characteristic must be 0 or prime.\")\n self.basefield = finitefield.FinitePrimeField.getInstance(character)\n else:\n raise ValueError(\"basefield must be FiniteField.\")\n\n self.ch = character\n self.infpoint = [self.basefield.zero]\n if isinstance(coefficient, list):\n self.coefficient = coefficient\n if self.ch == 0:\n if len(self) == 5:\n self.a1 = self.coefficient[0]\n self.a2 = self.coefficient[1]\n self.a3 = self.coefficient[2]\n self.a4 = self.coefficient[3]\n self.a6 = self.coefficient[4]\n self.b2 = self.a1**2+4*self.a2\n self.b4 = self.a1*self.a3+2*self.a4\n self.b6 = self.a3**2+4*self.a6\n self.b8 = self.a1**2*self.a6+4*self.a2*self.a6-self.a1*self.a3*self.a4+self.a2*self.a3**2-self.a4**2\n self.c4 = self.b2**2-24*self.b4\n self.c6 = -self.b2**3+36*self.b2*self.b4-216*self.b6\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n elif len(self) == 2:\n self.a = self.coefficient[0]\n self.b = self.coefficient[1]\n self.a1 = 0\n self.a2 = 0\n self.a3 = 0\n self.a4 = self.coefficient[0]\n self.a6 = self.coefficient[1]\n self.b2 = 0\n self.b4 = 2*self.a\n self.b6 = 4*self.b\n self.b8 = -self.a**2\n self.c4 = -48*self.a\n self.c6 = -864*self.b\n self.disc = (self.c4**3-self.c6**2)/1728\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n if self.disc == 0:\n raise ValueError(\"this curve is singular.\")\n self.j = (self.c4**3)/self.disc\n self.cubic = UniVarPolynomial({0:self.a6, 1:self.a4,\n 3:self.basefield.one},\n self.basefield)\n else:\n pass # support for subclass\n else:\n raise ValueError(\"parameters must be (coefficient, basefield)\")", "def EC(coefficient, basefield=None):\n try:\n character = basefield.getCharacteristic()\n field = basefield\n except:\n # backward compatiblity\n if isinstance(basefield, rational.RationalField) or not basefield:\n field = rational.RationalField\n character = 0\n elif isinstance(basefield, int):\n field = finitefield.FinitePrimeField(basefield)\n character = basefield\n else:\n raise ValueError(\"basefield must be RationalFieid or FiniteField.\")\n\n if isinstance(coefficient, list):\n if not character:\n return ECoverQ(coefficient)\n else:\n return ECoverGF(coefficient, field)", "def build(self):\n\n # Create a custom grid, fe_set \n nfe = 6\n fe_a = 1/4.0\n fe_b = 0.2\n fe_set = [0, 0.004]\n for i in range(1,nfe+1):\n if i < nfe*fe_a:\n fe_set.append(i*fe_b/(nfe*fe_a))\n elif i == nfe: \n fe_set.append(1)\n else:\n fe_set.append(fe_b + (i-nfe*fe_a)*(1-fe_b)/(nfe*(1-fe_a)))\n\n \"\"\"\n Args:\n dae_method = method to use for calcuating derivatives (default = OCLR)\n - BFD1 - 1st order backwards finite difference\n - OCLR - Orthogonal collocation, Lagrange-Radau\n - OCLL - Orthogonal collocation, Lagrange-Legendre\n press_drop = Pressure drop correlation for superficial velocity calc.\n - SimplifiedP - simplified pressure correlations \n - Ergun - Ergun equation\n fe_set = set of normalised finite element locations\n nfe = number of finite elements for bed discretization (default = 15)\n (not used if fe_set specified)\n ncp = number of 
collocation points (OCLR or OCLL only, default = 3)\n \"\"\" \n\n # Create unit model for fuel reactor\n self.MB_fuel = MB_CLC_fuel.MB(\n parent=self,\n dae_method = 'OCLR',\n press_drop = 'Ergun',\n fe_set = fe_set,\n ncp = 3)", "def __init__(self, base=None, mvtype=None, fct=False, blade_rep=False):\n\n def make_scalar(self, base): # make a scalar (grade 0)\n if isinstance(base, str):\n if self.fct:\n self.obj = Function(base)(*MV.coords) * MV.ONE\n else:\n self.obj = make_coef(self, base) * MV.ONE\n else:\n self.obj = base * MV.ONE\n self.igrade = 0\n self.blade_rep = True\n return self\n\n def make_vector(self, base): # make a vector (grade 1)\n if isinstance(base, str):\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=1, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[1]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=1, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=1, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[1]))))\n else:\n result = S.Zero\n for (coef, base) in zip(base, MV.blades[1]):\n result += coef * base\n self.obj = result\n self.igrade = 1\n self.blade_rep = True\n return self\n\n def make_basisvector(self, base):\n raise NotImplementedError(\"Don't know how to compute basis vectors of class %\" % self.__class__)\n\n def make_basisbivector(self, base):\n raise NotImplementedError(\"Don't know how to compute basis bivectors of class %\" % self.__class__)\n\n def make_grade(self, base): # if base is 'A,n' then make a grade n multivector\n if isinstance(base, str):\n base_lst = base.split(',')\n base = base_lst[0]\n n = int(base_lst[1])\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=n, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[n]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=n, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=n, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[n]))))\n else:\n raise TypeError('Cannot make_grade for base = %s' % base)\n self.igrade = n\n self.blade_rep = True\n return self\n\n def make_grade2(self, base): # grade 2 multivector\n if isinstance(base, str):\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=2, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[2]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=2, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=2, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[2]))))\n else:\n raise TypeError('!!!!Cannot make_grade2 for base = ' + str(base) + '!!!!\\n')\n self.igrade = 2\n self.blade_rep = True\n return self\n\n def make_pseudo(self, base): # multivector of grade MV.dim\n if isinstance(base, str):\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=MV.dim, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj = 
reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[MV.dim]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=MV.dim, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=MV.dim, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[MV.dim]))))\n else:\n raise TypeError('!!!!Cannot make_pseudo for base = ' + str(base) + '!!!!\\n')\n self.igrade = MV.dim\n self.blade_rep = True\n return self\n\n def make_spinor(self, base): # multivector with all even grades\n if isinstance(base, str):\n if self.fct:\n self.obj = Function(base)(*MV.coords) * MV.ONE\n else:\n self.obj = Symbol(base) * MV.ONE\n for rank in range(2, MV.dim1, 2):\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=rank, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj += reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[rank]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=rank, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=rank, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj += reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[rank]))))\n else:\n raise TypeError('Cannot make_mv for base = %s' % base)\n self.igrade = -1\n self.blade_rep = True\n return self\n\n def make_mv(self, base):\n if isinstance(base, str):\n if self.fct:\n self.obj = Function(base)(*MV.coords) * MV.ONE\n else:\n self.obj = Symbol(base) * MV.ONE\n for rank in range(1, MV.dim1):\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=rank, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj += reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[rank]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=rank, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=rank, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj += reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[rank]))))\n else:\n raise TypeError('!!!!Cannot make_mv for base = ' + str(base) + '!!!!\\n')\n self.igrade = -1\n self.blade_rep = True\n return self\n\n MVtypes = {'scalar': make_scalar,\n 'vector': make_vector,\n 'basisvector': make_basisvector,\n 'basisbivector': make_basisbivector,\n 'grade': make_grade,\n 'grade2': make_grade2,\n 'bivector': make_grade2,\n 'pseudo': make_pseudo,\n 'spinor': make_spinor,\n 'mv': make_mv}\n\n self.fct = fct\n self.is_base = False\n self.is_grad = False\n self.print_blades = MV.print_blades\n self.fmt = 1\n\n if mvtype is None:\n if base in (None, S.Zero): # Default is zero multivector\n self.blade_rep = True\n self.obj = S.Zero\n self.igrade = 0\n elif isinstance(base, str): # Base or blade basis multivector\n self.is_base = True\n if '*' in base:\n self.blade_rep = False\n self.igrade = -1\n else:\n if '^' in base:\n self.blade_rep = True\n self.igrade = base.count('^') + 1\n else:\n self.blade_rep = blade_rep\n self.igrade = 1\n self.obj = Symbol(base, commutative=False)\n elif isinstance(base, MV): # Copy constructor\n self.blade_rep = base.blade_rep\n self.obj = base.obj\n self.igrade = base.igrade\n self.fct = base.fct\n self.is_base = base.is_base\n self.is_grad = base.is_grad\n elif isinstance(base, (Expr, Symbol)): # Gets properties of 
multivector from Expr\n if base.is_commutative:\n self.obj = base * MV.ONE\n self.blade_rep = True\n self.igrade = 0\n else:\n if isinstance(base, (Add, Mul)): # Complex expression\n MV.characterize_expression(self, base)\n elif isinstance(base, Symbol):\n if not base.is_commutative:\n if base == MV.ONE:\n self.obj = base\n self.blade_rep = True\n self.igrade = 0\n elif base in MV.blades_flat: # basis blade\n self.obj = base\n self.blade_rep = True\n self.igrade = MV.blade_grades[base]\n elif base in MV.bases_flat: # basis base\n self.obj = base\n self.blade_rep = False\n self.igrade = -1\n else:\n raise ValueError('MV(' + str(base) + ') is not allowed in constructor\\n' +\n 'non-commutative argument is not a base\\n')\n else: # scalar sympy symbol\n self.obj = base * MV.ONE\n self.igrade = 0\n self.blade_rep = True\n elif isinstance(base, Number):\n self.obj = base * MV.ONE\n self.igrade = 0\n self.blade_rep = True\n else: # Preconfigured multivector types\n MVtypes[mvtype](self, base)", "def __repr__(self):\n return \"EC(%s, %s)\" % (str(self.coefficient), repr(self.basefield))", "def __init__(self):\n super().__init__()\n self.lambdaVar = 1.0\n self.low = 0.0\n self.type = 'Exponential'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__ (self, equ_type='none' , extra='none'):\n self.equ_type = self.set_equation_type(equ_type)\n self.coeffs = []\n self.extra = str(extra)", "def __init__(self, coef_list):\n assert type(coef_list) is list, 'error message indicating that coef is not a list'\n self.degree = len(coef_list) - 1\n self.coefs = []\n for coef in coef_list:\n self.coefs.append(coef)", "def __init__(self, F, m, B, a=None):\n if a is None: # don't make the stupid noob mistake of putting a=[]\n a = [] # in the function signature above.\n\n # Initialize constants.\n self.m = m\n d = F.degree()\n self.d = d\n self.n = m*d\n self.B = B\n self.gamma = hermite_constant(self.n-self.d)\n\n self.F = F\n self.Z_F = F.maximal_order()\n self.Foo = F.real_embeddings()\n self.dF = abs(F.disc())\n self.Fx = PolynomialRing(F, 'xF')\n\n self.beta = [[]]*m\n self.gnk = [[]]*m\n\n self.trace_elts = []\n\n Z_Fbasis = self.Z_F.basis()\n\n # Initialize variables.\n if a == []:\n # No starting input, all polynomials will be found; initialize to zero.\n self.a = [0]*m + [1]\n self.amaxvals = [[]]*m\n anm1s = [[i] for i in range(0,m//2+1)]\n for i in range(1,self.d):\n for j in range(len(anm1s)):\n anm1s[j] = [ anm1s[j] + [i] for i in range(m)]\n anm1s = sum(anm1s, [])\n anm1s = [sum([Z_Fbasis[i]*a[i] for i in range(self.d)]) for a in anm1s]\n # Minimize trace in class.\n import numpy\n for i in range(len(anm1s)):\n Q = [ [ v(m*x) for v in self.Foo] + [0] for x in Z_Fbasis] + [[v(anm1s[i]) for v in self.Foo] + [10**6]]\n pari_string = '['+';'.join([','.join([\"%s\"%ii for ii in row]) for row in zip(*Q)])+']'\n adj = pari(pari_string).qflll()[self.d]\n anm1s[i] += sum([m*Z_Fbasis[ii]*int(adj[ii])//int(adj[self.d]) for ii in range(self.d)])\n\n self.amaxvals[m-1] = anm1s\n self.a[m-1] = self.amaxvals[m-1].pop()\n self.k = m-2\n\n bl = math.ceil(1.7719*self.n)\n br = max([1./m*(am1**2).trace() + \\\n self.gamma*(1./(m**d)*self.B/self.dF)**(1./(self.n-d)) for am1 in anm1s])\n br = math.floor(br)\n T2s = self.F._positive_integral_elements_with_trace([bl,br])\n self.trace_elts.append([bl,br,T2s])\n\n elif len(a) <= m+1:\n # First few coefficients have been specified.\n # The 
value of k is the largest index of the coefficients of a which is\n # currently unknown; e.g., if k == -1, then we can iterate\n # over polynomials, and if k == n-1, then we have finished iterating.\n if a[len(a)-1] != 1:\n raise ValueError(\"a[len(a)-1](=%s) must be 1 so polynomial is monic\"%a[len(a)-1])\n\n raise NotImplementedError(\"These have not been checked.\")\n\n k = m-len(a)\n self.k = k\n a = [0]*(k+1) + a\n self.amaxvals = [[]]*m\n for i in range(0,n+1):\n self.a[i] = a[i]\n\n # Bounds come from an application of Lagrange multipliers in degrees 2,3.\n self.b_lower = [-1./m*(v(self.a[m-1]) +\n (m-1.)*math.sqrt(v(self.a[m-1])**2 - 2.*(1+1./(m-1))*v(self.a[m-2]))) for v in self.Foo]\n self.b_upper = [-1./m*(v(self.a[m-1]) -\n (m-1.)*math.sqrt(v(self.a[m-1])**2 - 2.*(1+1./(m-1))*v(self.a[m-2]))) for v in self.Foo]\n if k < m-2:\n bminmax = [lagrange_degree_3(n,v(self.a[m-1]),v(self.a[m-2]),v(self.a[m-3])) for v in self.Foo]\n self.b_lower = bminmax[0]\n self.b_upper = bminmax[1]\n\n # Annoying, but must reverse coefficients for numpy.\n gnk = [binomial(j,k+2)*a[j] for j in range(k+2,n+1)]\n self.beta[k+1] = [[self.b_lower] + numpy.roots([v(gnk[i]) for i in range(len(gnk))].reverse()).tolist().sort() + [self.b_upper] for v in self.Foo]\n\n # Now to really initialize gnk.\n self.gnk[k+1] = [[0] + [binomial(j,k+1)*v(a[j]) for j in range (k+2,m+1)] for v in self.Foo]\n else:\n # Bad input!\n raise ValueError(\"a has length %s > m+1\"%len(a))", "def createDeformationPenaltyObjectiveField(self, deformActiveMeshGroup, strainActiveMeshGroup,\n curvatureActiveMeshGroup):\n if deformActiveMeshGroup.getSize() == 0:\n return None\n applyStrainPenalty = strainActiveMeshGroup.getSize() > 0\n applyCurvaturePenalty = curvatureActiveMeshGroup.getSize() > 0\n if not (applyStrainPenalty or applyCurvaturePenalty):\n return None\n numberOfGaussPoints = 3\n fieldmodule = self._fitter.getFieldmodule()\n mesh = self._fitter.getHighestDimensionMesh()\n modelCoordinates = self._fitter.getModelCoordinatesField()\n modelReferenceCoordinates = self._fitter.getModelReferenceCoordinatesField()\n fibreField = self._fitter.getFibreField()\n dimension = mesh.getDimension()\n coordinatesCount = modelCoordinates.getNumberOfComponents()\n assert (coordinatesCount == dimension) or fibreField, \\\n \"Must supply a fibre field to use strain/curvature penalties with mesh dimension < coordinate components.\"\n deformationGradient1 = deformationGradient1raw = fieldmodule.createFieldGradient(\n modelCoordinates, modelReferenceCoordinates)\n fibreAxes = None\n fibreAxesT = None\n if fibreField:\n # convert to local fibre directions, with possible dimension reduction for 2D, 1D\n fibreAxes = fieldmodule.createFieldFibreAxes(fibreField, modelReferenceCoordinates)\n if not fibreAxes.isValid():\n self.getFitter().printLog()\n if dimension == 3:\n fibreAxesT = fieldmodule.createFieldTranspose(3, fibreAxes)\n elif dimension == 2:\n fibreAxesT = fieldmodule.createFieldComponent(\n fibreAxes, [1, 4, 2, 5, 3, 6] if (coordinatesCount == 3) else [1, 4, 2, 5])\n else: # dimension == 1\n fibreAxesT = fieldmodule.createFieldComponent(\n fibreAxes, [1, 2, 3] if (coordinatesCount == 3) else [1, 2] if (coordinatesCount == 2) else [1])\n deformationTerm = None\n if applyStrainPenalty:\n # large strain\n if fibreField:\n deformationGradient1 = fieldmodule.createFieldMatrixMultiply(\n coordinatesCount, deformationGradient1raw, fibreAxesT)\n deformationGradient1T = fieldmodule.createFieldTranspose(coordinatesCount, deformationGradient1)\n C = 
fieldmodule.createFieldMatrixMultiply(dimension, deformationGradient1T, deformationGradient1)\n alpha = self._fitter.getStrainPenaltyField()\n I = fieldmodule.createFieldConstant(\n [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0] if (dimension == 3) else\n [1.0, 0.0, 0.0, 1.0] if (dimension == 2) else\n [1.0])\n E2 = C - I\n wtSqE2 = fieldmodule.createFieldDotProduct(alpha, E2 * E2)\n deformationTerm = wtSqE2\n if applyCurvaturePenalty:\n # second order Sobolev smoothing terms\n # don't do gradient of deformationGradient1 with fibres due to slow finite difference evaluation\n deformationGradient2 = fieldmodule.createFieldGradient(deformationGradient1raw, modelReferenceCoordinates)\n if fibreField:\n # convert to local fibre directions\n deformationGradient2a = fieldmodule.createFieldMatrixMultiply(\n coordinatesCount*coordinatesCount, deformationGradient2, fibreAxesT)\n # transpose each deformation component of deformationGradient2a to remultiply by fibreAxesT\n if dimension == 1:\n deformationGradient2aT = deformationGradient2a\n else:\n transposeComponents = None\n if coordinatesCount == 3:\n if dimension == 3:\n transposeComponents = [1, 4, 7, 2, 5, 8, 3, 6, 9,\n 10, 13, 16, 11, 14, 17, 12, 15, 18,\n 19, 22, 25, 20, 23, 26, 21, 24, 27]\n elif dimension == 2:\n transposeComponents = [1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12, 13, 15, 17, 14, 16, 18]\n elif coordinatesCount == 2:\n transposeComponents = [1, 3, 2, 4, 5, 7, 6, 8]\n deformationGradient2aT = \\\n fieldmodule.createFieldComponent(deformationGradient2a, transposeComponents)\n deformationGradient2 = fieldmodule.createFieldMatrixMultiply(\n dimension*coordinatesCount, deformationGradient2aT, fibreAxesT)\n beta = self._fitter.getCurvaturePenaltyField()\n wtSqDeformationGradient2 = \\\n fieldmodule.createFieldDotProduct(beta, deformationGradient2*deformationGradient2)\n deformationTerm = \\\n (deformationTerm + wtSqDeformationGradient2) if deformationTerm else wtSqDeformationGradient2\n if not deformationTerm.isValid():\n self.getFitter().printLog()\n raise AssertionError(\"Scaffoldfitter: Failed to get deformation term\")\n\n deformationPenaltyObjective = fieldmodule.createFieldMeshIntegral(\n deformationTerm, self._fitter.getModelReferenceCoordinatesField(), deformActiveMeshGroup)\n deformationPenaltyObjective.setNumbersOfPoints(numberOfGaussPoints)\n return deformationPenaltyObjective", "def make_field(self):\n uniaxial = self.u[0]*self.u[1]*self.u[2] != 0\n cubic = self.c1[0]*self.c1[1]*self.c1[2]*self.c2[0]*self.c2[1]*self.c2[2] != 0\n @nb.njit\n def field_func(m):\n heff = self.hext + field.demagnetization(m, self.Nd)\n if uniaxial:\n heff += field.uniaxial_anisotropy(m, self.u, self.hu1, self.hu2)\n if cubic:\n heff += field.cubic_anisotropy(m, self.c1, self.c2, self.c3, self.hc1, self.hc2)\n return heff\n self.field = field_func", "def __init__(self,\r\n gibbs_e=None,\r\n internal_e=None,\r\n enthalpy_e=None,\r\n helmholtz_e=None,\r\n electronic_e=None,\r\n zero_point_e=None,\r\n Cv_trans_term=None,\r\n Cv_rot_term=None,\r\n Cv_vib_term=None,\r\n Cv_to_Cp=None,\r\n entropy_term=None,\r\n PV_term=None,\r\n\r\n # main_energy=\"gibbs\",\r\n ):\r\n #| - __init__\r\n self.gibbs_e = gibbs_e\r\n self.internal_e = internal_e\r\n self.enthalpy_e = enthalpy_e\r\n self.helmholtz_e = helmholtz_e\r\n\r\n self.electronic_e = electronic_e\r\n self.zero_point_e = zero_point_e\r\n\r\n self.Cv_trans_term = Cv_trans_term\r\n self.Cv_rot_term = Cv_rot_term\r\n self.Cv_vib_term = Cv_vib_term\r\n self.Cv_to_Cp = Cv_to_Cp\r\n\r\n self.entropy_term = 
entropy_term\r\n self.PV_term = PV_term\r\n\r\n if self.internal_e is None:\r\n self.internal_e = self.calc_internal_energy()\r\n\r\n if self.enthalpy_e is None:\r\n self.enthalpy_e = self.calc_enthalpy_energy()\r\n\r\n if self.gibbs_e is None:\r\n self.gibbs_e = self.calc_gibbs_free_energy()\r\n #__|\r", "def _new_ncc(self):\n # is this used at all in equations.py (other than rxn), or just in atmospheres?\n # the naming conventions here force cartesian, generalize to spheres etc. make sense?\n # should \"necessary quantities\" logic occur here?\n field = self.domain.new_field()\n if self.dimensions > 1:\n field.meta['x']['constant'] = True\n if self.dimensions > 2:\n field.meta['y']['constant'] = True \n return field", "def __init__(self,\n nalpha: int,\n nbeta: int,\n norb: int,\n fcigraph: Optional[FciGraph] = None,\n dtype: 'Dtype' = numpy.complex128) -> None:\n validate_config(nalpha, nbeta, norb)\n\n if not (fcigraph is None) and (nalpha != fcigraph.nalpha() or\n nbeta != fcigraph.nbeta() or\n norb != fcigraph.norb()):\n raise ValueError(\"FciGraph does not match other parameters\")\n\n if fcigraph is None:\n self._core = FciGraph(nalpha, nbeta, norb)\n else:\n self._core = fcigraph\n self._dtype = dtype\n\n if fqe.settings.use_accelerated_code:\n # Use the same C extension for both cases by default\n self._low_thresh = 0.0\n else:\n self._low_thresh = 0.3\n self._nele = self.nalpha() + self.nbeta()\n self._m_s = self.nalpha() - self.nbeta()\n self.coeff = numpy.zeros((self.lena(), self.lenb()), dtype=self._dtype)", "def __init__(self, atomlist, atomtypes, partial_charges, lattice_vectors,\n chromophores, verbose=1, **kwds):\n import ff\n self.force_field = ff.ForceField(atomlist, atomtypes, partial_charges,\n lattice_vectors, chromophores, verbose=verbose, **kwds)", "def __init__(self):\n GinacFunction.__init__(self, \"real_part\",\n conversions=dict(maxima='realpart',\n sympy='re'),\n alt_name=\"real\")", "def __init__(self):\n super().__init__()\n self.lambdaVar = 1.0\n self.k = 1.0\n self.type = 'Weibull'\n self.distType = 'Continuous'\n self.low = 0.0\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, x=None, f=np.inf, evals=None):\r\n self.x = x\r\n self.x_geno = None\r\n self.f = f if f is not None and f is not np.nan else np.inf\r\n self.evals = evals\r\n self.evalsall = evals\r\n self.last = BlancClass()\r\n self.last.x = x\r\n self.last.f = f", "def create_object(self, version, key, **kwds):\n R, x = key\n\n if R is rings.QQ:\n from .ell_rational_field import EllipticCurve_rational_field\n return EllipticCurve_rational_field(x, **kwds)\n elif is_NumberField(R):\n from .ell_number_field import EllipticCurve_number_field\n return EllipticCurve_number_field(R, x)\n elif rings.is_pAdicField(R):\n from .ell_padic_field import EllipticCurve_padic_field\n return EllipticCurve_padic_field(R, x)\n elif is_FiniteField(R) or (is_IntegerModRing(R) and R.characteristic().is_prime()):\n from .ell_finite_field import EllipticCurve_finite_field\n return EllipticCurve_finite_field(R, x)\n elif R in _Fields:\n from .ell_field import EllipticCurve_field\n return EllipticCurve_field(R, x)\n from .ell_generic import EllipticCurve_generic\n return EllipticCurve_generic(R, x)", "def __init__(self):\n super().__init__()\n self.n = 0.0\n self.p = 0.0\n self.type = 'Binomial'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n 
self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, coeff, expt):\n # assign a attribute to the object.\n \n self.coeff = coeff\n \n if self.coeff.is_nan() :\n self.expt = 0\n elif self.is_zero() == False :\n self.expt = int(expt)\n else :\n self.expt = 0", "def __init__(self, num_radial, cutoff, envelope_exponent=6) -> None:\n\n super(BesselBasisLayer, self).__init__()\n self.cutoff = cutoff\n self.envelope = Envelope(envelope_exponent)\n\n self.freq = torch.nn.Parameter(torch.Tensor(num_radial))\n\n self.reset_parameters()", "def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, buck=None, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n if buck is None:\n self.buck_pms = []\n else:\n self.buck_pms = [] # TODO:\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, self.eps_sh,\n self.eps_ult, *self.buck_pms, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def __init__(self, coefficients):\n self.coefficients = coefficients", "def from_field(\n cls, field, R, phi, Z, params={}, method=\"cubic\", extrap=False, period=None\n ):\n R, phi, Z = map(np.asarray, (R, phi, Z))\n rr, pp, zz = np.meshgrid(R, phi, Z, indexing=\"ij\")\n shp = rr.shape\n coords = np.array([rr.flatten(), pp.flatten(), zz.flatten()]).T\n BR, BP, BZ = field.compute_magnetic_field(coords, params, basis=\"rpz\").T\n return cls(\n R,\n phi,\n Z,\n BR.reshape(shp),\n BP.reshape(shp),\n BZ.reshape(shp),\n method,\n extrap,\n period,\n )", "def __init__(self):\n gr.sync_block.__init__(\n self,\n name='e_dd_ff', # will show up in GRC\n in_sig=[np.complex64,np.complex64],\n out_sig=[np.complex64]\n )", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def __init__(self, channel_group=None, gain_provider=None, name=None,\n floating=False, field=None, derivative_order=0):\n\n self.field = str(field)\n self.is_floating = bool(floating)\n self.derivative_order = int(derivative_order)\n super().__init__(channel_group=channel_group,\n gain_provider=gain_provider,\n name=name)", "def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, lsr, alpha=1.0, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n self.lsr = float(lsr)\n self.alpha = float(alpha)\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = self.mp_curve.setdefault('r1', 
0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, self.eps_sh, self.eps_ult, '-DMBuck', self.lsr, self.alpha, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Geometric'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'" ]
[ "0.7700947", "0.7550249", "0.5849628", "0.57276404", "0.5529813", "0.54793984", "0.5438513", "0.5421472", "0.5373099", "0.53684545", "0.5364152", "0.53532964", "0.53478926", "0.5296262", "0.5295814", "0.5292675", "0.5222808", "0.5217962", "0.5138603", "0.51261836", "0.5115176", "0.51071393", "0.5104221", "0.51041394", "0.5102109", "0.5088845", "0.50718653", "0.50694245", "0.50662345", "0.50517905" ]
0.77263165
0
Return (#E(F_q) mod 2, 2). char(F_q) > 3 is required. For odd characteristic > 3, t = #E(F_q) mod 2 is determined by gcd(self.cubic, X^q - X) == 1 <=> t = 1 mod 2.
def _Schoof_mod2(self): if not self.b: result = 0 _log.debug("(%d, 2) #" % result) else: linearfactors = UniVarPolynomial({card(self.basefield):self.basefield.one, 1:-self.basefield.one}, self.basefield) if GCD(self.cubic, linearfactors).degree() == 0: result = 1 _log.debug("(%d, 2) ##" % result) else: result = 0 _log.debug("(%d, 2) ###" % result) return (result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cdq2(f, x, h=1e-5):\n return (f(x+h)-f(x-h))/(2*h)\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def fdq2(f, x, h=1e-5):\n return (-3*f(x) + 4*f(x+h) - f(x+2*h))/(2*h)\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def _qod_func(self, q):\n if self.qodulus is None:\n return q\n else:\n return q % self.qodulus", "def chi_c_real(params):\n Qi = Q_i(params)\n Qc = params['Q_e_real'].value\n return ((4 * Qc * Qi) /\n (Qc + Qi) ** 2)", "def __q2v_cf(self, w, rhom, q):\n return float(q / (rhom + q / w))", "def compute(self, P, Q, ec):\n f = Fp2Element(1, 0)\n V = ec.aToJ(P)\n nP = ec.neg(P)\n n = ec.r - 1\n b = naf(ec.r, 2)\n for i in range(len(b) - 2, -1, -1):\n u = self.encDouble(self, V, Q, ec)\n # f = f^2 * u % prime\n f = Fp2Element.multFp2(Fp2Element, Fp2Element.squareFp2(Fp2Element, f, ec.q), u, ec.q)\n if b[i] == 1:\n u = self.encAdd(self, V, P, Q, ec)\n f = Fp2Element.multFp2(Fp2Element, f, u, ec.q)\n if b[i] == -1:\n u = self.encAdd(self, V, nP, Q, ec)\n f = Fp2Element.multFp2(Fp2Element, f, u, ec.q)\n\n finalExp = (ec.q + 1) // ec.r\n conj = Complex.conjugate(f, ec.q)\n f = Complex.divide(conj, f, ec.q)\n return Complex.complexPow(f, finalExp, ec)", "def Q2C(self, q):\n\n #q = q.squeeze();\n C = np.empty((3,3));\n\tC[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);\n\tC[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));\n\tC[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));\n\n\tC[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));\n\tC[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);\n\tC[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));\n\n\tC[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));\n\tC[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));\n\tC[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);\n\n return C", "def g_q_2_hisano(self, mchi):\n w = self.MW**2/mchi**2\n def gT2(x):\n bx = np.sqrt(1-x/4+0*1j)\n out = np.real_if_close(1/bx/4 * x * (2 - 4*x + x**2) * np.arctan(2*bx/np.sqrt(x))\\\n - np.sqrt(x)/4 * (1 - 2*x - x*(2-x)*np.log(x)))\n return out\n return (self.alpha)**2/(self.sw**4) * ((self.dchi**2 - 1)/(8*self.MW**3) * gT2(w))", "def cdq4(f, x, h=1e-5):\n return (f(x-2*h) - 8*f(x-h) + 8*f(x+h) - f(x+2*h))/(12*h)\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def f_o(self, z):\n\t return exp(-(z/self.MU)**self.CIRC_3)", "def f_o(self, z):\n\t return exp(-(z/self.MU)**self.CIRC_3)", "def _Schoof_mod_l(self, l):\n if l == 2:\n return self._Schoof_mod2()\n E = self.cubic\n D = self.division_polynomials\n lth_div = self.division_polynomials[l]\n field = self.basefield\n bfsize = card(field)\n x = UniVarPolynomial({1:field.one}, field)\n k = bfsize % l\n x_frob = PolyPow(x, bfsize, lth_div) #x_frob=x^q\n x_frobfrob = PolyPow(x_frob, bfsize, lth_div) #x_frobfrob=x^{q^2}\n\n # test for x^{q^2} - x\n f, P = self._sub1(k, x_frobfrob - x, lth_div)\n f0, f3 = f[0], f[3]\n\n if GCD(lth_div, P).degree() > 0:\n if arith1.legendre(k, l) == -1:\n _log.debug(\"%s $\" % str((0, l)))\n return (0, l)\n\n # arith1.legendre(k, l) == 1 <=> k is QR\n w = arith1.modsqrt(k, l)\n f, P = self._sub1(w, x_frob - x, lth_div)\n\n if GCD(lth_div, P).degree() == 0: # coprime\n _log.debug(\"%s $$$$\" % str((0, l)))\n return (0, l)\n\n # there exist non trivial common divisors\n g0 = PolyPow(E, (bfsize - 1) // 2, lth_div) #y^(q-1)\n P = self._sub2(w, g0, f[3], lth_div)\n\n if GCD(lth_div, P).degree() > 0:\n _log.debug(\"%s $$\" % str((2*w % l, l)))\n return (2*w % l, l)\n else:\n _log.debug(\"%s $$$\" % str((-2*w % l, l)))\n return (-2*w % l, l)\n\n else: # coprime (GCD(P, 
lth_div).degree() == 0)\n Y = x - x_frobfrob\n g0 = PolyPow(E, (bfsize - 1) // 2, lth_div) #y^(q-1)\n g1 = PolyPow(g0, bfsize + 1, lth_div) #y^(q^2-1)\n f = -self._sub2(k, g1, f3, lth_div)\n h1 = PolyMulRed([f, f], lth_div)\n if k % 2 == 0:\n g = (PolyMulRed([Y, E, f3], lth_div) - f0) * 4\n h0 = PolyMulRed([g, g], lth_div)\n aux1 = PolyMulRed([f0, h0], lth_div) + h1\n X_d = PolyMulRed([E, f3, h0], lth_div)\n else:\n g = (PolyMulRed([Y, f3], lth_div) - PolyMulRed([E, f0], lth_div)) * 4\n h0 = PolyMulRed([g, g], lth_div)\n aux1 = PolyMulRed([E, PolyMulRed([f0, h0], lth_div) + h1], lth_div)\n X_d = PolyMulRed([f3, h0], lth_div)\n X_n = PolyMulRed([X_d, x_frobfrob + x_frob + x], lth_div) - aux1\n\n # loop of t\n e_q = PolyPow(self.cubic, bfsize, lth_div)\n for t in range(1, (l - 1)//2 + 1):\n Z_d_x, Z_n_x = self._Z_x(t, D, e_q, bfsize, lth_div)\n # X_n * Z_d_x == X_d * Z_n_x (mod lth_div)?\n if not PolyMod(X_n * Z_d_x - X_d * Z_n_x, lth_div):\n break\n else: # loop of t exhausted\n _log.debug(\"%s @@@\" % str((0, l)))\n return (0, l)\n\n # found: X_n * Z_d_x == X_d * Z_n_x (mod lth_div)\n y0 = PolyMulRed([-2*x_frobfrob - x, X_d], lth_div) + aux1\n if k % 2 == 0:\n Y_d = PolyMulRed([E, D[k], g, X_d], lth_div)\n else:\n Y_d = PolyMulRed([D[k], g, X_d], lth_div)\n Y_n = -PolyMulRed([g1, Y_d], lth_div) - PolyMulRed([f, y0], lth_div)\n Z_d_y, Z_n_y = self._Z_y(t, D, g0, bfsize, lth_div)\n\n # Y_n * Z_d_y == Y_d * Z_n_y (mod lth_div)?\n if PolyMod(Y_n * Z_d_y - Y_d * Z_n_y, lth_div):\n _log.debug(\"%s @@\" % str((l-t, l)))\n return (l-t, l)\n else:\n _log.debug(\"%s @\" % str((t, l)))\n return (t, l)", "def terms_gcd(f):\n J, F = dmp_terms_gcd(f.rep, f.lev, f.dom)\n return J, f.per(F)", "def eccentricity(self):\n return sqrt(self.f * 2 - self.f ** 2)", "def sqf_part(self, f):\n domain = self.domain\n\n if domain.is_FiniteField:\n g = self.one\n for f, _ in self.sqf_list(f)[1]:\n g *= f\n\n return g\n\n if not f:\n return f\n\n gcd = f\n for x in self.gens:\n gcd = self.gcd(gcd, f.diff(x))\n sqf = f // gcd\n\n if domain.is_Field:\n return sqf.monic()\n return sqf.primitive()[1]", "def f_q_hisano(self, mchi):\n w = self.MW**2/mchi**2\n def gH(x):\n bx = np.sqrt(1-x/4+0*1j)\n out = np.real_if_close(-2/bx * (2 + 2*x - x**2) * np.arctan(2*bx/np.sqrt(x))\\\n + 2*np.sqrt(x) * (2 - x*np.log(x)))\n return out\n return (self.alpha)**2/(4*self.Mh**2*self.sw**4) * ((self.dchi**2 - 1)/(8*self.MW) * gH(w))", "def get_chisq(g, nu, g_h, bins, bin_num2, ig_num): # checked 2017-7-9!!!\n G_h = g - nu * g_h\n num = numpy.histogram(G_h, bins)[0]\n n1 = numpy.flip(num[0:bin_num2],axis=0)\n n2 = num[bin_num2:]\n xi = (n1 - n2) ** 2 / (n1 + n2)\n return numpy.sum(xi[:len(xi)-ig_num]) * 0.5,n1,n2", "def pchisq(x, df):\n \n if df % 2 == 0:\n dchi = 0.5 * math.exp(-0.5 * x)\n f = 1.0 - 2.0 * dchi\n for i in range(4, df + 1, 2):\n dchi *= x / (i - 2)\n f -= 2.0 * dchi\n \n else:\n f = 2.0 * pnorm(math.sqrt(x), 0.0, 1.0) - 1.0\n dchi = math.exp(-0.5 * x) / math.sqrt(2.0 * math.pi * x)\n for i in range(3, df + 1, 2):\n dchi *= x / (i - 2)\n f -= 2.0 * dchi\n \n return f", "def Qc(I, dT, a, b, c, d, e, f, g, h, i, k):\n x1 = I # I\n x2 = dT # dT\n m = (i * x1 ** 4 + a * x1 ** 3 + b * x1 ** 2 + c * x1 + d)\n b = (k * x1 ** 4 + e * x1 ** 3 + f * x1 ** 2 + g * x1 + h)\n return m * x2 + b", "def ADM_QCD2(nf):\n # Mixing of Q_1^(7) into Q_{5,q}^(7) and Q_2^(7) into Q_{6,q}^(7), from Hill et al. [1409.8290]. Note that we have different prefactors and signs. 
\n gamma_gq = -32/3\n # Mixing of Q_3^(7) into Q_{7,q}^(7) and Q_4^(7) into Q_{8,q}^(7), from Hill et al. [1409.8290]. Note that we have different prefactors and signs. \n gamma_5gq = 8\n gamma_QCD2_gq = np.array([5*[gamma_gq]])\n gamma_QCD2_5gq = np.array([5*[gamma_5gq]])\n gamma_QCD2_1 = np.zeros((34,154))\n gamma_QCD2_2 = np.hstack((np.zeros((1,38)),gamma_QCD2_gq,np.zeros((1,111))))\n gamma_QCD2_3 = np.hstack((np.zeros((1,46)),gamma_QCD2_gq,np.zeros((1,103))))\n gamma_QCD2_4 = np.hstack((np.zeros((1,54)),gamma_QCD2_5gq,np.zeros((1,95))))\n gamma_QCD2_5 = np.hstack((np.zeros((1,62)),gamma_QCD2_5gq,np.zeros((1,87))))\n gamma_QCD2_6 = np.zeros((116,154))\n gamma_QCD2 = [np.vstack((gamma_QCD2_1, gamma_QCD2_2, gamma_QCD2_3, gamma_QCD2_4, gamma_QCD2_5, gamma_QCD2_6))]\n\n if nf == 5:\n return gamma_QCD2\n elif nf == 4:\n return np.delete(np.delete(gamma_QCD2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 1)\\\n , [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 2)\n elif nf == 3:\n return np.delete(np.delete(gamma_QCD2, [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 1)\\\n , [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 2)\n else:\n raise Exception(\"nf has to be 3, 4 or 5\")", "def Q2euler(self, q):\n\n\tphi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));\n\tpsi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));\n try:\n theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));\n except ValueError:\n print \"ERRO: norm(Q) = %f\" % np.sqrt(np.sum(q**2))\n theta = 0;\n\n return (phi, theta, psi)", "def qFelder(h):\n\treturn (0.92 + 0.153 * h/1.01) * math.sqrt(9.8 * (2/3.0 * h)**3)", "def calc_q_square(self):\n return self._q_x()**2 + self._q_z()**2", "def get_coherent_scattering_factor(self, element: str, q):\n if element not in self.coherent_params.index.values:\n raise ElementNotImplementedException(element)\n fs_coh = 0\n s = q / (4 * np.pi)\n for ind in range(1, 5):\n A = self.coherent_params['a' + str(ind)][element]\n B = self.coherent_params['b' + str(ind)][element]\n fs_coh += A * np.exp(-B * s ** 2)\n\n C = self.coherent_params['c'][element]\n fs_coh += C\n return fs_coh", "def get_coherent_scattering_factor(self, element: str, q):\n if element not in self.coherent_param.index.values:\n raise ElementNotImplementedException(element)\n fs_coh = 0\n s = q / (4 * np.pi)\n for ind in range(1, 5):\n A = self.coherent_param['A' + str(ind)][element]\n B = self.coherent_param['B' + str(ind)][element]\n fs_coh += A * np.exp(-B * s ** 2)\n\n C = self.coherent_param['C'][element]\n fs_coh += C\n return fs_coh", "def zzx_quo_const(f, c):\n if not c:\n raise ZeroDivisionError('polynomial division')\n elif not f:\n return f\n else:\n h = []\n\n for coeff in f:\n if coeff % c:\n raise ExactQuotientFailed('%s does not divide %s' % (c, coeff))\n else:\n h.append(coeff // c)\n\n return h", "def _Fqt_comp(vh,q):\n r_scale = 6.45/60\n edges,count,x_lim = vh\n # make sure that vh is normalized\n count = count/np.sum(count)\n\n return np.sum(count * np.exp(1j*q*edges*r_scale))", "def gcd(p, q):\n if q == 0:\n return p\n return gcd(q, p % q)", "def C2Q(self, C):\n\n return self.euler2Q(self.C2euler(C))", "def factor(self):\r\n\t\t\r\n\t\t# get gcf\r\n\t\tg = 
self.extract()\r\n\t\t\r\n\t\t# invert and multiply\r\n\t\tv = g.invert()\r\n\t\tf = self.multiply(v)\r\n\t\t\r\n\t\treturn f,g" ]
[ "0.62949723", "0.6072696", "0.6047673", "0.60077405", "0.5969821", "0.58834136", "0.5818508", "0.5785059", "0.5768946", "0.57607234", "0.57607234", "0.5751263", "0.57377785", "0.56866693", "0.5677456", "0.56761426", "0.5631534", "0.55737287", "0.5560761", "0.55440897", "0.55433005", "0.5529394", "0.55236316", "0.5516785", "0.5513079", "0.5510017", "0.54955894", "0.54863214", "0.5473965", "0.5473834" ]
0.6415687
0
Return q + 1 - #E(F_q) mod l.
def _Schoof_mod_l(self, l): if l == 2: return self._Schoof_mod2() E = self.cubic D = self.division_polynomials lth_div = self.division_polynomials[l] field = self.basefield bfsize = card(field) x = UniVarPolynomial({1:field.one}, field) k = bfsize % l x_frob = PolyPow(x, bfsize, lth_div) #x_frob=x^q x_frobfrob = PolyPow(x_frob, bfsize, lth_div) #x_frobfrob=x^{q^2} # test for x^{q^2} - x f, P = self._sub1(k, x_frobfrob - x, lth_div) f0, f3 = f[0], f[3] if GCD(lth_div, P).degree() > 0: if arith1.legendre(k, l) == -1: _log.debug("%s $" % str((0, l))) return (0, l) # arith1.legendre(k, l) == 1 <=> k is QR w = arith1.modsqrt(k, l) f, P = self._sub1(w, x_frob - x, lth_div) if GCD(lth_div, P).degree() == 0: # coprime _log.debug("%s $$$$" % str((0, l))) return (0, l) # there exist non trivial common divisors g0 = PolyPow(E, (bfsize - 1) // 2, lth_div) #y^(q-1) P = self._sub2(w, g0, f[3], lth_div) if GCD(lth_div, P).degree() > 0: _log.debug("%s $$" % str((2*w % l, l))) return (2*w % l, l) else: _log.debug("%s $$$" % str((-2*w % l, l))) return (-2*w % l, l) else: # coprime (GCD(P, lth_div).degree() == 0) Y = x - x_frobfrob g0 = PolyPow(E, (bfsize - 1) // 2, lth_div) #y^(q-1) g1 = PolyPow(g0, bfsize + 1, lth_div) #y^(q^2-1) f = -self._sub2(k, g1, f3, lth_div) h1 = PolyMulRed([f, f], lth_div) if k % 2 == 0: g = (PolyMulRed([Y, E, f3], lth_div) - f0) * 4 h0 = PolyMulRed([g, g], lth_div) aux1 = PolyMulRed([f0, h0], lth_div) + h1 X_d = PolyMulRed([E, f3, h0], lth_div) else: g = (PolyMulRed([Y, f3], lth_div) - PolyMulRed([E, f0], lth_div)) * 4 h0 = PolyMulRed([g, g], lth_div) aux1 = PolyMulRed([E, PolyMulRed([f0, h0], lth_div) + h1], lth_div) X_d = PolyMulRed([f3, h0], lth_div) X_n = PolyMulRed([X_d, x_frobfrob + x_frob + x], lth_div) - aux1 # loop of t e_q = PolyPow(self.cubic, bfsize, lth_div) for t in range(1, (l - 1)//2 + 1): Z_d_x, Z_n_x = self._Z_x(t, D, e_q, bfsize, lth_div) # X_n * Z_d_x == X_d * Z_n_x (mod lth_div)? if not PolyMod(X_n * Z_d_x - X_d * Z_n_x, lth_div): break else: # loop of t exhausted _log.debug("%s @@@" % str((0, l))) return (0, l) # found: X_n * Z_d_x == X_d * Z_n_x (mod lth_div) y0 = PolyMulRed([-2*x_frobfrob - x, X_d], lth_div) + aux1 if k % 2 == 0: Y_d = PolyMulRed([E, D[k], g, X_d], lth_div) else: Y_d = PolyMulRed([D[k], g, X_d], lth_div) Y_n = -PolyMulRed([g1, Y_d], lth_div) - PolyMulRed([f, y0], lth_div) Z_d_y, Z_n_y = self._Z_y(t, D, g0, bfsize, lth_div) # Y_n * Z_d_y == Y_d * Z_n_y (mod lth_div)? if PolyMod(Y_n * Z_d_y - Y_d * Z_n_y, lth_div): _log.debug("%s @@" % str((l-t, l))) return (l-t, l) else: _log.debug("%s @" % str((t, l))) return (t, l)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _qod_func(self, q):\n if self.qodulus is None:\n return q\n else:\n return q % self.qodulus", "def get_Lfrac_lam(Lfrac, Lstar_10, qlf):\n D = np.tile(qlf.c_B*Lstar_10**qlf.k_B, [len(Lfrac),1])\n Lfrac_2D = np.tile(Lfrac, [len(qlf.c_B),1]).T\n return np.sum(D,axis=1)/np.sum(D*Lfrac_2D**(qlf.k_B-1),axis=1)", "def Lq(self):\n if not self.isVaild():\n pass\n temp = ((self.r()**self.C)*self.Rho()) / \\\n (math.factorial(self.C)*((1 - self.Rho())**2))\n return temp*self.P0()", "def fhll(self, qL, qR):\n hL, hR, uL, uR, cL, cR = self.get_param(qL, qR)\n fL = np.array([hL*uL, hL*uL*uL+0.5*9.81*hL*hL])\n fR = np.array([hR*uR, hR*uR*uR+0.5*9.81*hR*hR])\n uS = 0.5*(uL+uR) + cL - cR\n cS = 0.25*(uL-uR) + 0.5*(cL+cR)\n sL = min(uL-cL,uS-cS);\n sR = max(uR+cR,uS+cS)\n fS = (sR*fL-sL*fR+sL*sR*(qR-qL))/(sR-sL)\n if sL>=0:\n return fL\n elif sL<0 and sR>0:\n return fS\n else:\n return fR", "def qi(self, tl, psi_l):\n\t try: \n\t ans = .622*esat(tl)/P_ATM*exp(psi_l*1000000.*VW/R/tl)\n\t except OverflowError:\n\t ans = 0.\n\t return ans", "def kl(p, q):\n return np.sum(np.where(p != 0, p * np.log(p / q), 0))", "def rp_nel_1d(q_l, q_r, aux_l, aux_r, aux_global):\n\n meqn = 2\n mwaves = 2\n # Convenience\n nrp = np.size(q_l, 0)\n\n # Set up arrays for return values\n fwave = np.empty((nrp, meqn, mwaves))\n s = np.empty((nrp, mwaves))\n amdq = np.empty((nrp, meqn))\n apdq = np.empty((nrp, meqn))\n\n # Linearized bulk modulus, sound speed, and impedance:\n bulkl = sigmap(q_l[:, 0], aux_l[:, 1])\n bulkr = sigmap(q_r[:, 0], aux_r[:, 1])\n cl = np.sqrt(bulkl / aux_l[:, 0])\n cr = np.sqrt(bulkr / aux_r[:, 0])\n zl = cl * aux_l[:, 0]\n zr = cr * aux_r[:, 0]\n\n # Jumps:\n du = q_r[:, 1] / aux_r[:, 0] - q_l[:, 1] / aux_l[:, 0]\n dsig = sigma(q_r[:, 0], aux_r[:, 1]) - sigma(q_l[:, 0], aux_l[:, 1])\n\n b1 = -(zr * du + dsig) / (zr + zl)\n b2 = -(zl * du - dsig) / (zr + zl)\n\n # Compute the f-waves\n # 1-Wave\n fwave[:, 0, 0] = b1\n fwave[:, 1, 0] = b1 * zl\n s[:, 0] = -cl\n\n # 2-Wave\n fwave[:, 0, 1] = b2\n fwave[:, 1, 1] = b2 * (-zr)\n s[:, 1] = cr\n\n # Compute the left going and right going fluctuations\n for m in range(meqn):\n amdq[:, m] = fwave[:, m, 0]\n apdq[:, m] = fwave[:, m, 1]\n\n return fwave, s, amdq, apdq", "def KL(P, Q):\n assert P.size() == Q.size()\n # To prevent divide by zero\n Q = Q + 1e-15\n return torch.sum(P * torch.log(P / Q))", "def K_l_m(l,m):\n assert abs(m)<=l\n return ((2*l+1)*np.math.factorial(l-abs(m))/(4*np.pi*np.math.factorial(l+abs(m))))**0.5", "def sqf_part(self, f):\n domain = self.domain\n\n if domain.is_FiniteField:\n g = self.one\n for f, _ in self.sqf_list(f)[1]:\n g *= f\n\n return g\n\n if not f:\n return f\n\n gcd = f\n for x in self.gens:\n gcd = self.gcd(gcd, f.diff(x))\n sqf = f // gcd\n\n if domain.is_Field:\n return sqf.monic()\n return sqf.primitive()[1]", "def kl(p, q):\n p = np.asarray(p, dtype=np.float)\n q = np.asarray(q, dtype=np.float)\n\n return np.sum(np.where(p != 0, p * np.log(p / q), 0))", "def q_hkl_exp(self, h, k, l):\n \n qhkl, qhkl_vector = self.q_hkl(h,k,l)\n \n qx, qy, qz = qhkl_vector\n qx, qy, qz = self.rotate_q_exp(qx, qy, qz)\n \n \n return (qhkl, (qx, qy, qz) )", "def KL(p, q):\n return np.sum(p * np.log(p / q))", "def kl(p, q):\n p = np.asarray(p, dtype=float)\n q = np.asarray(q, dtype=float)\n\n return np.where(p != 0, p * np.log(p / q), 0).sum()", "def L(self):\n if not self.isVaild():\n pass\n return self.Lq() + self.r()", "def lfunc(x,u):\n return mpc.mtimes(u.T, R, u) + mpc.mtimes((x-goal).T, Q, (x-goal))", "def D_kl(self, Q, P):\n 
return np.sum(np.multiply(Q, np.log(np.divide(Q, P))))", "def bernoulli_kullback_leibler(p: float, q: float) -> float:\n kl1, kl2 = 0, np.infty\n if p > 0:\n if q > 0:\n kl1 = p*np.log(p/q)\n\n if q < 1:\n if p < 1:\n kl2 = (1 - p) * np.log((1 - p) / (1 - q))\n else:\n kl2 = 0\n return kl1 + kl2", "def f41(R):\n return f3(f3(R, R), R) % MOD", "def do_factor(m=4, q=11):\n from sage.rings.finite_rings.integer_mod_ring import IntegerModRing\n from sage.matrix.constructor import identity_matrix, block_matrix\n from sage.matrix.matrix_space import MatrixSpace\n from sage.rings.integer_ring import IntegerRing\n from sage.modules.free_module_integer import IntegerLattice\n \n \n m=n+1\n ZZ = IntegerRing()\n ZZ_q = IntegerModRing(q)\n \n \n \n from sage.arith.all import euler_phi\n from sage.misc.functional import cyclotomic_polynomial\n \n for a in range(\n R = ZZ_q['x']#.quotient(cyclotomic_polynomial(k, 'x'), 'x')\n f=cyclotomic_polynomial(m,'x')\n return f.factor()", "def kl_divergence(p, q):\n p = np.asarray(p, dtype=np.float)\n q = np.asarray(q, dtype=np.float)\n\n # Add error message when p==0\n return np.sum(np.where(p != 0, p * np.log(p / q), 0))", "def fdq1(f, x, h=1e-5):\n return (f(x+h) - f(x))/h\n \n raise NotImplementedError(\"Problem 2 Incomplete\")", "def kullback_leibler(p: np.ndarray, q: np.ndarray) -> float:\n kl = 0\n for pi, qi in zip(p, q):\n if pi > 0:\n if qi > 0:\n kl += pi * np.log(pi/qi)\n else:\n kl = np.inf\n return kl", "def _malmquist_q_lngamma(self, gamma, f_bin):\n func = np.poly1d(self.malm_pars[::-1])\n const_factor = (1 - gamma) / (self.high_q ** (1 - gamma) - self.low_q ** (1 - gamma))\n integral = np.sum(\n [p * const_factor / (i + 1 - gamma) * (self.high_q ** (i + 1 - gamma) - self.low_q ** (i + 1 - gamma))\n for i, p in enumerate(self.malm_pars)])\n Pobs = integral if self.malm_pars.size == 1 and self.malm_pars[0] == 1 else self.Pobs\n denom = f_bin*integral + (1-f_bin)*Pobs\n logging.debug('Denominator = {}\\nIntegral = {}\\nPobs = {}\\n'.format(denom, integral, Pobs))\n return np.log(func(self.q)) + np.log(f_bin) + np.log(const_factor) - gamma*self.lnq - np.log(denom)", "def _Q(self, chi, eta, L):\n return self.r**2 * chi**2 * np.cos(eta)**2 + L**2 / np.tan(self.theta)**2", "def KL(P,Q):\n epsilon = 0.00001\n \n #You may want to instead make copies to avoid changing the np arrays.\n P = P+epsilon\n Q = Q+epsilon\n \n divergence = np.sum(P*np.log(P/Q))\n return divergence", "def qFelder(h):\n\treturn (0.92 + 0.153 * h/1.01) * math.sqrt(9.8 * (2/3.0 * h)**3)", "def kl_divergence(self, logits_q, logits_p):\n return (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True)", "def E_LE(self,s,l):\n if s>l: return self.E_LE(l,s)\n delta = ((s/l)-(l/(s+l)))\n return delta-delta%self._tau", "def _kl_divergence(p, p_logits, q):\n for tensor in [p, p_logits, q]:\n if not tensor.dtype.is_floating:\n raise ValueError('Input %s must be floating type.', tensor.name)\n p.shape.assert_has_rank(2)\n p_logits.shape.assert_has_rank(2)\n q.shape.assert_has_rank(1)\n return math_ops.reduce_sum(\n p * (nn_ops.log_softmax(p_logits) - math_ops.log(q)), axis=1)" ]
[ "0.63242245", "0.6048697", "0.604789", "0.60234493", "0.6019692", "0.59919757", "0.5964788", "0.5957644", "0.5908941", "0.58902335", "0.58847684", "0.58826154", "0.5871628", "0.58220357", "0.5778957", "0.5749423", "0.57393897", "0.5737276", "0.568636", "0.56816006", "0.56464225", "0.5637205", "0.5612297", "0.5584834", "0.5557359", "0.5550023", "0.5542149", "0.5530257", "0.5481115", "0.5479131" ]
0.7297415
0
Return the Frobenius trace t = q + 1 - #E(F_q), where q is the basefield cardinality. If index is an integer r greater than 1, then return the trace t = q^r + 1 - #E(F_q^r) for a subfield curve defined over F_q.
def trace(self, index=None):
    bfsize = card(self.basefield)
    if not self.ord:
        if bfsize == self.ch:
            # prime field
            # special cases
            if bfsize == 2 or bfsize == 3:
                trace = self._order_to_trace(self.order())
            # trace main block
            elif bfsize < 10**4:
                trace = self.naive()
            elif bfsize < 10**30:
                trace = self.Shanks_Mestre()
            else:  # self.ch >= 10**30
                trace = self.Schoof()
        else:
            if self.ch in (2, 3):
                error_message = "no E/F_{%d} trace" % bfsize
                raise NotImplementedError(error_message)
            else:
                trace = self.Schoof()
        self.ord = self._trace_to_order(trace)  # cached
    else:
        trace = self._order_to_trace(self.ord)
    # final result
    if index is not None:
        # for subfield curve
        basetrace = trace
        trace, oldtrace = basetrace, 2
        for i in range(2, index + 1):
            trace, oldtrace = basetrace*trace - bfsize*oldtrace, trace
    return trace
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fib(index):\n return round((GR**index)/R5)", "def get_quad_trace(self, quad_idx):\n if quad_idx >= self.num_quads or quad_idx < 0:\n raise I2CException(\"Illegal quad index {} specified\".format(quad_idx))\n\n return self.__quad_traces[quad_idx]", "def fir(X, y, trial_index, window, tr):\n\n # Norm then pad.\n scaler = MinMaxScaler(feature_range=(0, 1))\n X = scaler.fit_transform(X.astype(np.float))\n X = np.vstack([X, np.ones((window, X.shape[1]), dtype=np.float)])\n\n # Save the org y names\n ynames = sorted(np.unique(y))\n ynames = unique_sorted_with_nan(ynames)\n \n # y becomes integers\n y = create_y(y)\n\n # Make the design matrix.\n dm = _create_dm(y, window)\n # dm DEBUG\n #import time\n #np.savetxt(\"dm-{0}\".format(time.strftime(\"%m_%d_%Y_%H_%s_%m\")), dm, fmt=\"%1.0f\")\n dm = np.matrix(dm)\n \n # FIR!\n fir_names = []\n firs = []\n for j in range(X.shape[1]):\n x = np.matrix(X[:,j])\n fir = np.array(np.linalg.pinv(dm.T * dm) * dm.T * x.T)[0:-1] \n ## Drop dummy\n fir = fir.reshape(len(ynames)-1, window) \n\n firs.append(fir)\n fir_names.extend(ynames[1:]) ## Drop nan/baseline\n\n Xfir = np.vstack(firs).transpose()\n fir_names = np.asarray(fir_names)\n\n assert checkX(Xfir)\n assert Xfir.shape[0] == window, (\"After FIR rows not equal to window\")\n assert Xfir.shape[1] == (len(ynames[1:]) * X.shape[1]), (\"After\" \n \"FIR wrong number of features\")\n assert fir_names.shape[0] == Xfir.shape[1], (\"fir_names and Xfir\" \n \"don't match\")\n\n return Xfir, fir_names", "def FIR_estimate(self):\r\n raise NotImplementedError", "def fir(self):\r\n #Passband and stop-band are expressed as fraction of the Nyquist\r\n #frequency:\r\n if self.ub is not None:\r\n ub_frac = self.ub / (self.sampling_rate / 2.)\r\n else:\r\n ub_frac = 1.0\r\n\r\n lb_frac = self.lb / (self.sampling_rate / 2.)\r\n\r\n if lb_frac < 0 or ub_frac > 1:\r\n e_s = \"The lower-bound or upper bound used to filter\"\r\n e_s += \" are beyond the range 0-Nyquist. You asked for\"\r\n e_s += \" a filter between\"\r\n e_s += \"%s and %s percent of\" % (lb_frac * 100, ub_frac * 100)\r\n e_s += \"the Nyquist frequency\"\r\n raise ValueError(e_s)\r\n\r\n n_taps = self._filt_order + 1\r\n\r\n #This means the filter order you chose was too large (needs to be\r\n #shorter than a 1/3 of your time-series )\r\n if n_taps > self.data.shape[-1] * 3:\r\n e_s = \"The filter order chosen is too large for this time-series\"\r\n raise ValueError(e_s)\r\n\r\n # a is always 1:\r\n a = [1]\r\n\r\n sig = ts.TimeSeries(data=self.data, sampling_rate=self.sampling_rate)\r\n\r\n #Lowpass:\r\n if ub_frac < 1:\r\n b = signal.firwin(n_taps, ub_frac, window=self._win)\r\n sig = self.filtfilt(b, a, sig)\r\n\r\n #High-pass\r\n if lb_frac > 0:\r\n #Includes a spectral inversion:\r\n b = -1 * signal.firwin(n_taps, lb_frac, window=self._win)\r\n b[n_taps / 2] = b[n_taps / 2] + 1\r\n sig = self.filtfilt(b, a, sig)\r\n\r\n return sig", "def nfl_fq_i(self, i):\n return op.join(self.nfl_dir, IceFiles2.nfl_fq_format.format(i))", "def get_global_index1F( self , active_fracture_index):\n return self._get_global_index1F( active_fracture_index )", "def f(i):\n return e(2**N-1-i) ^ 2**(N-1)", "def f0(E, fermi, T):\n return 1. / (1. 
+ np.exp((E - fermi) / (k_B * T)))", "def f(t, qn, R, e, w):\r\n assert((not np.any(np.isnan(t))) and np.all(np.isfinite(t)) and\r\n np.all(np.isreal(t))),\\\r\n \"t must be real, finite and not NaN\"\r\n assert(len(qn) == 2), \"qn must have length 2\"\r\n assert((not np.any(np.isnan(R))) and np.all(np.isfinite(R)) and\r\n np.all(np.isreal(R))),\\\r\n \"r must be real, finite and not NaN\"\r\n assert((not np.any(np.isnan(e))) and np.all(np.isfinite(e)) and\r\n np.all(np.isreal(e))),\\\r\n \"e must be real, finite and not NaN\"\r\n assert((not np.any(np.isnan(w))) and np.all(np.isfinite(w)) and\r\n np.all(np.isreal(w))),\\\r\n \"w must be real, finite and not NaN\"\r\n x = qn[0]\r\n y = qn[1]\r\n A = np.array([[R, e],\r\n [e, -1]])\r\n B = np.array([(-1.0+x**2.0-np.cos(t))/(2.0*x),\r\n ((-2+y**2.0-np.cos(w*t))/(2.0*y))])\r\n C = np.array([(np.sin(t)/(2*x)),\r\n (w*np.sin(w*t)/(2*y))])\r\n return np.dot(A, B) - C", "def _rf(self, p):\n return self.faces[:, 0, :] - p # 0 is arbitrary - the other vertices also work", "def order(self, index=None):\n bfsize = card(self.basefield)\n\n if not self.ord:\n if self.ch in (2, 3):\n if bfsize == self.ch == 2:\n self.ord = self._order_2()\n elif bfsize == self.ch == 3:\n self.ord = self._order_3()\n else:\n error_message = \"no E/F_{%d} order\" % bfsize\n raise NotImplementedError(error_message)\n else:\n self.ord = self._trace_to_order(self.trace())\n\n # final result\n if index:\n # for subfield curve\n basetrace = self._order_to_trace(self.ord)\n trace, oldtrace = basetrace, 2\n for i in range(2, index + 1):\n trace, oldtrace = basetrace*trace - bfsize*oldtrace, trace\n return bfsize ** index + 1 - trace\n\n return self.ord", "def F0(t):\n if (t < 1e-6):\n return 1.0 - t / 3.0\n else:\n return 0.5 * (np.pi / t) ** 0.5 * sp.erf(t ** 0.5)", "def f(p, phi, phib, df):\n\treturn -log(p) - df + (p-1)*phi + \\\n\t ( phi*(1-p) + phib + \\\n\t 5./4*alpha*(phi*p)**(9./4)-(9./4)*alpha*(p*phi)**(5./4) - \\\n\t (1./2)(1-p*phi)**2 - (phib/Nb)-5./4*alpha*(phi+phib)**(9./4) + \\\n\t (9./4)*alpha*(phi+phib)**(5.4) + \\\n\t 1./2*(1-phi-phib)**2 ) * Ns", "def tt_irt1(q, f, xsf):\n c_irt1 = lib.tt_irt1\n c_irt1.restype = None\n c_irt1.argtypes = [c_int, POINTER(c_int), POINTER(c_double), POINTER(c_int), POINTER(c_double), c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]\n # d n xsf ttrank ttcore M q Z lPz\n\n # Cores of f must be extracted carefully, since we might have discontinuous ps\n core = np.zeros((f.core).size, dtype=np.float64)\n ps_my = 0\n for i in range(0,f.d):\n cri = f.core[range(f.ps[i]-1,f.ps[i+1]-1)]\n core[range(ps_my,ps_my+f.r[i]*f.n[i]*f.r[i+1])] = cri\n ps_my = ps_my + f.r[i]*f.n[i]*f.r[i+1]\n\n d = c_int(f.d)\n n = (np.array(f.n)).ctypes.data_as(POINTER(c_int))\n xsfp = xsf.ctypes.data_as(POINTER(c_double))\n rf = (np.array(f.r)).ctypes.data_as(POINTER(c_int))\n corep = core.ctypes.data_as(POINTER(c_double))\n M = c_int(q.shape[0])\n qp = q.ctypes.data_as(POINTER(c_double))\n \n Z = np.zeros([q.shape[0], q.shape[1]], dtype=np.float64, order='F')\n lPz = np.zeros([q.shape[0]], dtype=np.float64, order='F')\n\n Zp = Z.ctypes.data_as(POINTER(c_double))\n lPzp = lPz.ctypes.data_as(POINTER(c_double))\n\n # Sampler is actually here\n c_irt1(d, n, xsfp, rf, corep, M, qp, Zp, lPzp)\n\n return (Z, lPz)", "def F_cont(self):\n x0 = self.edp_par['x0'].value\n A = self.edp_par['A'].value\n f1 = self.edp_par['f1'].value\n f2 = self.edp_par['f2'].value\n lr = self.latt_par['lambda_r'].value\n w = 0.5 * (self.qx*x0 + self.qz*A)\n arg1 = 
0.5*self.qx*lr + w\n arg2 = 0.5*self.qx*lr - w\n fir = x0 * np.sin(w) / lr / w\n sec = (lr-x0) * np.cos(0.5*arg1) * np.sin(arg2) / lr / np.cos(0.5*arg2) / arg2 \n #sec = (-1)**self.k * (lr-x0) * sin(self.k*pi-w)/(self.k*pi-w)/lr\n return (fir + f1*sec + 2*f2*np.cos(w)/lr)", "def qft(self) -> None:\n random.seed(self.seed)\n self.__qft_input_state(self.circ_size)\n\n for i in range(self.circ_size):\n for j in range(i):\n self.cu1(math.pi / float(2 ** (i - j)), i, j)\n self.h(i)\n\n if self.meas: self.measure(self.qr, self.cr)", "def calc_F(self, peq):\n return self.dmat_d_.dot(np.log(peq))", "def test_QFTn(n):\n q = QuantumRegister(n, 'q') # +lost+lost2\n circ = QuantumCircuit(q)\n circ.x(q[0])\n RegX = [q[i] for i in range(n)]\n QFTn(circ, q, RegX)\n print(RegX)\n iQFTn(circ, q, RegX)\n launch2(circ)\n circ_m = measure_direct(circ, q, RegX)\n return circ_m", "def value(self):\n nd1 = super().nd1()\n nd2 = super().nd2()\n _nd1 = 1 - nd1\n _nd2 = 1 - nd2\n f1 = _nd1 * self.s\n f2 = _nd2 * self.x * math.exp(-self.rf * self.t)\n return f2 - f1", "def qindex2index(index):\n r = index.row()\n c = index.column()\n if c > 0x10:\n return (0x10 * r) + c - 0x11\n else:\n return (0x10 * r) + c", "def bias_index(self):\n return _div(abs(self.FN - self.FP), self.grand_total)", "def get_basis(self, t_index):\n return self._Phi[:, t_index], self._PhiD[:, t_index]", "def findex(self, findex):\n self.logger.debug(\"In 'findex' setter.\")\n\n self._findex = findex", "def rfpart(x):\n return 1 - fpart(x)", "def calculateTrafoIndex(self):\n if self.S <= TRAFO_LIMIT:\n # trafoIndex = (-1 / 24200) * self.S + 5\n trafoIndex = (-10 / 403333) * self.S + 3\n if (trafoIndex >= 0.0) & (trafoIndex <= 1.0):\n return trafoIndex\n elif trafoIndex > 1:\n return 1.0\n return 0.0", "def f0(self):\n return self._f0", "def wrap_fq_grad(atoms, qbin=.1, sum_type='fq'):\n q = atoms.get_positions().astype(np.float32)\n qbin = np.float32(qbin)\n # get scatter array\n if sum_type == 'fq':\n scatter_array = atoms.get_array('F(Q) scatter')\n else:\n scatter_array = atoms.get_array('PDF scatter')\n\n # define scatter_q information and initialize constants\n qmax_bin = scatter_array.shape[1]\n n = len(q)\n\n # Get pair coordinate distance array\n d = np.zeros((n, n, 3), np.float32)\n get_d_array(d, q)\n\n # Get pair distance array\n r = np.zeros((n, n), np.float32)\n get_r_array(r, d)\n\n # Get normalization array\n norm = np.zeros((n, n, qmax_bin), np.float32)\n get_normalization_array(norm, scatter_array)\n\n # Get omega\n omega = np.zeros((n, n, qmax_bin), np.float32)\n get_omega(omega, r, qbin)\n\n # Get grad omega\n grad_omega = np.zeros((n, n, 3, qmax_bin), np.float32)\n get_grad_omega(grad_omega, omega, r, d, qbin)\n\n # Get grad FQ\n get_grad_fq_inplace(grad_omega, norm)\n grad_fq = grad_omega\n\n # Normalize FQ\n grad_fq = grad_fq.sum(1)\n # '''\n norm = np.zeros((int(n * (n - 1) / 2.), qmax_bin), np.float32)\n flat_norm(norm, scatter_array, 0)\n na = np.mean(norm, axis=0) * np.float32(n)\n old_settings = np.seterr(all='ignore')\n grad_fq = np.nan_to_num(grad_fq / na)\n np.seterr(**old_settings)\n del d, r, scatter_array, norm, omega, grad_omega\n return grad_fq", "def f(t,x,p,q):\n return p[1] + q[0]*x", "def _get_new_EF(self, EF, q):\n new_EF = EF + (0.1 - (5-q) * (0.08 + (5 - q) * 0.02))\n new_EF = self.min_EF if new_EF < self.min_EF else new_EF\n return new_EF" ]
[ "0.5615964", "0.54338056", "0.5380275", "0.53494614", "0.5307276", "0.52833897", "0.51446927", "0.5138274", "0.513018", "0.5049757", "0.5013984", "0.500352", "0.49888217", "0.4975394", "0.49607137", "0.49477506", "0.49375767", "0.4933698", "0.49302754", "0.4928633", "0.4926085", "0.4921625", "0.491578", "0.49040806", "0.48947626", "0.48757717", "0.4872403", "0.48692513", "0.48644823", "0.4864086" ]
0.6860514
0
Return #E(F_q) or #E(F_{q^r}). E is defined over F_q. If the method is called as E.order(), the result is #E(F_q). If the method is called as E.order(r), the result is #E(F_{q^r}).
def order(self, index=None):
    bfsize = card(self.basefield)
    if not self.ord:
        if self.ch in (2, 3):
            if bfsize == self.ch == 2:
                self.ord = self._order_2()
            elif bfsize == self.ch == 3:
                self.ord = self._order_3()
            else:
                error_message = "no E/F_{%d} order" % bfsize
                raise NotImplementedError(error_message)
        else:
            self.ord = self._trace_to_order(self.trace())
    # final result
    if index:
        # for subfield curve
        basetrace = self._order_to_trace(self.ord)
        trace, oldtrace = basetrace, 2
        for i in range(2, index + 1):
            trace, oldtrace = basetrace*trace - bfsize*oldtrace, trace
        return bfsize ** index + 1 - trace
    return self.ord
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def E(q, r0, x, y):\n den = np.hypot(x - r0[0], y - r0[1]) ** 3\n return q * (x - r0[0]) / den, q * (y - r0[1]) / den", "def order(self):\n if self.rank == 0:\n return S.One\n else:\n return S.Infinity", "def order(self):\n if self._order is not None:\n return self._order\n n = self._deg\n self._order = factorial(n)\n return self._order", "def pointorder(self, P, ord=None, f=None):\n # parameter ord and f are extension for structre.\n if ord:\n N = ord\n else:\n N = self.order()\n if f:\n l = f\n else:\n l = factor_methods.factor(N)\n o = 1\n for p, e in l:\n B = self.mul(N//(p**e), P)\n while B != [0]:\n o = o*p\n B = self.mul(p, B)\n return o", "def order (self):\n order = 1\n result = self * self\n while result != self:\n result *= self\n order += 1\n return order", "def get_order(self):\n order = getattr(self, 'method_order', None)\n if order is None: # User-supplied method in MyRungeKutta\n order = _calculate_order_1_level(self.butcher_tableau)\n return order", "def evaluate_Q_e(self, q):\n # positions of spring endpoints in GCS\n # distance vector\n r_ij_P = self.evaluate_rijP(q)\n\n # length of vector - spring length\n self.l = np.linalg.norm(r_ij_P, ord=2)\n\n I_r_ij = self._evaluate_I_r_ij(r_ij_P, self.l)\n\n # velocity of deformation of spring length\n dq_ = self.body_i.evaluate_dr(q, element_id=self.element_id, ksi=self.element_ksi)\n dl = np.dot(I_r_ij, dq_)\n\n # force value (amplitude) of spring element\n self.F_s = self._evaluate_F(self.l, self.dl)\n if self.direction == \"compression\":\n if self.F_s > 0.:\n self.F_s = 0.\n\n if self.direction == \"tension\":\n if self.F_s < 0.:\n self.F_s = 0.\n\n F = -self.F_s * I_r_ij\n\n # force on flexible body\n S = self._element._evaluate_S(self.element_ksi)\n Q_e_i_element = np.dot(S.T, F)\n\n Q_e_i = reduce(np.dot, [self._element.B.T, self._element.T.T, Q_e_i_element])\n\n # force on ground body\n Q_e_j = None\n\n return Q_e_i, Q_e_j", "def order(self,tol=1.e-14,mode='float',extremely_high_order=False):\n if mode=='float':\n if not extremely_high_order:\n import nodepy.oc_butcher as oc_butcher\n p = oc_butcher.order(self.__num__(),tol)\n else:\n import nodepy.oc_butcher_high_order as oc_butcher_high_order\n p = oc_butcher_high_order.order(self.__num__(),tol)\n if p==0:\n print('Apparent order is 0; this may be due to round-off. 
Try order(mode=\"exact\") or increase tol.')\n elif mode=='exact':\n from sympy import simplify\n p=0\n while True:\n z=self.order_condition_residuals(p+1)\n z = snp.array([simplify(zz) for zz in z])\n if np.any(abs(z)>tol): break\n p=p+1\n return p", "def eom(self, state, order):", "def reorderEigenvalues(self, *order):\n order = self._reorder(order)\n if not self._has(\"theta\"):\n self.eigenvalues()\n self._.theta = tuple(self._.theta[i] for i in order)\n if self._has(\"omega\"):\n self._.omega = Matrix(SR, [self._.omega[i] for i in order])\n if self._has(\"fsd\"):\n del self._.fsd\n return order", "def _get_new_EF(self, EF, q):\n new_EF = EF + (0.1 - (5-q) * (0.08 + (5 - q) * 0.02))\n new_EF = self.min_EF if new_EF < self.min_EF else new_EF\n return new_EF", "def nematic_order ( e ):\n\n import numpy as np\n \n # Calculate the nematic order parameter <P2(cos(theta))>\n # where theta is the angle between a molecular axis and the director\n # which is the direction that maximises the order parameter\n # This is obtained by finding the largest eigenvalue of\n # the 3x3 second-rank traceless order tensor\n\n # Note that this is not the same as the order parameter characterizing a crystal\n\n n, d = e.shape\n assert d==3, 'Error in e dimension '\n\n # Order tensor: outer product of each orientation vector, summed over n molecules\n q = np.sum ( e[:,:,np.newaxis]*e[:,np.newaxis,:], axis=0)\n q = 1.5 * q / n # Normalize\n for i in range(3):\n q[i,i] = q[i,i] - 0.5 # Make traceless\n\n evals = np.linalg.eigvalsh(q)\n return evals[2]", "def get_E(self):\r\n return self.Real.E, self.Ideal.E", "def get_E(self):\r\n return self.Real.E, self.Ideal.E", "def _qod_func(self, q):\n if self.qodulus is None:\n return q\n else:\n return q % self.qodulus", "def reorderEigenspaces(self, *order):\n order = self._reorder(order)\n if self._has(\"m\"):\n self._.m = tuple(self._.m[i] for i in order)\n if self._has(\"P\"):\n self._.P = Matrix(SR, [self._.P[i] for i in order])\n if self._has(\"Q\"):\n self._.Q = Matrix(SR, [[r[j] for j in order] for r in self._.Q])\n if self._has(\"q\"):\n self._.q.reorder(order)\n if self._has(\"qPolynomial_ordering\") and self._.qPolynomial_ordering:\n self._.qPolynomial_ordering = sorted(\n [tuple(order.index(i) for i in o)\n for o in self._.qPolynomial_ordering])", "def _getq(self, q=None):\n if q is None:\n return self.q\n elif isvector(q, self.n):\n return getvector(q, self.n)\n else:\n return getmatrix(q, (None, self.n))", "def get_one_order():", "def R(self, name, q):\n # check for function in dictionary\n if self._R.get(name, None) is None:\n self._R[name] = self._calc_R(name)\n parameters = tuple(q)\n return np.array(self._R[name](*parameters), dtype='float32')", "def order_evecs(self):\n i_sort = np.argsort(-abs(self.evals))\n self.evals = self.evals[i_sort]\n self.R = self.R[:,i_sort]\n self.L = self.L[:,i_sort]", "def order(self) -> float:\n return self._order", "def ea_from_q(p, q):\n return p * q / (0.622 + 0.378 * q)", "def q(self):\n return self._x", "def order ( self ) :\n return self.__order", "def op_q(self: Qs, q: Qs, first: bool = True, kind: str = \"\", reverse: bool = False) -> Qs:\n\n new_states = []\n\n for op in self.qs:\n\n if first:\n new_states.append(product(op, q, kind, reverse))\n\n else:\n new_states.append(product(q, op, kind, reverse))\n\n return Qs(\n new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns\n )", "def order(self):\n try:\n return self.order_set.all()[0]\n except IndexError:\n return None", "def 
effective_order_condition_residuals(self,q):\n from sympy import factorial,Rational\n A,b,c=self.A,self.b,self.c\n C=snp.diag(c)\n code=runge_kutta_order_conditions(q)\n tau=snp.zeros([q,len(self)])\n for j in range(1,q):\n tau[j,:]=(c**j/j-np.dot(A,c**(j-1)))/factorial(j-1)\n if q<=2:\n z=snp.zeros(len(code)+1)\n z[-1]=np.dot(b,c**(q-1))-Rational(1,q)\n if q==3:\n z=snp.zeros(len(code))\n exec('z[0]='+code[0]+'-'+'np.dot(b,c**2)/2.+1/6.')\n if q==4:\n code2=runge_kutta_order_conditions(q-1)\n z=snp.zeros(len(code)-1)\n exec('z[0]='+code[1]+'-'+'np.dot(b,np.dot(A,c**2))/2.+1/24.')\n exec('z[1]='+code2[0]+'-'+code[1]+'-'+code[2])\n if q>4:\n raise NotImplementedError\n return z", "def __float__(self):\n return self.q[0]", "def f(self, (k,t), (J,q,dq), **params):\n f = 0.*q\n return f", "def curve_order(self):\n\t\treturn self.h * self.n" ]
[ "0.55480236", "0.5491219", "0.5490189", "0.546878", "0.5363571", "0.53605866", "0.5355352", "0.5297144", "0.52107143", "0.51803726", "0.51718414", "0.5158498", "0.5098094", "0.5098094", "0.50961256", "0.5086906", "0.5086257", "0.50722677", "0.5063549", "0.5031349", "0.5021625", "0.5006192", "0.5006077", "0.4990248", "0.49892578", "0.49782822", "0.49727395", "0.49672583", "0.49655768", "0.49527785" ]
0.550242
1
Find the order of the point P and return it.
def pointorder(self, P, ord=None, f=None):
    # parameters ord and f are extensions for structure.
    if ord:
        N = ord
    else:
        N = self.order()
    if f:
        l = f
    else:
        l = factor_methods.factor(N)
    o = 1
    for p, e in l:
        B = self.mul(N//(p**e), P)
        while B != [0]:
            o = o*p
            B = self.mul(p, B)
    return o
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_point_order(point, field, a_value, b_value):\n order = int(2)\n try:\n s_point = add_points(point, point, field, a_value, b_value)\n except ValueError:\n return order\n while True:\n try:\n s_point = add_points(s_point, point, field, a_value, b_value)\n order += 1\n except ValueError:\n return order", "def naive_order_calculation(self):\n\t\torder = 0\n\t\tfor pt in self.enumerate_points():\n\t\t\torder += 1\n\t\treturn order", "def p(self):\n if self._p is None:\n self._p = self.order()\n return self._p", "def curve_order(self):\n\t\treturn self.h * self.n", "def add_equal_points(self, P):\n xp, yp = P\n s = ((3 * xp ** 2 + self.a) * self.inv(2 * yp)) % self.fp\n xr = (s ** 2 - xp - xp) % self.fp\n yr = (s * (xp - xr) - yp) % self.fp\n self.is_valid_point(self.P)\n return (xr, yr)", "def order(self):\n p = self._pants_decomposition\n g = p.genus()\n if g < 2 or p.num_punctures() > 0:\n raise NotImplementedError(\n \"The order computation currently \"\n \"only works for closed surfaces of genus 2 and higher.\")\n for n in range(1, 4*g+3):\n power = self**n\n if power.is_identity():\n if g > 2 or g == 2 and power.is_in_torelli():\n return n\n return 0", "def P_order_prime(a,seedE,b,p,Ordercurve,facto):\n x,y,P=random_point_with_a_seed_of_curve(a,seedE,b,p)\n OrderP=OrderPoint(P,Ordercurve,facto)\n pr=Prime(OrderP)\n while(not pr):\n #print(\"Work Point\")\n x,y,P=random_point_with_a_seed_of_curve(a,seedE,b,p)\n OrderP=OrderPoint(P,Ordercurve,facto)\n pr=Prime(OrderP)\n return P,OrderP", "def order_points(pts):\n pts = np.array(pts)\n sums = pts.sum(axis=1)\n topleft_id = np.argmin(sums)\n bottomright_id = np.argmax(sums)\n\n # Quite clumsy, rewrite here\n leftover_ids = [i for range(4) if i not in (topleft_id, bottomright_id)]\n topright_id = min(leftover_ids, key=lambda i: pts[i][0])\n bottomleft_id = leftover_ids[0] if leftover_ids[0] != topright_id else leftover_ids[1]\n\n return pts[[topleft_id, topright_id, bottomright_id, bottomleft_id]]", "def order_points(corners):\n top_left = corners.sum(1).argmin()\n bottom_right = corners.sum(1).argmax()\n top_right = np.diff(corners).argmin()\n bottom_left = np.diff(corners).argmax()\n\n ordered = np.array([corners[top_left], corners[top_right], corners[bottom_left], corners[bottom_right]], dtype = \"float32\")\n\n return ordered", "def calc_ply_order(constraints, targets):\r\n if constraints.sym:\r\n ply_order = np.arange(targets.n_plies // 2 + targets.n_plies % 2)\r\n return ply_order\r\n\r\n order_before_sorting = np.arange(targets.n_plies)\r\n ply_order = np.zeros((targets.n_plies,), int)\r\n ply_order[0::2] = order_before_sorting[\r\n :targets.n_plies // 2 + targets.n_plies % 2]\r\n ply_order[1::2] = order_before_sorting[\r\n targets.n_plies // 2 + targets.n_plies % 2:][::-1]\r\n return ply_order", "def order_points(pts):\n\n\trect = np.zeros((4, 2), dtype=\"float32\")\n\ts = pts.sum(axis=1)\n\trect[0] = pts[np.argmin(s)]\n\trect[2] = pts[np.argmax(s)]\n\tdiff = np.diff(pts, axis=1)\n\trect[1] = pts[np.argmin(diff)]\n\trect[3] = pts[np.argmax(diff)]\n\n\treturn rect", "def is_pPolynomial(self):\n if not self._has(\"p\"):\n self.pTable()\n if not self._has(\"pPolynomial_ordering\"):\n pPoly = tuple(filter(None, (self._is_polynomial(self._.p, i)\n for i in range(1, self._.d+1))))\n self._.pPolynomial_ordering = False if len(pPoly) == 0 else pPoly\n return self._.pPolynomial_ordering", "def _GetEpiOrder(self):\n self.epi_series.sort()\n for series in self.epi_series:\n self.GetEpiAcqTimes(series)\n self.AssignEpiNames()", "def 
side_points(p, v, L): \r\n u = np.array([-v[1], v[0]]) # positive normal of v:\r\n N = list() # list of points on one side of the line p,v:\r\n for k in range(len(L)):\r\n if (L[k] - p).dot(u) >= 0:\r\n N.append(L[k])\r\n \r\n return N", "def positions(self):\n return self.preorder() # return entire preorder iteration", "def order_points(pts):\n rect = np.zeros((4, 2), dtype=\"float32\")\n sum_pts = pts.sum(axis=1)\n rect[0] = pts[np.argmin(sum_pts)]\n rect[2] = pts[np.argmax(sum_pts)]\n diff = np.diff(pts, axis=1)\n rect[1] = pts[np.argmin(diff)]\n rect[3] = pts[np.argmax(diff)]\n return rect", "def posisions(self):\n return self.preorder() # return entire preorder iteration", "def order(self):\n return len(self.coeff)-1", "def add_points(self, P, Q):\n xp, yp = P\n xq, yq = Q\n s = ((yq - yp) * self.inv(xq - xp)) % self.fp\n xr = (s ** 2 - xp - xq) % self.fp\n yr = (s * (xp - xr) - yp) % self.fp\n return (xr, yr)", "def _re(self, p):\n return self.edges[:, 0, :] - p # 0 is arbitrary - the other end also works", "def p2vertices(self, p):\n h = self.top\n verts = np.empty((self.nparams + 2, 2))\n verts[:, 0] = self._modelx\n verts[:, 1] = np.concatenate([[h], p, [h]])\n return verts", "def order(self):\n return self._degree + 1", "def points(self):\r\n\t\tif self.rank() in self.point_sysm:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn (self.rank() + 2)", "def order_vertices(self):\n y = [self.function(*x) for x in self.vertices]\n order = np.argsort(y)\n vertices = np.array([self.vertices[order[i]] for i in range(len(self.vertices))])\n self.vertices = vertices", "def PGL_order(A):\n\n n = 1\n AA = PGL_repn(A)\n B = copy(AA)\n while B[0][0] != B[1][1] or B[0][1] != 0 or B[1][0] != 0:\n n = n + 1\n B = AA*B\n\n return n", "def reorder(pts):\r\n # pts is a numpy array tha looks like [ [[numX numY]] [[num num]] [[num num]] [[num num]] ]\r\n pts = pts.reshape((4, 2)) # make it look like [ [numX numY] [num num] [num num] [num num] ]\r\n pts_new = np.zeros((4, 2), np.float32)\r\n\r\n add = pts.sum(1) # array like [ numX+numY num+num num+num num+num ]\r\n pts_new[0] = pts[np.argmin(add)] # the dot that is the nearest to the (0, 0)\r\n pts_new[2] = pts[np.argmax(add)] # the remotest one\r\n\r\n diff = np.diff(pts, 1) # array like [ [numY-numX] [num-num] [num-num] [num-num] ]\r\n pts_new[1] = pts[np.argmin(diff)]\r\n pts_new[3] = pts[np.argmax(diff)]\r\n\r\n return pts_new", "def getCoordinates(p):\n if p[0] == 'p': # minimum bounding rectangle for point\n return (int(p[1]), int(p[2]), int(p[1]), int(p[2]))\n elif p[0] == 'c': # minimum bounding rectangle for circle\n x = int(p[1])\n y = int(p[2])\n r = int(p[3])\n return (x - r, y - r, x + r, y + r)\n elif p[0] == 'l': # minimum bounding rectangle for line segment\n x1 = int(p[1])\n y1 = int(p[2])\n x2 = int(p[3])\n y2 = int(p[4])\n if y2 > y1:\n if x1 < x2:\n return (x1, y1, x2, y2)\n else:\n return (x2, y1, x1, y2)\n else:\n if x1 < x2:\n return (x1, y2, x2, y1)\n else:\n return (x2, y2, x1, y1)", "def positions(self):\n return self.preorder()", "def add(self, P, Q):\n if not (isinstance(P, list) and isinstance(Q, list)):\n raise ValueError(\"point P (resp. Q) must be [px, py] (resp. 
[qx, qy])\")\n #if not (self.whetherOn(P) and self.whetherOn(Q)):\n # raise ValueError(\"either points must not be point on curve.\")\n\n if (P == self.infpoint) and (Q != self.infpoint):\n return Q\n elif (P != self.infpoint) and (Q == self.infpoint):\n return P\n elif (P == self.infpoint) and (Q == self.infpoint):\n return self.infpoint\n\n if self.ch == 0:\n # FIXME\n if P[0] == Q[0]:\n if P[1]+Q[1]+self.a1*Q[0]+self.a3 == 0:\n return self.infpoint\n else:\n s = (3*P[0]**2+2*self.a2*P[0]+self.a4-self.a1*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n t = (-P[0]**3+self.a4*P[0]+2*self.a6-self.a3*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n else:\n s = (Q[1]-P[1])/(Q[0]-P[0])\n t = (P[1]*Q[0]-Q[1]*P[0])/(Q[0]-P[0])\n x3 = s**2+self.a1*s-self.a2-P[0]-Q[0]\n y3 = -(s+self.a1)*x3-t-self.a3\n R = [x3, y3]\n return R\n else:\n if not (P[0] - Q[0]):\n # FIXME: the condition is P[0] == Q[0] intuitively,\n # but sometimes there are int vs FiniteFieldElement\n # comparisons ...\n if not (P[1]+Q[1]+self.a1*Q[0]+self.a3):\n return self.infpoint\n else:\n s = (3*P[0]**2+2*self.a2*P[0]+self.a4-self.a1*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n t = (-P[0]**3+self.a4*P[0]+2*self.a6-self.a3*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n else:\n s = (Q[1] - P[1]*self.basefield.one) / (Q[0] - P[0])\n t = (P[1]*Q[0] - Q[1]*P[0]*self.basefield.one)/ (Q[0] - P[0])\n x3 = s**2+self.a1*s-self.a2-P[0]-Q[0]\n y3 = -(s+self.a1)*x3-t-self.a3\n R = [x3, y3]\n return R", "def point_location(tri, p): \n simplex_index = tri.find_simplex(p)\n bc = []\n for id_, point in zip(simplex_index, p):\n # Calculate the two first barycentric coordinates for the relevant\n # simplex\n b = tri.transform[id_, :2].dot(point-tri.transform[id_, 2])\n bc.append(np.c_[np.atleast_2d(b), 1-b.sum()])\n # Create the full array and squeeze the shit out of it\n bc = np.array(bc).squeeze()\n return simplex_index, bc" ]
[ "0.71685904", "0.6964563", "0.67810553", "0.64402294", "0.6379234", "0.63468045", "0.631201", "0.6015451", "0.5895901", "0.5851597", "0.5826416", "0.571477", "0.5679976", "0.56701523", "0.5660304", "0.56364566", "0.56116635", "0.5606401", "0.5592746", "0.5569785", "0.556012", "0.5529508", "0.551907", "0.5516069", "0.5497765", "0.54777086", "0.5477654", "0.5474928", "0.54667914", "0.5460532" ]
0.7694008
0
Compute the Tate-Lichtenbaum pairing with Miller's algorithm. The parameters must satisfy mul(m, P) == [0].
def TatePairing(self, m, P, Q):
    O = self.infpoint
    if self.mul(m, P) != O:
        raise ValueError("sorry, not mP=[0].")
    if P == O or Q == O:
        return self.basefield.one
    forbidden = [O, P, self.mul(-1, Q), self.sub(P, Q)]
    R = self.add(P, Q)
    T = False
    while (not T):
        while R in forbidden:
            R = self.point()
        forbidden.append(R)
        T = self.Miller(P, m, Q, R)
    return T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Langmuir_Knudsen_mdot(D, T_p, Psat, Re, mu_g, cp_g, lambda_g, P_g, R_g, Sc_g, R_v, Yinf):\n Pr_g = mu_g * cp_g / lambda_g # Gas Prandtl Number\n Sh = 2.0 + 0.552 * math.sqrt(Re) * Sc_g ** (1.0/3.0) \n Re_b = 0.0 #Blowing Reynolds number \n Re_b0 = Re_b \n Xseq = min(Psat / P_g, 1.0) #Molar mass fraction\n theta2 = R_v / R_g ;\n Yseq = min(Xseq /max(Xseq + (1.0 - Xseq) * theta2, 1e-30), 1.0) \n yMin = min(Yseq, Yinf) \n yMax = max(Yseq, Yinf) \n\n # Iterate to converge on Re_b\n # This part could be optimized\n Lk = computeLK(T_p,R_v,mu_g,Sc_g,P_g) \n Re_b0 = estimate_re_b(yMin, yMax, Yinf, Sh, Sc_g)\n \n max_solver_iterations = 100\n for i in range(max_solver_iterations): \n Ysneq = computeYsneq(Xseq,Lk,D,theta2,Pr_g,Re_b) \n #Bound Ysneq so that it lies between Yseq and Yinf\n Ysneq = max(yMin, min(yMax, Ysneq)) \n BMneq = (Ysneq - Yinf) / max(1.0 - Ysneq, 1e-30) \n Hm = math.log(max(1.0 + BMneq, 1e-40)) \n Re_b0 = Re_b \n Re_b = Hm * Sh / Sc_g \n factor = min(0.8, 0.5 * D / Lk) #Damping factor\n\n if i >= max_solver_iterations:\n print('Mdot Calculation failed to converge')\n\n if abs(Re_b - Re_b0) < 1.0e-6:\n break \n\n #Relax update to help convergence\n Re_b = factor * Re_b + (1.0 - factor) * Re_b0 \n \n #Chris debug\n beta = 0.5 * Pr_g * Re_b ;\n if i > -1: \n format_string = 'i= {0:<4d} Re_b= {1:<8.4f} Ysneq= {2:<8.4f} Hm= {3:<8.4f} ' \\\n 'BMneq= {4:<8.4f} Lk= {5:<8.4e} Lk/D= {6:<8.4e} factor= {7:<8.4e} beta= {8:<8.4f}'\n print(format_string.format(i, Re_b, computeYsneq(Xseq,Lk,D,theta2,Pr_g,Re_b), Hm, BMneq, Lk, Lk/D, factor, beta))\n \n # Back out mdot from blowing reunolds number\n mdot = -Re_b * D * mu_g * math.pi \n return mdot", "def WeilPairing(self, m, P, Q):\n O = self.infpoint\n if self.mul(m, P) != O or self.mul(m, Q) != O:\n raise ValueError(\"sorry, not mP=[0] or mQ=[0].\")\n\n if P == O or Q == O or P == Q:\n return self.basefield.one\n\n T = U = False\n forbidden = [O, P, Q, self.sub(Q, P)]\n R = self.sub(P,Q) # assume Q not in group generated P\n while (not T) or (not U):\n while R in forbidden:\n R = self.point()\n T = self.Miller(Q, m, P, R)\n# S = self.add(P, R)\n# if S == O:\n# continue\n# S = self.sub(Q, R)\n# if S == O:\n# continue\n U = self.Miller(P, m, Q, self.mul(-1, R))\n F = U/T\n return F", "def multiplicaciones(): #906609 tiene que darme\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n #total se encarga de hacer la multiplicacion entre los numeros\n total = primer_numero * segundo_numero\n # llamamos a la funcion que verifica si la multiplicacion que envia es un palindromo\n if obtener_palindromo(total):\n #luego de verificar que la multiplicacion era palindromo pasamos a evaluarla hasta llegar al ultimo palindromo\n #entre 100 y 1000\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo", "def mendel_pair(k, m, n):\n t = k + m + n\n # To calculate this, just use a decision tree...\n return 1 - (1/(t*(t-1)))*(0.25*m*(m-1) + m*n + n*(n-1))", "def mersenne(p):\n return 2 ** p -1", "def _holt_mul_dam(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, phi, alphac, betac, y_alpha = _holt_init(x, xi, p, y, l, b)\n if alpha == 0.0:\n return max_seen\n if beta > alpha:\n return max_seen\n for i in range(1, n):\n l[i] = (y_alpha[i - 1]) + (alphac * (l[i - 1] * b[i - 1]**phi))\n b[i] = (beta * (l[i] / l[i - 1])) + (betac * b[i - 1]**phi)\n return sqeuclidean(l * b**phi, y)", "def Miller(self, P, m, Q, R):\n # FIXME: rewrite the document\n # check 
order\n if m < 2 or not (m % self.ch):\n raise ValueError(\"order more than 1 and not be divisible by characteristic\")\n\n O = self.infpoint\n\n # support point must not be P-Q\n S = self.add(R, Q)\n if S == O:\n return False\n\n # j = 1\n jP = P\n v = self.basefield.one\n for k in arith1.expand(m, 2)[-2::-1]:\n j2P = self.mul(2, jP)\n denominator = self.line(jP, jP, R) * self.line(j2P, O, S)\n if not denominator:\n return False\n numerator = self.line(jP, jP, S) * self.line(j2P, O, R)\n if not numerator:\n return False\n f = numerator / denominator\n v = v**2 * f\n # j *= 2\n jP = j2P\n if k:\n kjP = self.add(P, jP)\n denominator = self.line(P, jP, R) * self.line(kjP, O, S)\n if not denominator:\n return False\n numerator = self.line(P, jP, S) * self.line(kjP, O, R)\n if not numerator:\n return False\n f = numerator / denominator\n v = v * f\n # j += 1\n jP = kjP\n # now j == m\n return v", "def p(e, t):\n return b * e ** 2", "def N_k_TT_m(self, L, m=0, optimal=True):\n if L>2.*self.CMB.lMaxT:\n return 0.\n \n # integrand\n def integrand(lnl):\n l = np.exp(lnl)\n \n # choose l bounds so that the theta integral can cover the full circle\n # otherwise, the multipole estimators will be biased\n if (np.abs(l-L/2)<self.CMB.lMin) or (l+L/2>self.CMB.lMaxT):\n result = 0.\n \n else:\n result = self.f_TT_multipole_interp(L, l, m)**2\n result *= l**2 / (2.*np.pi)\n if m>0:\n result /= 4.\n\n # use the optimal noise weighting: angular average of the Cl^total\n if optimal:\n # angular integrand\n def f(t):\n l1, l2, phi, theta1, theta2 = self.config_multipole(L, l, t)\n result = self.CMB.ftotalTT(l1) * self.CMB.ftotalTT(l2)\n result *= np.cos(m*t)**2\n result /= 2.*np.pi\n result *= 2.\n result *= 2. # because integrating over half the domain\n return result\n # compute angular integral\n integral = integrate.quad(f, 0., np.pi, epsabs=0, epsrel=1.e-3)[0]\n # else use the suboptimal intuitive noise\n else:\n integral = 2. 
* self.CMB.ftotalTT(l)**2\n result /= integral\n \n if not np.isfinite(result):\n result = 0.\n return result\n \n result = integrate.quad(integrand, np.log(1.), np.log(self.CMB.lMaxT), epsabs=0, epsrel=1.e-3)[0]\n result = (L**2/2.)**2 / result\n if not np.isfinite(result):\n result = 0.\n print \"- done L=\"+str(L), result\n return result", "def mod(p):\n return (p[0]**2 + p[1]**2 + p[2]**2)**0.5", "def prjEuler():\r\n #Constants\r\n NUMSTRING = ( \"73167176531330624919225119674426574742355349194934\"\r\n \"96983520312774506326239578318016984801869478851843\"\r\n \"85861560789112949495459501737958331952853208805511\"\r\n \"12540698747158523863050715693290963295227443043557\"\r\n \"66896648950445244523161731856403098711121722383113\"\r\n \"62229893423380308135336276614282806444486645238749\"\r\n \"30358907296290491560440772390713810515859307960866\"\r\n \"70172427121883998797908792274921901699720888093776\"\r\n \"65727333001053367881220235421809751254540594752243\"\r\n \"52584907711670556013604839586446706324415722155397\"\r\n \"53697817977846174064955149290862569321978468622482\"\r\n \"83972241375657056057490261407972968652414535100474\"\r\n \"82166370484403199890008895243450658541227588666881\"\r\n \"16427171479924442928230863465674813919123162824586\"\r\n \"17866458359124566529476545682848912883142607690042\"\r\n \"24219022671055626321111109370544217506941658960408\"\r\n \"07198403850962455444362981230987879927244284909188\"\r\n \"84580156166097919133875499200524063689912560717606\"\r\n \"05886116467109405077541002256983155200055935729725\"\r\n \"71636269561882670428252483600823257530420752963450\" )\r\n \r\n #defined items\r\n greatest_prod = 1\r\n euler_queue = fiveQueue()\r\n \r\n #code\r\n for numIter in NUMSTRING:\r\n if( euler_queue.push( numIter ) ):\r\n temp_prod = euler_queue.product()\r\n if( temp_prod > greatest_prod ):\r\n greatest_prod = temp_prod\r\n \r\n print \"The greatest product is %d\" % greatest_prod\r\n return", "def prjEuler():\r\n a = 1\r\n for a in range( 1, 1000 ):\r\n for b in range( 1, 1000 ):\r\n if( ( sqrt( ( a ** 2 ) + ( b ** 2 ) ) % 1 ) == 0 ):\r\n if( ( a + b + ( sqrt( ( a ** 2 ) + ( b ** 2 ) ) ) ) == 1000 ):\r\n print \"The product is %d\" % ( a * b * ( sqrt( ( a ** 2 ) + ( b ** 2 ) ) ) )\r\n return\r\n \r\n return", "def problem2(m, p):\n total = 0\n for k in range(m, m ** p):\n if is_prime(k):\n total = total + sum_of_digits(k)\n return total", "def RKC2(m,epsilon=0):\n import sympy\n one = sympy.Rational(1)\n\n x=sympy.Symbol('x')\n Tm=sympy.polys.orthopolys.chebyshevt_poly(m,x)\n\n w0=one+sympy.Rational(epsilon,m**2)\n w1=sympy.Rational(Tm.diff().subs(x,w0),Tm.diff(x,2).subs(x,w0))\n\n alpha=snp.zeros([m+1,m])\n beta=snp.zeros([m+1,m])\n\n b=snp.zeros(m+1)\n a=snp.zeros(m+1)\n mu=snp.zeros(m+1)\n nu=snp.zeros(m+1)\n mut=snp.zeros(m+1)\n gamt=snp.zeros(m+1)\n\n T2 = sympy.polys.orthopolys.chebyshevt_poly(2,x)\n b[0]=sympy.Rational(T2.diff(x,2).subs(x,w0),(T2.diff().subs(x,w0))**2)\n\n b[1]=one/w0\n mut[1] = b[1]*w1\n alpha[1,0]=one\n beta[1,0]=mut[1]\n\n for j in range(2,m+1):\n Tj=sympy.polys.orthopolys.chebyshevt_poly(j,x)\n b[j] = sympy.Rational(Tj.diff(x,2).subs(x,w0),(Tj.diff().subs(x,w0))**2)\n\n a[j] = one-b[j]*Tj.subs(x,w0)\n mu[j]= 2*b[j]*w0/b[j-1]\n nu[j]= -b[j]/b[j-2]\n mut[j] = mu[j]*w1/w0\n gamt[j] = -a[j-1]*mut[j]\n\n alpha[j,0]=one-mu[j]-nu[j]\n alpha[j,j-1]=mu[j]\n alpha[j,j-2]=nu[j]\n beta[j,j-1]=mut[j]\n beta[j,0]=gamt[j]\n\n shortname='RKC'+str(m)+'2'\n name = 'Runge-Kutta-Chebyshev ('+str(m)+',2)'\n return 
ExplicitRungeKuttaMethod(alpha=alpha,beta=beta,name=name,shortname=shortname)", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def tourney_prob(k, N, m):\n\n if N < m:\n print \"The second argument cannot be smaller than the third one.\"\n sys.exit()\n\n if m < 1 or k <= 0:\n return 0.0\n elif m == 1:\n return 1.0 / N\n else:\n return float(N - k) * m / (N * (m - 1)) * tourney_prob(k, N - 1, m - 1)", "def problem():\n\n print 'problem #27'\n\n l = 0\n m_a = 0\n m_b = 0\n for a in xrange(-1000, 1000):\n for b in xrange(-1000, 1000):\n p = len(check(a, b))\n if p > l:\n l = p\n m_a = a\n m_b = b\n\n print 'the product of coefficients is %s' % (m_a * m_b)", "def power_output_candidate_thermal_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last() and t != m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_3[g, y, s, t]\r\n + m.sigma_20[g, y, s, t] - m.sigma_20[g, y, s, t + 1]\r\n - m.sigma_23[g, y, s, t] + m.sigma_23[g, y, s, t + 1]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n elif y != m.Y.last() and t == m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_3[g, y, s, t]\r\n + m.sigma_20[g, y, s, t]\r\n - m.sigma_23[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n elif y == m.Y.last() and t != m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_3[g, y, s, t]\r\n + m.sigma_20[g, y, s, t] - m.sigma_20[g, y, s, t + 1]\r\n - m.sigma_23[g, y, s, t] + m.sigma_23[g, y, s, t + 1]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n elif y == m.Y.last() and t == m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_3[g, y, s, t]\r\n + m.sigma_20[g, y, s, t]\r\n - m.sigma_23[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 
0)\r\n\r\n else:\r\n raise Exception(f'Unhandled case: {g, y, s, t}')", "def gen_samp_hammersley(self, i1, i2, m, n, vectorized = True):\n dist = self.dist\n \n def hammersley_sequence (i1, i2, m, n):\n \n \n if ( i1 <= i2 ):\n i3 = +1\n else:\n i3 = -1\n \n l = abs ( i2 - i1 ) + 1\n r = np.zeros ( [ m, l ] )\n k = 0\n \n for i in range ( i1, i2 + i3, i3 ):\n \n t = np.ones ( m - 1 )\n \n t = t * i\n #\n # Carry out the computation.\n #\n prime_inv = np.zeros ( m - 1 )\n for j in range ( 0, m - 1 ):\n prime_inv[j] = 1.0 / prime ( j )\n \n r[0,k] = float ( i % ( n + 1 ) ) / float ( n )\n \n while ( 0 < np.sum ( t ) ):\n for j in range ( 0, m - 1 ):\n d = ( t[j] % prime ( j ) )\n r[j+1,k] = r[j+1,k] + float ( d ) * prime_inv[j]\n prime_inv[j] = prime_inv[j] / prime ( j )\n t[j] = ( t[j] // prime ( j ) )\n \n k = k + 1\n \n return r\n \n def prime (n):\n \n from sys import exit\n \n prime_max = 1600\n \n prime_vector = np.array ( [\n 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, \\\n 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, \\\n 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, \\\n 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, \\\n 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, \\\n 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, \\\n 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, \\\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, \\\n 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, \\\n 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, \\\n 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, \\\n 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, \\\n 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, \\\n 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, \\\n 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, \\\n 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, \\\n 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, \\\n 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, \\\n 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, \\\n 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, \\\n 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, \\\n 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, \\\n 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, \\\n 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, \\\n 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, \\\n 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, \\\n 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, \\\n 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, \\\n 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, \\\n 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, \\\n 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, \\\n 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, \\\n 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, \\\n 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, \\\n 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, \\\n 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, \\\n 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, \\\n 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, \\\n 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, \\\n 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, \\\n 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, \\\n 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, \\\n 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, \\\n 3001, 3011, 
3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, \\\n 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, \\\n 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, \\\n 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, \\\n 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, \\\n 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, \\\n 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, \\\n 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, \\\n 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, \\\n 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, \\\n 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907, \\\n 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, \\\n 4001, 4003, 4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057, \\\n 4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, \\\n 4153, 4157, 4159, 4177, 4201, 4211, 4217, 4219, 4229, 4231, \\\n 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, \\\n 4327, 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, \\\n 4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481, 4483, 4493, \\\n 4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, \\\n 4591, 4597, 4603, 4621, 4637, 4639, 4643, 4649, 4651, 4657, \\\n 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, \\\n 4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, \\\n 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933, 4937, \\\n 4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, \\\n 5009, 5011, 5021, 5023, 5039, 5051, 5059, 5077, 5081, 5087, \\\n 5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, \\\n 5189, 5197, 5209, 5227, 5231, 5233, 5237, 5261, 5273, 5279, \\\n 5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387, \\\n 5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443, \\\n 5449, 5471, 5477, 5479, 5483, 5501, 5503, 5507, 5519, 5521, \\\n 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, \\\n 5641, 5647, 5651, 5653, 5657, 5659, 5669, 5683, 5689, 5693, \\\n 5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, \\\n 5801, 5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, \\\n 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923, 5927, 5939, \\\n 5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, \\\n 6067, 6073, 6079, 6089, 6091, 6101, 6113, 6121, 6131, 6133, \\\n 6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, \\\n 6229, 6247, 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, \\\n 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361, 6367, \\\n 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, \\\n 6481, 6491, 6521, 6529, 6547, 6551, 6553, 6563, 6569, 6571, \\\n 6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, \\\n 6679, 6689, 6691, 6701, 6703, 6709, 6719, 6733, 6737, 6761, \\\n 6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833, \\\n 6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, \\\n 6947, 6949, 6959, 6961, 6967, 6971, 6977, 6983, 6991, 6997, \\\n 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, \\\n 7109, 7121, 7127, 7129, 7151, 7159, 7177, 7187, 7193, 7207, \\\n 7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, \\\n 7307, 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, \\\n 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487, 7489, 7499, \\\n 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, \\\n 7573, 7577, 7583, 7589, 7591, 7603, 7607, 7621, 7639, 7643, \\\n 7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 
7723, \\\n 7727, 7741, 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, \\\n 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907, 7919, \\\n 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, \\\n 8039, 8053, 8059, 8069, 8081, 8087, 8089, 8093, 8101, 8111, \\\n 8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, \\\n 8221, 8231, 8233, 8237, 8243, 8263, 8269, 8273, 8287, 8291, \\\n 8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387, \\\n 8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, \\\n 8513, 8521, 8527, 8537, 8539, 8543, 8563, 8573, 8581, 8597, \\\n 8599, 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, \\\n 8681, 8689, 8693, 8699, 8707, 8713, 8719, 8731, 8737, 8741, \\\n 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, \\\n 8837, 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, \\\n 8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001, 9007, 9011, \\\n 9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, \\\n 9127, 9133, 9137, 9151, 9157, 9161, 9173, 9181, 9187, 9199, \\\n 9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, \\\n 9293, 9311, 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, \\\n 9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437, 9439, \\\n 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, \\\n 9539, 9547, 9551, 9587, 9601, 9613, 9619, 9623, 9629, 9631, \\\n 9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, \\\n 9739, 9743, 9749, 9767, 9769, 9781, 9787, 9791, 9803, 9811, \\\n 9817, 9829, 9833, 9839, 9851, 9857, 9859, 9871, 9883, 9887, \\\n 9901, 9907, 9923, 9929, 9931, 9941, 9949, 9967, 9973,10007, \\\n 10009,10037,10039,10061,10067,10069,10079,10091,10093,10099, \\\n 10103,10111,10133,10139,10141,10151,10159,10163,10169,10177, \\\n 10181,10193,10211,10223,10243,10247,10253,10259,10267,10271, \\\n 10273,10289,10301,10303,10313,10321,10331,10333,10337,10343, \\\n 10357,10369,10391,10399,10427,10429,10433,10453,10457,10459, \\\n 10463,10477,10487,10499,10501,10513,10529,10531,10559,10567, \\\n 10589,10597,10601,10607,10613,10627,10631,10639,10651,10657, \\\n 10663,10667,10687,10691,10709,10711,10723,10729,10733,10739, \\\n 10753,10771,10781,10789,10799,10831,10837,10847,10853,10859, \\\n 10861,10867,10883,10889,10891,10903,10909,10937,10939,10949, \\\n 10957,10973,10979,10987,10993,11003,11027,11047,11057,11059, \\\n 11069,11071,11083,11087,11093,11113,11117,11119,11131,11149, \\\n 11159,11161,11171,11173,11177,11197,11213,11239,11243,11251, \\\n 11257,11261,11273,11279,11287,11299,11311,11317,11321,11329, \\\n 11351,11353,11369,11383,11393,11399,11411,11423,11437,11443, \\\n 11447,11467,11471,11483,11489,11491,11497,11503,11519,11527, \\\n 11549,11551,11579,11587,11593,11597,11617,11621,11633,11657, \\\n 11677,11681,11689,11699,11701,11717,11719,11731,11743,11777, \\\n 11779,11783,11789,11801,11807,11813,11821,11827,11831,11833, \\\n 11839,11863,11867,11887,11897,11903,11909,11923,11927,11933, \\\n 11939,11941,11953,11959,11969,11971,11981,11987,12007,12011, \\\n 12037,12041,12043,12049,12071,12073,12097,12101,12107,12109, \\\n 12113,12119,12143,12149,12157,12161,12163,12197,12203,12211, \\\n 12227,12239,12241,12251,12253,12263,12269,12277,12281,12289, \\\n 12301,12323,12329,12343,12347,12373,12377,12379,12391,12401, \\\n 12409,12413,12421,12433,12437,12451,12457,12473,12479,12487, \\\n 12491,12497,12503,12511,12517,12527,12539,12541,12547,12553, \\\n 12569,12577,12583,12589,12601,12611,12613,12619,12637,12641, \\\n 12647,12653,12659,12671,12689,12697,12703,12713,12721,12739, \\\n 
12743,12757,12763,12781,12791,12799,12809,12821,12823,12829, \\\n 12841,12853,12889,12893,12899,12907,12911,12917,12919,12923, \\\n 12941,12953,12959,12967,12973,12979,12983,13001,13003,13007, \\\n 13009,13033,13037,13043,13049,13063,13093,13099,13103,13109, \\\n 13121,13127,13147,13151,13159,13163,13171,13177,13183,13187, \\\n 13217,13219,13229,13241,13249,13259,13267,13291,13297,13309, \\\n 13313,13327,13331,13337,13339,13367,13381,13397,13399,13411, \\\n 13417,13421,13441,13451,13457,13463,13469,13477,13487,13499 ] )\n \n if ( n < 0 or prime_max <= n ):\n print ( '' )\n print ( 'PRIME - Fatal error!' )\n print ( ' 0 <= N < %d' % ( prime_max ) )\n exit ( 'PRIME - Fatal error!' )\n \n return prime_vector[n] \n \n hamm_seq = hammersley_sequence(i1, i2, m, n)\n samp = dist.ppf(hamm_seq)\n \n if vectorized == False:\n return(samp)\n \n else: \n samp = samp.reshape(1, -1)\n return(samp)", "def Pollard_pm1(n, primes, max_B=1000000):\n B = 10\n g = 1\n while B < max_B and g < n:\n a = randint(2, n - 2)\n g = gcd(a, n)\n if g != 1:\n return g\n for p in primes:\n if p >= B:\n break\n pd = 1 # p^d\n while pd * p <= B:\n pd *= p\n a = powmod(a, pd, n)\n g = gcd(a - 1, n)\n if g != 1 and g != n:\n return g\n B *= 2\n return 1", "def payoff_n_p(p, n=3,\n MLB_contract=4158333, minor_contract=6600, thresh=1500000):\n distribution = []\n for n_makers in range(n + 1):\n if n_makers == 0:\n payoff_prob = [1 - prob for prob in p.values()]\n payoff_prob = np.prod(payoff_prob)\n distribution.append((minor_contract, payoff_prob))\n elif n_makers == n:\n payoff_prob = [prob for prob in p.values()]\n payoff_prob = np.prod(payoff_prob)\n distribution.append((MLB_contract, payoff_prob))\n else:\n makers = list(combinations(range(1, n + 1), n_makers))\n for maker_set in makers:\n if 1 in maker_set:\n payoff = MLB_contract - 0.1*(MLB_contract-thresh)\n payoff += (n_makers-1)*0.1*(MLB_contract-thresh)/(n-1)\n payoff_prob = [p[player] for player in maker_set]\n payoff_prob += [1-p[player] for player in p.keys() if player not in maker_set]\n payoff_prob = np.prod(payoff_prob)\n distribution.append((payoff, payoff_prob))\n else:\n payoff = minor_contract\n payoff += n_makers*0.1*(MLB_contract-thresh)/(n-1)\n payoff_prob = [p[player] for player in maker_set]\n payoff_prob += [1-p[player] for player in p.keys() if player not in maker_set]\n payoff_prob = np.prod(payoff_prob)\n distribution.append((payoff, payoff_prob))\n E_payoff = [a*b for (a, b) in distribution]\n E_payoff = sum(E_payoff)\n var_payoff = [((a-E_payoff)**2)*b for (a, b) in distribution]\n var_payoff = sum(var_payoff)\n return E_payoff, var_payoff**0.5", "def mr_pairs_have_less_mi_exp(filename=None):\n trials = 500\n matrix = [[0,0,0,0] for i in range(L)]\n motif = [random_site(L) for i in range(n)]\n scale = 0.01 #use this to prevent overflows in anneal\n scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale\n annealed_system = lambda :anneal(scaled_sse,\n lambda(matrix,motif):propose(matrix,motif),\n (matrix,motif),\n verbose=True,\n iterations=100000,\n stopping_crit = 0.1*scale)\n systems = [annealed_system() for i in xrange(500)]\n motifs = map(second,systems)\n ics = map(motif_ic,motifs)\n control_motifs = [sa_motif_with_desired_ic(ic,0.1,n,L) for ic in verbose_gen(ics)]\n mis = map(total_motif_mi,motifs)\n control_mis = map(total_motif_mi,control_motifs)\n plt.scatter(mis,control_mis)\n plt.xlabel(\"M-R System Mutual Information (bits)\")\n plt.ylabel(\"Annealed Motif Mutual Information (bits)\")\n plt.plot([0,5],[0,5])\n 
maybesave(filename)\n #mannwhitneyu(mis,control_mis) -> (47673.0, 1.2864021557444156e-64)\n return mis,control_mis", "def challenge23():\n seed = random.randint(1, 2 ** 31)\n orig_mt = MersenneTwister(seed)\n copy_mt = MersenneTwister(0)\n for index, p in enumerate(orig_mt.generate(624)):\n copy_mt.y[index] = untempering(p)\n return orig_mt, copy_mt", "def answer():\n for k in range(2,3000):\n for j in range(k-1,0,-1):\n pj, pk = P(j), P(k)\n #print( j, k, pj, pk )\n if isPent(pk-pj):\n #print( j, k, pj, pk, pk+pj, isPent(pk+pj), pk-pj )\n if isPent(pk+pj) and isPent(pk-pj):\n return pk-pj", "def product_generic(m1, m2, p):\r\n if m1[-1] == m2[0] == 1:\r\n return {}\r\n else:\r\n return make_mono_admissible_generic(m1[:-1] + (m1[-1] + m2[0],) + m2[1:], p)", "def PBl1(l_target, m):\n #TobiaC 2011-10-13 (2011-10-13)\n PBtab=numpy.array([0.0, -1.0, -3.0])\n l=2\n while l_target>l:\n PBlp1=((2*l+1)*PBtab[l +0]-(l+1)*PBtab[l-1 +0])/float(l)\n PBtab=numpy.hstack((PBtab, numpy.array([PBlp1])))\n l=l+1\n if m==1:\n PBout=PBtab[l_target +0]\n else:\n if m==-1:\n PBout=-math.factorial(l_target-1)/float(math.factorial(l_target+1))*PBtab[l_target +0]\n else:\n PBout=0.0\n return PBout", "def prob_m_of_n(m, n, T, l):\n PFD_one_unit = l*T\n m_of_n = binom(n, m) * (PFD_one_unit)**(n-m) * (1-PFD_one_unit)**m\n return m_of_n", "def server2_mult(b1, b2, pub):\n return (b1 * b2) % pub.n", "def _P(m):\n P = np.zeros((m**2,m**2), dtype=np.int64)\n for i in range(1, m**2 + 1):\n j = 1 + m*((i - 1) % m) + (i - 1)//m\n P[i-1, j-1] = 1\n return P", "def mw (mva, pf):\r\n x= mva*1000000\r\n y=x*pf/1000000\r\n return y" ]
[ "0.62009764", "0.6168753", "0.598084", "0.5918141", "0.5906334", "0.590612", "0.5900798", "0.5865663", "0.5837559", "0.56921166", "0.5651081", "0.5643009", "0.5626161", "0.5586395", "0.5570056", "0.5568218", "0.5552037", "0.5544669", "0.5534943", "0.5525335", "0.5522545", "0.5512832", "0.55051136", "0.5497917", "0.5494445", "0.5493611", "0.5468648", "0.5442134", "0.5428528", "0.54238105" ]
0.6256162
0
computing the Weil pairing with Miller's algorithm. We assume points P and Q lie in different m-torsion subgroups.
def WeilPairing(self, m, P, Q): O = self.infpoint if self.mul(m, P) != O or self.mul(m, Q) != O: raise ValueError("sorry, not mP=[0] or mQ=[0].") if P == O or Q == O or P == Q: return self.basefield.one T = U = False forbidden = [O, P, Q, self.sub(Q, P)] R = self.sub(P,Q) # assume Q not in group generated P while (not T) or (not U): while R in forbidden: R = self.point() T = self.Miller(Q, m, P, R) # S = self.add(P, R) # if S == O: # continue # S = self.sub(Q, R) # if S == O: # continue U = self.Miller(P, m, Q, self.mul(-1, R)) F = U/T return F
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def TatePairing(self, m, P, Q):\n O = self.infpoint\n if self.mul(m, P) != O:\n raise ValueError(\"sorry, not mP=[0].\")\n\n if P == O or Q == O:\n return self.basefield.one\n\n forbidden = [O, P, self.mul(-1, Q), self.sub(P, Q)]\n R = self.add(P, Q)\n T = False\n while (not T):\n while R in forbidden:\n R = self.point()\n forbidden.append(R)\n T = self.Miller(P, m, Q, R)\n return T", "def Miller(self, P, m, Q, R):\n # FIXME: rewrite the document\n # check order\n if m < 2 or not (m % self.ch):\n raise ValueError(\"order more than 1 and not be divisible by characteristic\")\n\n O = self.infpoint\n\n # support point must not be P-Q\n S = self.add(R, Q)\n if S == O:\n return False\n\n # j = 1\n jP = P\n v = self.basefield.one\n for k in arith1.expand(m, 2)[-2::-1]:\n j2P = self.mul(2, jP)\n denominator = self.line(jP, jP, R) * self.line(j2P, O, S)\n if not denominator:\n return False\n numerator = self.line(jP, jP, S) * self.line(j2P, O, R)\n if not numerator:\n return False\n f = numerator / denominator\n v = v**2 * f\n # j *= 2\n jP = j2P\n if k:\n kjP = self.add(P, jP)\n denominator = self.line(P, jP, R) * self.line(kjP, O, S)\n if not denominator:\n return False\n numerator = self.line(P, jP, S) * self.line(kjP, O, R)\n if not numerator:\n return False\n f = numerator / denominator\n v = v * f\n # j += 1\n jP = kjP\n # now j == m\n return v", "def _step(self, P, W):\n Qa = self.mul(self.ch + 1, P)\n if len(Qa) == 1:\n _log.debug(\"[%d]P is zero\" % (self.ch + 1))\n Qb = R = self.mul(W, P)\n A = [Qa[0]]\n B = [0, Qb[0]] # 0 = [0][0] ((infinity)_x)\n for i in range(1, W):\n Qa = self.add(Qa, P)\n Qb = self.add(Qb, R)\n A.append(Qa[0])\n B.append(Qb[0])\n if len(Qa) == 1:\n _log.debug(\"[%d]P is zero\" % (self.ch + 1 + i))\n break\n return [A, B, set(A).intersection(set(B))]", "def l1(P, q):\n\n m, n = P.size\n\n # Solve equivalent LP \n #\n # minimize [0; 1]' * [u; v]\n # subject to [P, -I; -P, -I] * [u; v] <= [q; -q]\n #\n # maximize -[q; -q]' * z \n # subject to [P', -P']*z = 0\n # [-I, -I]*z + 1 = 0 \n # z >= 0 \n \n c = matrix(n*[0.0] + m*[1.0])\n h = matrix([q, -q])\n\n def Fi(x, y, alpha = 1.0, beta = 0.0, trans = 'N'): \n if trans == 'N':\n # y := alpha * [P, -I; -P, -I] * x + beta*y\n u = P*x[:n]\n y[:m] = alpha * ( u - x[n:]) + beta*y[:m]\n y[m:] = alpha * (-u - x[n:]) + beta*y[m:]\n\n else:\n # y := alpha * [P', -P'; -I, -I] * x + beta*y\n y[:n] = alpha * P.T * (x[:m] - x[m:]) + beta*y[:n]\n y[n:] = -alpha * (x[:m] + x[m:]) + beta*y[n:]\n\n\n def Fkkt(W): \n\n # Returns a function f(x, y, z) that solves\n #\n # [ 0 0 P' -P' ] [ x[:n] ] [ bx[:n] ]\n # [ 0 0 -I -I ] [ x[n:] ] [ bx[n:] ]\n # [ P -I -W1^2 0 ] [ z[:m] ] = [ bz[:m] ]\n # [-P -I 0 -W2 ] [ z[m:] ] [ bz[m:] ]\n #\n # On entry bx, bz are stored in x, z.\n # On exit x, z contain the solution, with z scaled (W['di'] .* z is\n # returned instead of z). 
\n\n d1, d2 = W['d'][:m], W['d'][m:]\n D = 4*(d1**2 + d2**2)**-1\n A = P.T * spdiag(D) * P\n lapack.potrf(A)\n\n def f(x, y, z):\n\n x[:n] += P.T * ( mul( div(d2**2 - d1**2, d1**2 + d2**2), x[n:]) \n + mul( .5*D, z[:m]-z[m:] ) )\n lapack.potrs(A, x)\n\n u = P*x[:n]\n x[n:] = div( x[n:] - div(z[:m], d1**2) - div(z[m:], d2**2) + \n mul(d1**-2 - d2**-2, u), d1**-2 + d2**-2 )\n\n z[:m] = div(u-x[n:]-z[:m], d1)\n z[m:] = div(-u-x[n:]-z[m:], d2)\n\n return f\n\n\n # Initial primal and dual points from least-squares solution.\n\n # uls minimizes ||P*u-q||_2; rls is the LS residual.\n uls = +q\n lapack.gels(+P, uls)\n rls = P*uls[:n] - q \n\n # x0 = [ uls; 1.1*abs(rls) ]; s0 = [q;-q] - [P,-I; -P,-I] * x0\n x0 = matrix( [uls[:n], 1.1*abs(rls)] ) \n s0 = +h\n Fi(x0, s0, alpha=-1, beta=1) \n\n # z0 = [ (1+w)/2; (1-w)/2 ] where w = (.9/||rls||_inf) * rls \n # if rls is nonzero and w = 0 otherwise.\n if max(abs(rls)) > 1e-10: \n w = .9/max(abs(rls)) * rls\n else: \n w = matrix(0.0, (m,1))\n z0 = matrix([.5*(1+w), .5*(1-w)])\n\n dims = {'l': 2*m, 'q': [], 's': []}\n sol = solvers.conelp(c, Fi, h, dims, kktsolver = Fkkt, \n primalstart={'x': x0, 's': s0}, dualstart={'z': z0})\n return sol['x'][:n]", "def hellinger_weighted(mu1, sigma1, pi1, mu2, sigma2, pi2):\n sigma1norm = np.linalg.norm(sigma1)\n sigma2norm = np.linalg.norm(sigma2)\n X0 = np.zeros(mu1.shape)\n i = 2 * (sigma1norm**(1.0/4)) * (sigma2norm**(1.0/4)) * np.sqrt(2*np.pi) *\\\n gmm.mulnormpdf(X0, mu1-mu2, 2*sigma1 + 2*sigma2)\n #return np.sqrt(pi1*pi2) * (1-2*i)\n return 1-i[0]", "def _holt_win_mul_mul_dam(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, gamma, phi, alphac, betac, gammac, y_alpha, y_gamma = _holt_win_init(\n x, xi, p, y, l, b, s, m)\n if alpha * beta == 0.0:\n return max_seen\n if beta > alpha or gamma > 1 - alpha:\n return max_seen\n for i in range(1, n):\n l[i] = (y_alpha[i - 1] / s[i - 1]) + \\\n (alphac * (l[i - 1] * b[i - 1]**phi))\n b[i] = (beta * (l[i] / l[i - 1])) + (betac * b[i - 1]**phi)\n s[i + m - 1] = (y_gamma[i - 1] / (l[i - 1] *\n b[i - 1]**phi)) + (gammac * s[i - 1])\n return sqeuclidean((l * b**phi) * s[:-(m - 1)], y)", "def TatePairing_Extend(self, m, P, Q):\n return self.TatePairing(m, P, Q)**((card(self.basefield)-1)//m)", "def stitch(KPS1, KPS2, H1, H2, match): #---- stich image to previous one\r\n #--- projection image1 from plane to cylindrical ---\r\n total = np.minimum(match.shape[0],100); # total pairing number\r\n bin1 = match[0:total,0].astype(int); # feature no at image 1\r\n R1 = KPS1.keyz[bin1, 0]; # keypoint Y at image 1\r\n C1 = KPS1.keyz[bin1, 1]; # keypoint X at image 1\r\n V1, U1 = pano_tools.project_p2c_points(R1, C1, H1);\r\n #--- image 2 ---\r\n bin2 = match[0:total,1].astype(int); # feature no at image 2\r\n R2 = KPS2.keyz[bin2, 0]; # keypoint Y at image 2\r\n C2 = KPS2.keyz[bin2, 1]; # keypoint X at image 2\r\n Rc2 = H2[0]/2; Rp2= R2 - Rc2; \r\n Cc2 = H2[1]/2; Cp2= C2 - Cc2;\r\n #--- --- \r\n # {phi1,S1,TU1,TV1} = M*M matrix: which is derived by chosen 2 pairs \r\n # {phi0,S0,TU0,TV0} = scalar: which is initial guess by removing outlier\r\n # \r\n phi1,S1,TU1,TV1= pano_tools.derive_p2c_formula(U1,V1,Cp2,Rp2);\r\n seq,phi0,S0,TU0,TV0 = pano_tools.remove_ill_matched_pair(phi1,S1,TU1,TV1); \r\n #--- linear regression [not necessary] ---\r\n # U1X = U1[seq]; C2X = C2[seq]; V1X = V1[seq]; R2X = R2[seq]; \r\n # phi0,S0,TU0,TV0,Err= pano_tools.linear_regression(V1X,U1X,R2X,C2X, phi0,S0,TU0,TV0,H2)\r\n H2[3]= phi0; H2[4]= S0; H2[5]= TV0; H2[6]= TU0;", "def probability(N_dr, L_opmin, 
L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans", "def mr_pairs_have_less_mi_exp(filename=None):\n trials = 500\n matrix = [[0,0,0,0] for i in range(L)]\n motif = [random_site(L) for i in range(n)]\n scale = 0.01 #use this to prevent overflows in anneal\n scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale\n annealed_system = lambda :anneal(scaled_sse,\n lambda(matrix,motif):propose(matrix,motif),\n (matrix,motif),\n verbose=True,\n iterations=100000,\n stopping_crit = 0.1*scale)\n systems = [annealed_system() for i in xrange(500)]\n motifs = map(second,systems)\n ics = map(motif_ic,motifs)\n control_motifs = [sa_motif_with_desired_ic(ic,0.1,n,L) for ic in verbose_gen(ics)]\n mis = map(total_motif_mi,motifs)\n control_mis = map(total_motif_mi,control_motifs)\n plt.scatter(mis,control_mis)\n plt.xlabel(\"M-R System Mutual Information (bits)\")\n plt.ylabel(\"Annealed Motif Mutual Information (bits)\")\n plt.plot([0,5],[0,5])\n maybesave(filename)\n #mannwhitneyu(mis,control_mis) -> (47673.0, 1.2864021557444156e-64)\n return mis,control_mis", "def powerLaw(minskew,minkurt,transform,x):\n exponent = 0.05\n while exponent < 20:\n y = x**exponent\n (newskew,newkurt) = computeMoments(y)\n (minskew,minkurt,transform) = checkMin(minskew,minkurt,newskew,newkurt,transform,exponent)\n exponent *= 1.5\n #endwhile\n return (minskew,minkurt,transform)", "def weight_mm(self,m1,m2):\n lw = 1.\n\n # particle id and isolation\n lw *= self._muIDISOWeight.value(m1.pt(),m1.eta(),'0')\n lw *= self._muIDISOWeight.value(m2.pt(),m2.eta(),'0')\n\n # Trigger\n hlt_sf_run2012_a = (self._muTRIGGERWeight_leg8_A.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_A.value(m2.pt(),m2.eta(),'0') +\\\n self._muTRIGGERWeight_leg17_A.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg8_A.value(m2.pt(),m2.eta(),'0') -\\\n self._muTRIGGERWeight_leg17_A.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_A.value(m2.pt(),m2.eta(),'0'))\n\n hlt_sf_run2012_b = (self._muTRIGGERWeight_leg8_B.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_B.value(m2.pt(),m2.eta(),'0') +\\\n 
self._muTRIGGERWeight_leg17_B.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg8_B.value(m2.pt(),m2.eta(),'0') -\\\n self._muTRIGGERWeight_leg17_B.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_B.value(m2.pt(),m2.eta(),'0'))\n \n lw *= (0.5*hlt_sf_run2012_a + 0.5*hlt_sf_run2012_b) ##percentage according to the lumi in which they were not prescaled (apparently same efficinecy for AB)\n #lw *= 0.966 ## temporary solution!\n\n if abs(configuration.LeptonTnPfactor)<0.01 :\n return lw\n else:\n return lw + configuration.LeptonTnPfactor*self.uncertainty_mm(m1,m2)", "def answer():\n for k in range(2,3000):\n for j in range(k-1,0,-1):\n pj, pk = P(j), P(k)\n #print( j, k, pj, pk )\n if isPent(pk-pj):\n #print( j, k, pj, pk, pk+pj, isPent(pk+pj), pk-pj )\n if isPent(pk+pj) and isPent(pk-pj):\n return pk-pj", "def pairing(PAN, th = None): #---- pairing feature points\r\n if th is None: th= PAN.knn_th; # setup KNN2 threshold\r\n img_no = PAN.count;\r\n N = int(img_no*(img_no-1)/2); # Total combination\r\n PAN.pair_no = np.zeros((img_no, img_no)); # matching point number \r\n PAN.matchinfo = []; # Matching infomation\r\n PAN.match_seq = np.zeros((N, 2));\r\n index = 0;\r\n for ky in range(0, img_no-1):\r\n #for kx in range(ky+1, img_no): # match to all image\r\n kx = ky+1; # only match to next image\r\n print('pairing between image no: ',ky,' and no: ',kx);\r\n match = FD_tools.matching(PAN.DESCs[ky],PAN.DESCs[kx],th=th);\r\n PAN.pair_no[ky,kx] = match.shape[0];\r\n PAN.pair_no[kx,ky] = match.shape[0];\r\n PAN.matchinfo.append(match);\r\n PAN.match_seq[index,0] = ky; \r\n PAN.match_seq[index,1] = kx;\r\n index += 1;\r\n print('Matching process complete!');", "def _holt_win__mul(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, gamma, phi, alphac, betac, gammac, y_alpha, y_gamma = _holt_win_init(\n x, xi, p, y, l, b, s, m)\n if alpha == 0.0:\n return max_seen\n if gamma > 1 - alpha:\n return max_seen\n for i in range(1, n):\n l[i] = (y_alpha[i - 1] / s[i - 1]) + (alphac * (l[i - 1]))\n s[i + m - 1] = (y_gamma[i - 1] / (l[i - 1])) + (gammac * s[i - 1])\n return sqeuclidean(l * s[:-(m - 1)], y)", "def get_pmi(x, y, z, k=1, normalize=None, norm=np.inf, estimator='fp'):\n\n if normalize:\n x = normalize(x)\n y = normalize(y)\n z = normalize(z)\n\n # construct state array for the joint processes:\n xz = np.c_[x,z]\n yz = np.c_[y,z]\n xyz = np.c_[x,y,z]\n\n if estimator == 'naive':\n # compute individual entropies\n # TODO: pass in min_dist\n hz = get_h(z, k=k, norm=norm)\n hxz = get_h(xz, k=k, norm=norm)\n hyz = get_h(yz, k=k, norm=norm)\n hxyz = get_h(xyz, k=k, norm=norm)\n\n pmi = hxz + hyz - hxyz - hz\n\n elif estimator == 'fp':\n\n # construct k-d trees\n z_tree = cKDTree(z)\n xz_tree = cKDTree(xz)\n yz_tree = cKDTree(yz)\n xyz_tree = cKDTree(xyz)\n\n # kth nearest neighbour distances for every state\n # query with k=k+1 to return the nearest neighbour, not the data point itself\n # dist, idx = xyz_tree.query(xyz, k=k+1, p=norm)\n dist, idx = xyz_tree.query(xyz, k=k+1, p=np.inf)\n epsilon = dist[:, -1]\n\n # for each point, count the number of neighbours\n # whose distance in the relevant subspace is strictly < epsilon\n n = len(x)\n nxz = np.empty(n, dtype=np.int)\n nyz = np.empty(n, dtype=np.int)\n nz = np.empty(n, dtype=np.int)\n\n for ii in range(n):\n # nz[ii] = len( z_tree.query_ball_point( z_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n # nxz[ii] = len(xz_tree.query_ball_point(xz_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n # nyz[ii] = 
len(yz_tree.query_ball_point(yz_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n nz[ii] = len( z_tree.query_ball_point( z_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n nxz[ii] = len(xz_tree.query_ball_point(xz_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n nyz[ii] = len(yz_tree.query_ball_point(yz_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n\n pmi = digamma(k) + np.mean(digamma(nz +1) -digamma(nxz +1) -digamma(nyz +1))\n\n elif estimator == 'ps':\n # (I am fairly sure that) this is the correct implementation of the estimator,\n # but the estimators is just crap.\n\n # construct k-d trees\n xz_tree = cKDTree(xz, leafsize=2*k)\n yz_tree = cKDTree(yz, leafsize=2*k)\n\n # determine k-nn distances\n n = len(x)\n rxz = np.empty(n, dtype=np.int)\n ryz = np.empty(n, dtype=np.int)\n\n # rxz, dummy = xz_tree.query(xz, k=k+1, p=norm) # +1 to account for distance to itself\n # ryz, dummy = yz_tree.query(xz, k=k+1, p=norm) # +1 to account for distance to itself; xz NOT a typo\n rxz, dummy = xz_tree.query(xz, k=k+1, p=np.inf) # +1 to account for distance to itself\n ryz, dummy = yz_tree.query(xz, k=k+1, p=np.inf) # +1 to account for distance to itself; xz NOT a typo\n\n pmi = yz.shape[1] * np.mean(log(ryz[:,-1]) - log(rxz[:,-1])) # + log(n) -log(n-1) -1.\n\n else:\n raise NotImplementedError(\"Estimator one of 'naive', 'fp', 'ps'; currently: {}\".format(estimator))\n\n return pmi", "def _holt_mul_dam(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, phi, alphac, betac, y_alpha = _holt_init(x, xi, p, y, l, b)\n if alpha == 0.0:\n return max_seen\n if beta > alpha:\n return max_seen\n for i in range(1, n):\n l[i] = (y_alpha[i - 1]) + (alphac * (l[i - 1] * b[i - 1]**phi))\n b[i] = (beta * (l[i] / l[i - 1])) + (betac * b[i - 1]**phi)\n return sqeuclidean(l * b**phi, y)", "def gen_samp_hammersley(self, i1, i2, m, n, vectorized = True):\n dist = self.dist\n \n def hammersley_sequence (i1, i2, m, n):\n \n \n if ( i1 <= i2 ):\n i3 = +1\n else:\n i3 = -1\n \n l = abs ( i2 - i1 ) + 1\n r = np.zeros ( [ m, l ] )\n k = 0\n \n for i in range ( i1, i2 + i3, i3 ):\n \n t = np.ones ( m - 1 )\n \n t = t * i\n #\n # Carry out the computation.\n #\n prime_inv = np.zeros ( m - 1 )\n for j in range ( 0, m - 1 ):\n prime_inv[j] = 1.0 / prime ( j )\n \n r[0,k] = float ( i % ( n + 1 ) ) / float ( n )\n \n while ( 0 < np.sum ( t ) ):\n for j in range ( 0, m - 1 ):\n d = ( t[j] % prime ( j ) )\n r[j+1,k] = r[j+1,k] + float ( d ) * prime_inv[j]\n prime_inv[j] = prime_inv[j] / prime ( j )\n t[j] = ( t[j] // prime ( j ) )\n \n k = k + 1\n \n return r\n \n def prime (n):\n \n from sys import exit\n \n prime_max = 1600\n \n prime_vector = np.array ( [\n 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, \\\n 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, \\\n 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, \\\n 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, \\\n 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, \\\n 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, \\\n 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, \\\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, \\\n 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, \\\n 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, \\\n 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, \\\n 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, \\\n 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, \\\n 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, \\\n 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, \\\n 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, \\\n 947, 953, 967, 971, 977, 983, 991, 997, 
1009, 1013, \\\n 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, \\\n 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, \\\n 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, \\\n 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, \\\n 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, \\\n 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, \\\n 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, \\\n 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, \\\n 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, \\\n 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, \\\n 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, \\\n 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, \\\n 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, \\\n 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, \\\n 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, \\\n 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, \\\n 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, \\\n 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, \\\n 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, \\\n 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, \\\n 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, \\\n 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, \\\n 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, \\\n 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, \\\n 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, \\\n 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, \\\n 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, \\\n 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, \\\n 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, \\\n 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, \\\n 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, \\\n 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, \\\n 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, \\\n 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, \\\n 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, \\\n 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, \\\n 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907, \\\n 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, \\\n 4001, 4003, 4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057, \\\n 4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, \\\n 4153, 4157, 4159, 4177, 4201, 4211, 4217, 4219, 4229, 4231, \\\n 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, \\\n 4327, 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, \\\n 4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481, 4483, 4493, \\\n 4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, \\\n 4591, 4597, 4603, 4621, 4637, 4639, 4643, 4649, 4651, 4657, \\\n 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, \\\n 4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, \\\n 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933, 4937, \\\n 4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, \\\n 5009, 5011, 5021, 5023, 5039, 5051, 5059, 5077, 5081, 5087, \\\n 5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, \\\n 5189, 5197, 5209, 5227, 5231, 5233, 5237, 5261, 5273, 5279, \\\n 5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387, \\\n 5393, 5399, 5407, 5413, 
5417, 5419, 5431, 5437, 5441, 5443, \\\n 5449, 5471, 5477, 5479, 5483, 5501, 5503, 5507, 5519, 5521, \\\n 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, \\\n 5641, 5647, 5651, 5653, 5657, 5659, 5669, 5683, 5689, 5693, \\\n 5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, \\\n 5801, 5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, \\\n 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923, 5927, 5939, \\\n 5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, \\\n 6067, 6073, 6079, 6089, 6091, 6101, 6113, 6121, 6131, 6133, \\\n 6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, \\\n 6229, 6247, 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, \\\n 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361, 6367, \\\n 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, \\\n 6481, 6491, 6521, 6529, 6547, 6551, 6553, 6563, 6569, 6571, \\\n 6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, \\\n 6679, 6689, 6691, 6701, 6703, 6709, 6719, 6733, 6737, 6761, \\\n 6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833, \\\n 6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, \\\n 6947, 6949, 6959, 6961, 6967, 6971, 6977, 6983, 6991, 6997, \\\n 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, \\\n 7109, 7121, 7127, 7129, 7151, 7159, 7177, 7187, 7193, 7207, \\\n 7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, \\\n 7307, 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, \\\n 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487, 7489, 7499, \\\n 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, \\\n 7573, 7577, 7583, 7589, 7591, 7603, 7607, 7621, 7639, 7643, \\\n 7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, \\\n 7727, 7741, 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, \\\n 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907, 7919, \\\n 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, \\\n 8039, 8053, 8059, 8069, 8081, 8087, 8089, 8093, 8101, 8111, \\\n 8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, \\\n 8221, 8231, 8233, 8237, 8243, 8263, 8269, 8273, 8287, 8291, \\\n 8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387, \\\n 8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, \\\n 8513, 8521, 8527, 8537, 8539, 8543, 8563, 8573, 8581, 8597, \\\n 8599, 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, \\\n 8681, 8689, 8693, 8699, 8707, 8713, 8719, 8731, 8737, 8741, \\\n 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, \\\n 8837, 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, \\\n 8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001, 9007, 9011, \\\n 9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, \\\n 9127, 9133, 9137, 9151, 9157, 9161, 9173, 9181, 9187, 9199, \\\n 9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, \\\n 9293, 9311, 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, \\\n 9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437, 9439, \\\n 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, \\\n 9539, 9547, 9551, 9587, 9601, 9613, 9619, 9623, 9629, 9631, \\\n 9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, \\\n 9739, 9743, 9749, 9767, 9769, 9781, 9787, 9791, 9803, 9811, \\\n 9817, 9829, 9833, 9839, 9851, 9857, 9859, 9871, 9883, 9887, \\\n 9901, 9907, 9923, 9929, 9931, 9941, 9949, 9967, 9973,10007, \\\n 10009,10037,10039,10061,10067,10069,10079,10091,10093,10099, \\\n 10103,10111,10133,10139,10141,10151,10159,10163,10169,10177, \\\n 10181,10193,10211,10223,10243,10247,10253,10259,10267,10271, \\\n 
10273,10289,10301,10303,10313,10321,10331,10333,10337,10343, \\\n 10357,10369,10391,10399,10427,10429,10433,10453,10457,10459, \\\n 10463,10477,10487,10499,10501,10513,10529,10531,10559,10567, \\\n 10589,10597,10601,10607,10613,10627,10631,10639,10651,10657, \\\n 10663,10667,10687,10691,10709,10711,10723,10729,10733,10739, \\\n 10753,10771,10781,10789,10799,10831,10837,10847,10853,10859, \\\n 10861,10867,10883,10889,10891,10903,10909,10937,10939,10949, \\\n 10957,10973,10979,10987,10993,11003,11027,11047,11057,11059, \\\n 11069,11071,11083,11087,11093,11113,11117,11119,11131,11149, \\\n 11159,11161,11171,11173,11177,11197,11213,11239,11243,11251, \\\n 11257,11261,11273,11279,11287,11299,11311,11317,11321,11329, \\\n 11351,11353,11369,11383,11393,11399,11411,11423,11437,11443, \\\n 11447,11467,11471,11483,11489,11491,11497,11503,11519,11527, \\\n 11549,11551,11579,11587,11593,11597,11617,11621,11633,11657, \\\n 11677,11681,11689,11699,11701,11717,11719,11731,11743,11777, \\\n 11779,11783,11789,11801,11807,11813,11821,11827,11831,11833, \\\n 11839,11863,11867,11887,11897,11903,11909,11923,11927,11933, \\\n 11939,11941,11953,11959,11969,11971,11981,11987,12007,12011, \\\n 12037,12041,12043,12049,12071,12073,12097,12101,12107,12109, \\\n 12113,12119,12143,12149,12157,12161,12163,12197,12203,12211, \\\n 12227,12239,12241,12251,12253,12263,12269,12277,12281,12289, \\\n 12301,12323,12329,12343,12347,12373,12377,12379,12391,12401, \\\n 12409,12413,12421,12433,12437,12451,12457,12473,12479,12487, \\\n 12491,12497,12503,12511,12517,12527,12539,12541,12547,12553, \\\n 12569,12577,12583,12589,12601,12611,12613,12619,12637,12641, \\\n 12647,12653,12659,12671,12689,12697,12703,12713,12721,12739, \\\n 12743,12757,12763,12781,12791,12799,12809,12821,12823,12829, \\\n 12841,12853,12889,12893,12899,12907,12911,12917,12919,12923, \\\n 12941,12953,12959,12967,12973,12979,12983,13001,13003,13007, \\\n 13009,13033,13037,13043,13049,13063,13093,13099,13103,13109, \\\n 13121,13127,13147,13151,13159,13163,13171,13177,13183,13187, \\\n 13217,13219,13229,13241,13249,13259,13267,13291,13297,13309, \\\n 13313,13327,13331,13337,13339,13367,13381,13397,13399,13411, \\\n 13417,13421,13441,13451,13457,13463,13469,13477,13487,13499 ] )\n \n if ( n < 0 or prime_max <= n ):\n print ( '' )\n print ( 'PRIME - Fatal error!' )\n print ( ' 0 <= N < %d' % ( prime_max ) )\n exit ( 'PRIME - Fatal error!' 
)\n \n return prime_vector[n] \n \n hamm_seq = hammersley_sequence(i1, i2, m, n)\n samp = dist.ppf(hamm_seq)\n \n if vectorized == False:\n return(samp)\n \n else: \n samp = samp.reshape(1, -1)\n return(samp)", "def take_EM_step(X, pi, A, B):\n # TODO: Write this function.\n pi_prime = np.zeros(pi.shape[0])\n A_prime = np.zeros((A.shape[0], A.shape[1]))\n B_prime = np.zeros((B.shape[0], B.shape[1]))\n for i in range(X.shape[0]):\n alpha = forward(X[i], pi, A, B)\n beta = backward(X[i], pi, A, B)\n p_xn = np.sum(alpha[X.shape[1]-1])\n\n #pi update\n alpha_0 = alpha[0] # alpha_0, i\n beta_0 = beta[0]\n pi_update = np.multiply(alpha_0, beta_0) / p_xn\n pi_prime += pi_update\n\n #a update\n for i_val in range(A.shape[0]):\n for j_val in range(A.shape[1]):\n tem = 0\n for t_val in range(X.shape[1]-1):\n tem += alpha[t_val][i_val] * A[i_val][j_val] * B[j_val][X[i][t_val+1]] * beta[t_val+1][j_val]\n tem = tem / p_xn\n A_prime[i_val][j_val] += tem\n\n #b update\n for k in range(B.shape[1]):\n b_sum = np.zeros(2)\n for p in range(X.shape[1]):\n if X[i][p] == k:\n alpha_t_i = alpha[p]\n beta_t_i = beta[p]\n b_sum += np.multiply(alpha_t_i, beta_t_i)\n b_sum = b_sum / p_xn\n B_prime[:, k] += b_sum\n\n #normalization\n pi_prime = pi_prime/pi_prime.sum()\n for e in range(A_prime.shape[0]):\n A_prime[e] = A_prime[e]/A_prime[e].sum()\n for d in range(B_prime.shape[0]):\n B_prime[d] = B_prime[d]/B_prime[d].sum()\n return (pi_prime, A_prime, B_prime)", "def cal_limit(prior_a, posterior_a,prior_b, posterior_b):\n limiter = 0\n a=0\n b=0\n for l in prior_a:\n limiter += math.pow((prior_a[l]-posterior_a[l]),2)\n a+=posterior_a[l]\n for l in prior_b:\n limiter += math.pow((prior_b[l]-posterior_b[l]),2)\n a+=posterior_b[l] \n # a=0 do not meet the condition, need to continue iteration\n if a==0:\n b=1\n print(\"Warning: line.py: sum posterior flow = 0\")\n else:\n b=math.sqrt(limiter)/a\n return b", "def map_all_sig_p(limitregion=False, region=\"allsky\"):\n \n # Get ids of all pixels that contain RHT data\n rht_cursor, tablename = get_rht_cursor(region = region)\n all_ids = get_all_rht_ids(rht_cursor, tablename)\n \n planck_tqu_db = sqlite3.connect(\"planck_TQU_gal_2048_db.sqlite\")\n planck_tqu_cursor = planck_tqu_db.cursor()\n planck_cov_db = sqlite3.connect(\"planck_cov_gal_2048_db.sqlite\")\n planck_cov_cursor = planck_cov_db.cursor()\n \n if limitregion is True:\n print(\"Loading all allsky data points that are in the SC_241 region\")\n # Get all ids that are in both allsky data and SC_241\n all_ids_SC = pickle.load(open(\"SC_241_healpix_ids.p\", \"rb\"))\n all_ids = list(set(all_ids).intersection(all_ids_SC))\n \n all_sigpGsq = np.zeros(len(all_ids))\n\n update_progress(0.0)\n for i, hp_index in enumerate(all_ids):\n #likelihood = Likelihood(_id[0], planck_tqu_cursor, planck_cov_cursor, p0_all, psi0_all, adaptivep0 = adaptivep0)\n (hp_index, T, Q, U) = planck_tqu_cursor.execute(\"SELECT * FROM Planck_Nside_2048_TQU_Galactic WHERE id = ?\", hp_index).fetchone()\n (hp_index, TT, TQ, TU, TQa, QQ, QU, TUa, QUa, UU) = planck_cov_cursor.execute(\"SELECT * FROM Planck_Nside_2048_cov_Galactic WHERE id = ?\", (hp_index,)).fetchone()\n \n # sigma_p as defined in arxiv:1407.0178v1 Eqn 3.\n sigma_p = np.zeros((2, 2), np.float_) # [sig_Q^2, sig_QU // sig_QU, UU]\n sigma_p[0, 0] = (1.0/T**2)*QQ #QQ\n sigma_p[0, 1] = (1.0/T**2)*QU #QU\n sigma_p[1, 0] = (1.0/T**2)*QU #QU\n sigma_p[1, 1] = (1.0/T**2)*UU #UU\n \n # det(sigma_p) = sigma_p,G^4\n det_sigma_p = np.linalg.det(sigma_p)\n sigpGsq = np.sqrt(det_sigma_p)\n \n 
all_sigpGsq[i] = sigpGsq\n \n update_progress((i+1.0)/len(all_ids), message='Calculating: ', final_message='Finished Calculating: ')\n \n # Place into healpix map\n hp_sigpGsq = make_hp_map(all_sigpGsq, all_ids, Nside = 2048, nest = True)\n \n out_root = \"/disks/jansky/a/users/goldston/susan/Wide_maps/\"\n if limitregion:\n hp.fitsfunc.write_map(out_root + \"planck_sigpGsq_SC_241.fits\", hp_sigpGsq, coord = \"G\", nest = True) \n else:\n hp.fitsfunc.write_map(out_root + \"planck_sigpGsq_DR2sky.fits\", hp_sigpGsq, coord = \"G\", nest = True)", "def pointwise_mi(pi, pj, pij):\n return log(pij) - log(pi) - log(pj)", "def wigner_gaunt(l1, l2, m):\n pref = sqrt((2*l1 + 1)*(2*l2 + 1)/(4*pi))\n return np.array([pref*sqrt(2*lpp + 1)*float(wigner_3j(l1,l2,lpp,m,-m,0)*wigner_3j(l1,l2,lpp,0,0,0))\n for lpp in range(abs(l1-l2), l1+l2+1, 2)])", "def est_rel_entro_HJW(sampP, sampQ):\r\n \r\n sampP = formalize_sample(sampP)\r\n sampQ = formalize_sample(sampQ)\r\n \r\n [m, sizeP] = sampP.shape\r\n [n, seq_num] = sampQ.shape\r\n n = float(n)\r\n m = float(m)\r\n\r\n if (sizeP != seq_num):\r\n raise Exception('Input arguments P and Q must have the same number '\r\n 'of columns')\r\n [c_1, MLE_const] = const_gen(n)\r\n c_1 = np.tile(c_1, [1, seq_num])\r\n\r\n # order <= 21 to avoid floating point errors\r\n order = min(4 + math.ceil(1.2 * math.log(n)), 21)\r\n poly_entro = np.load('poly_coeff_entro.npy')\r\n coeff = -np.array(poly_entro[int(order)][1:])\r\n\r\n# empirical distros + fingerprints\r\n#\r\n# Map non-consecutive integer samples to consecutive integer numbers\r\n# along each column of X and Y (which start with 1 and end with the\r\n# total number of distinct samples in each column). For example,\r\n# [ 1 6 4 ] [ 1 3 3 ]\r\n# [ 2 6 3 ] -----> [ 2 3 2 ]\r\n# [ 3 2 2 ] [ 3 1 1 ]\r\n# [ 1e5 3 100 ] [ 4 2 4 ]\r\n# The purpose of this mapping is to shrink the effective data range to\r\n# avoid possible numerical overflows.\r\n\r\n concat = np.vstack([sampP, sampQ])\r\n [PQ_len, PQ_wid] = concat.shape\r\n [PQ_seq, dex] = [np.sort(concat, axis=0), np.argsort(concat, axis=0)]\r\n \r\n rows = np.mod(dex + np.arange(PQ_wid)*PQ_len, PQ_len)\r\n cols = np.tile(np.arange(PQ_wid), (PQ_len, 1))\r\n vals = np.cumsum(np.vstack([np.ones((1, PQ_wid), dtype=np.int64),\r\n np.sign(np.diff(PQ_seq, axis=0))]), axis=0)\r\n PQ_seq[rows, cols] = vals.reshape(PQ_seq[rows, cols].shape)\r\n S = np.amax(PQ_seq)\r\n sampP = PQ_seq[:int(m)]\r\n sampQ = PQ_seq[int(m):]\r\n\r\n e_p = np.apply_along_axis(lambda x: np.bincount(x - 1, minlength=S),\r\n axis=0, arr=sampP)\r\n e_q = np.apply_along_axis(lambda x: np.bincount(x - 1, minlength=S),\r\n axis=0, arr=sampQ)\r\n bins = np.amax(np.hstack([e_p, e_q]))\r\n prob_q = np.arange(bins + 1)[:, np.newaxis] / n\r\n prob_mat = log_mat(prob_q, n, coeff, c_1, MLE_const)\r\n\r\n sum_p = np.zeros(prob_mat.shape)\r\n for row_iter in np.nditer((np.unique(e_q))[:, np.newaxis]):\r\n sum_p[row_iter] = np.sum(e_p * (e_q == row_iter), axis=0) / m\r\n d = np.sum((sum_p * prob_mat), axis=0) / math.log(2)\r\n entro = est_entro_JVHW(sampP)\r\n return np.maximum(0, - entro - d)", "def pulp_smash():", "def _holt_win_add_mul_dam(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, gamma, phi, alphac, betac, gammac, y_alpha, y_gamma = _holt_win_init(\n x, xi, p, y, l, b, s, m)\n if alpha * beta == 0.0:\n return max_seen\n if beta > alpha or gamma > 1 - alpha:\n return max_seen\n for i in range(1, n):\n l[i] = (y_alpha[i - 1] / s[i - 1]) + \\\n (alphac * (l[i - 1] + phi * b[i - 1]))\n b[i] = (beta * (l[i] - 
l[i - 1])) + (betac * phi * b[i - 1])\n s[i + m - 1] = (y_gamma[i - 1] / (l[i - 1] + phi *\n b[i - 1])) + (gammac * s[i - 1])\n return sqeuclidean((l + phi * b) * s[:-(m - 1)], y)", "def weber_MS(I,J,x,y,w):\n M = max([((x[i]-x[j])**2 + (y[i]-y[j])**2) for i in I for j in I])\n model = Model(\"weber - multiple source\")\n X,Y,v,u = {},{},{},{}\n xaux,yaux,uaux = {},{},{}\n for j in J:\n X[j] = model.addVar(lb=-model.infinity(), vtype=\"C\", name=\"X(%s)\"%j)\n Y[j] = model.addVar(lb=-model.infinity(), vtype=\"C\", name=\"Y(%s)\"%j)\n for i in I:\n v[i,j] = model.addVar(vtype=\"C\", name=\"v(%s,%s)\"%(i,j))\n u[i,j] = model.addVar(vtype=\"B\", name=\"u(%s,%s)\"%(i,j))\n xaux[i,j] = model.addVar(lb=-model.infinity(), vtype=\"C\", name=\"xaux(%s,%s)\"%(i,j))\n yaux[i,j] = model.addVar(lb=-model.infinity(), vtype=\"C\", name=\"yaux(%s,%s)\"%(i,j))\n uaux[i,j] = model.addVar(vtype=\"C\", name=\"uaux(%s,%s)\"%(i,j))\n\n\n\n for i in I:\n model.addCons(quicksum(u[i,j] for j in J) == 1, \"Assign(%s)\"%i)\n for j in J:\n model.addCons(xaux[i,j]*xaux[i,j] + yaux[i,j]*yaux[i,j] <= v[i,j]*v[i,j], \"MinDist(%s,%s)\"%(i,j))\n model.addCons(xaux[i,j] == (x[i]-X[j]), \"xAux(%s,%s)\"%(i,j))\n model.addCons(yaux[i,j] == (y[i]-Y[j]), \"yAux(%s,%s)\"%(i,j))\n model.addCons(uaux[i,j] >= v[i,j] - M*(1-u[i,j]), \"uAux(%s,%s)\"%(i,j))\n\n model.setObjective(quicksum(w[i]*uaux[i,j] for i in I for j in J), \"minimize\")\n\n\n model.data = X,Y,v,u\n return model", "def mi(x,y,k=3,base=2):\n x = [[entry] for entry in x]\n y = [[entry] for entry in y]\n assert len(x)==len(y), \"Lists should have same length\"\n assert k <= len(x) - 1, \"Set k smaller than num. samples - 1\"\n intens = 1e-10 #small noise to break degeneracy, see doc.\n x = [list(p + intens*nr.rand(len(x[0]))) for p in x]\n y = [list(p + intens*nr.rand(len(y[0]))) for p in y]\n points = zip2(x,y)\n #Find nearest neighbors in joint space, p=inf means max-norm\n tree = ss.cKDTree(points)\n dvec = [tree.query(point,k+1,p=float('inf'))[0][k] for point in points]\n a,b,c,d = avgdigamma(x,dvec), avgdigamma(y,dvec), digamma(k), digamma(len(x)) \n return (-a-b+c+d)/log(base)", "def generate_good(self, m, n, rank, mu=2, ka=2):\n sr = random.random()\n s = []\n s.append(sr)\n for r in range(rank-1):\n newele = s[-1] * (1 + ka * random.random() / (rank-1))\n s.append(newele)\n s.reverse()\n \n # best_u = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # A = np.random.rand(m,m)\n # A = scipy.linalg.orth(A)\n # u = A[:, :rank]\n # mu0 = self.compute_mu(u, m, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_u = u\n # print(\"mu0 for u:\", best_mu0)\n # # print(u.T @ u)\n \n # best_v = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # B = np.random.rand(n,n)\n # B = scipy.linalg.orth(B)\n # v = B[:, :rank]\n # mu0 = self.compute_mu(v, n, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_v = v\n # print(\"mu0 for v:\", best_mu0)\n # u = best_u\n # v = best_v\n\n for _ in range(100):\n A = np.random.rand(m,m)\n A = scipy.linalg.orth(A)\n u = A[:, :rank]\n mu0 = self.compute_mu(u, m, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for u:\", mu0) \n\n for _ in range(10):\n B = np.random.rand(n,n)\n B = scipy.linalg.orth(B)\n v = B[:, :rank]\n mu0 = self.compute_mu(v, n, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for both:\", mu0)\n\n matrix = np.dot(u*s, v.T)\n \n kappa = s[0] / 
s[-1]\n print(\"kappa=\", kappa)\n \n ss = np.copy(s)\n for k in range(rank):\n ss[k] = s[k] / s[0]\n \n max_entry = np.max(np.abs(np.outer(u[:,:rank], v.T[:rank,:])))\n mu1 = max_entry * math.sqrt(m * n / rank)\n print(\"mu1=\", mu1)\n \n return matrix", "def _holt_win_mul_add_dam(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, gamma, phi, alphac, betac, gammac, y_alpha, y_gamma = _holt_win_init(\n x, xi, p, y, l, b, s, m)\n if alpha * beta == 0.0:\n return max_seen\n if beta > alpha or gamma > 1 - alpha:\n return max_seen\n for i in range(1, n):\n l[i] = (y_alpha[i - 1]) - (alpha * s[i - 1]) + \\\n (alphac * (l[i - 1] * b[i - 1]**phi))\n b[i] = (beta * (l[i] / l[i - 1])) + (betac * b[i - 1]**phi)\n s[i + m - 1] = y_gamma[i - 1] - \\\n (gamma * (l[i - 1] * b[i - 1]**phi)) + (gammac * s[i - 1])\n return sqeuclidean((l * phi * b) + s[:-(m - 1)], y)" ]
[ "0.64339936", "0.6327372", "0.60780907", "0.58735853", "0.5789274", "0.5718296", "0.5703921", "0.56716573", "0.5662825", "0.5588504", "0.55884796", "0.55807567", "0.55518264", "0.55494714", "0.55421656", "0.55327725", "0.55300945", "0.55287695", "0.55058295", "0.5497134", "0.5490854", "0.54837155", "0.546939", "0.5468225", "0.5450482", "0.54477704", "0.5432521", "0.5425564", "0.5411841", "0.5388964" ]
0.7943096
0
calculate gamma passing rate as the percentage of voxels with gamma index <= 1.0. Optionally, a mask may be provided which excludes all voxels with mask==0
def gamma_passing_rate(gamma_map, mask=None): if mask is None: passing = np.count_nonzero(gamma_map<=1.0) total = gamma_map.size else: mask = mask.astype(bool) masked_gamma_map = gamma_map[mask] try: passing = np.count_nonzero(masked_gamma_map<=1.0) total = masked_gamma_map.size except: passing = 0 total = 0 logger.warning('Mask contained no Truth values, passing rate cannot be calculated') return passing, total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gamma(x):\n return 0.0", "def get_gamma(self, conv_op):", "def general_gamma_binary(uv, wavel, sep, PA, contrast):\n x, y = uv[:, 0], uv[:, 1]\n k = 2 * np.pi / wavel\n beta = mas2rad(sep)\n th = np.deg2rad(PA)\n i2 = 1\n i1 = 1 / contrast\n phi1 = k * x * beta * np.cos(th)/2\n phi2 = k * y * beta * np.sin(th)/2\n out = i1 * np.exp(-1j * (phi1 + phi2))\n out += i2 * np.exp(1j * (phi1 + phi2))\n return out / (i1 + i2)", "def gamma(k, z):\n return 1", "def Gamma_per_grain(ZZall, Gamma_a_Z, ZZ_fz, fdist, GG):\n\n # index in the ZZall array for the charges in ZZ_fz\n zi_down = np.where(ZZall == ZZ_fz[0])[0][0]# find the index of the ZZ_fz[0] in ZZall \n zi_up = np.where(ZZall == ZZ_fz[-1])[0][0]# find the index of the ZZ_fz[-1] in ZZall\n \n #Gamma_pe_a = np.sum(fz*Gamma_dotdot_scaled[zi_down:zi_up+1])\n Gamma_pe_a = np.sum(fdist*Gamma_a_Z[zi_down:zi_up+1])\n \n return Gamma_pe_a", "def calculateGammaFactors(self):\n return (self.time/self.expectedDuration)**self.beta", "def abv(og, fg):\n return abw(og, fg) * fg / 0.794", "def gamma(x):\r\n gammax = ((x + 0.055) / 1.055) ** 2.4 if x > 0.04045 else x / 12.92\r\n return gammax", "def guestimate_gamma(x_data, time):\n ga0 = np.clip(np.log(max(x_data[0], 0) / (x_data[-1] + 1e-6)) / time[-1], 1e-3, 1e3)\n return ga0", "def do_gamma(im, gamma):\n invert_gamma = 1.0/gamma\n lut = [pow(x/255., invert_gamma) * 255 for x in range(256)]\n lut = lut*3 # need one set of data for each band for RGB\n im = im.point(lut)\n return im", "def calculate_gamma(self, x):\n return (1 - self.keep_prob) * x.shape[-1] ** 2 / \\\n (self.block_size ** 2 * (x.shape[-1] - self.block_size + 1) ** 2)", "def calc_gamma_1d(x, fx, dfx, y, fpy, dfpy):\n beta = calc_beta(fx, dfx)\n print ' using beta = ', beta\n # upper and lower bounds\n sorted_x = numpy.array(sorted(x))\n sorted_y = numpy.array(sorted(y))\n if y.size == 0:\n delta_max = x.max() - x.min()\n delta_min = (sorted_x[1:] - sorted_x[:-1]).min()\n else:\n delta_max = max(x.max(), y.max()) - min(x.min(), y.min())\n delta_min = min((sorted_x[1:] - sorted_x[:-1]).min(), \\\n (sorted_y[1:] - sorted_y[:-1]).min())\n assert delta_max > delta_min\n gamma_min = 1. 
/ delta_max\n gamma_max = pi / delta_min\n # logorithmic bisection for gamma\n while gamma_max / gamma_min > 1.1:\n print ' bisecting [', gamma_min, ',', gamma_max, '] for gamma...'\n gamma_mid = sqrt(gamma_max * gamma_min)\n res_ratio = calc_res_ratio_avg_1d(beta, gamma_mid, x, fx, dfx, y, fpy, dfpy)\n if res_ratio < 1.0:\n gamma_max = gamma_mid\n else:\n gamma_min = gamma_mid\n # final selected gamma\n gamma_mid = sqrt(gamma_max * gamma_min)\n print ' using gamma = ', gamma_mid\n return gamma_mid", "def gamma(flag, S, K, t, r, sigma, q):\n\n b = r-q\n\n return numerical_gamma(flag, S, K, t, r, sigma, b, f)", "def gamma(self):\r\n raise NotImplementedError('not implemented yet, will use spouge approximation')", "def mask_evaluation(annotation_mask, result_mask, idx):\n\n true_positive = np.sum(np.logical_and(annotation_mask == 255, result_mask == 255)) \n false_positive = np.sum(np.logical_and(result_mask == 255, annotation_mask != result_mask))\n false_negative = np.sum(np.logical_and(annotation_mask == 255, annotation_mask != result_mask))\n\n precision = true_positive / (true_positive + false_positive)\n recall = true_positive / (true_positive + false_negative)\n f1_measure = 2 * ((precision * recall) / (precision + recall))\n\n return recall, precision, f1_measure", "def gammaFun(xx, minLum, maxLum, gamma, eq=1, a=None, b=None, k=None):\n # scale x to be in range minLum:maxLum\n xx = numpy.array(xx, 'd')\n maxXX = max(xx)\n if maxXX > 2.0:\n # xx = xx * maxLum / 255.0 + minLum\n xx = old_div(xx, 255.0)\n else: # assume data are in range 0:1\n pass\n # xx = xx * maxLum + minLum\n\n # eq1: y = a + (b*xx)**gamma\n # eq2: y = (a + b * xx)**gamma\n # eq4: y = a + (b + k*xx)**gamma # Pelli & Zhang 1991\n if eq == 1:\n a = minLum\n b = (maxLum - a)**(old_div(1, gamma))\n yy = a + (b * xx)**gamma\n elif eq == 2:\n a = minLum**(old_div(1, gamma))\n b = maxLum**(old_div(1, gamma)) - a\n yy = (a + b * xx)**gamma\n elif eq == 3:\n # NB method 3 was an interpolation method that didn't work well\n pass\n elif eq == 4:\n nMissing = sum([a is None, b is None, k is None])\n # check params\n if nMissing > 1:\n msg = \"For eq=4, gammaFun needs 2 of a, b, k to be specified\"\n raise AttributeError(msg)\n elif nMissing == 1:\n if a is None:\n a = minLum - b**(old_div(1.0, gamma)) # when y=min, x=0\n elif b is None:\n if a >= minLum:\n b = 0.1**(old_div(1.0, gamma)) # can't take inv power of -ve\n else:\n b = (minLum - a)**(old_div(1.0, gamma)) # when y=min, x=0\n elif k is None:\n k = (maxLum - a)**(old_div(1.0, gamma)) - b # when y=max, x=1\n # this is the same as Pelli and Zhang (but different inverse function)\n yy = a + (b + k * xx)**gamma # Pelli and Zhang (1991)\n\n return yy", "def calculate_gamma(self):\n result = self.result\n # scaler = preprocessing.StandardScaler()\n # train_minmax = scaler.fit_transform(result)\n # st_rho, st_delta = train_minmax[:, 0], train_minmax[:, 1]\n # self.gamma = (st_delta + st_rho) / 2\n self.gamma = result[:, 0] * result[:, 1]\n self.gamma_des_index = np.argsort(-self.gamma)", "def discount(x, gamma):\n\n return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]", "def discount(x, gamma):\n return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]", "def fgausbg(v,p):\n return np.exp(-0.5 * ((v[0] - p[0]) / p[1])**2) * p[2] + p[3]", "def solve_gamma(t, old, total):\n\n old, total = np.mean(old), np.mean(total)\n gamma = -1 / t * np.log(old / total)\n\n return gamma", "def adjust_brightness(image, mask, gamma):\r\n\r\n\tassert image.shape[:2] == 
mask.shape and gamma > 0\r\n\r\n\t## to increase the number of channel of the mask to three so that we can apply the masks\r\n\t## to image\r\n\tmasks = np.stack([mask, mask, mask], axis = -1)\r\n\r\n\tscale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])\r\n\r\n\toutput = np.where(masks == 1,\r\n\t\t\t\t\t\t(image / scale) ** (1 / gamma) * scale,\r\n\t\t\t\t\t\timage)\r\n\r\n\t## to make sure the pixel intensity is within the range of uint8\r\n\toutput = np.clip(output, 0, 255).astype(np.uint8)\r\n\r\n\treturn output", "def gamma(a,b,c,d):\n g1 = max((c + d) * (1 - b) * b / (c*d * np.math.log(2)), 0.0)\n g2 = max((c + d) * 21**2 / (c*d * (1 - b) * b*a**2), 1.0)\n g = np.math.sqrt(g1 * np.math.log(g2, 2))\n return g", "def _gamma_from_drawdown_control(self):\n # From all previous total portfolio values get the highest one\n previous_peak = np.max(self.prev_port_vals)\n\n drawdown_t = 1 - self.prev_port_vals[-1] / previous_peak\n denom = np.max([self.max_drawdown - drawdown_t, self.eps]) # If drawdown limit is breached use a number very close to zero\n gamma = self.gamma_0 * self.max_drawdown / denom\n\n return gamma", "def discount(x, gamma):\n return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]", "def gamma_expansion(image):\n # Clamps to prevent numerical instability of gradients near zero.\n return image.clamp(1e-8) ** 2.2", "def apply_gamma_correction(img, gamma=1.2):\r\n shape = img.shape\r\n if len(shape) > 2 and shape[2] == 1:\r\n img = img.reshape(shape[:2])\r\n\r\n assert img.dtype == np.uint8, '0 to 255 uint8 for pixel values are expected'\r\n assert len(img.shape) == 2, 'only on one channel'\r\n\r\n inv_gamma = 1.0 / gamma\r\n table = np.array([((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]).astype(np.uint8)\r\n return cv2.LUT(np.array(img, dtype=np.uint8), table).reshape(shape)", "def F_tot(r, v, a, gamma, kT, dt, rc):\n nr = norm_numba(r)\n ftot = a * wR(r, rc) * r/nr \\\n - gamma * wR(r, rc)**2 * dot_numba(r, v) * r/nr**2 \\\n + sqrt(2*gamma*kT) * wR(r, rc) * randn() / sqrt(dt) * r/nr\n return ftot", "def get_fluxes_within_mask(tpf, aper_mask, gaia_sources):\n assert tpf is not None\n assert aper_mask is not None\n assert gaia_sources is not None\n ra, dec = gaia_sources[[\"ra\", \"dec\"]].values.T\n pix_coords = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)\n contour_points = measure.find_contours(aper_mask, level=0.1)[0]\n isinside = [\n is_point_inside_mask(contour_points, pix) for pix in pix_coords\n ]\n min_gmag = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].min()\n gamma = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].apply(\n lambda x: 10 ** (0.4 * (min_gmag - x))\n )\n return gamma", "def mask_density(mask):\n return get_number_of_unpruned_weights(mask).float() / get_number_of_weights(mask).float()" ]
[ "0.6066738", "0.5949536", "0.5939618", "0.59219104", "0.5909373", "0.5858351", "0.5855014", "0.5844399", "0.5783322", "0.577106", "0.56844586", "0.5661877", "0.5652896", "0.56257683", "0.5585807", "0.55354697", "0.55228657", "0.5496603", "0.54677624", "0.54339266", "0.54067963", "0.5378412", "0.53752416", "0.53624386", "0.5352631", "0.53481615", "0.533838", "0.53266245", "0.52963406", "0.52944565" ]
0.7279056
0
Get the KML note label; use the tag if it exists, or the content otherwise
def getLabel(self): return self.content[:12]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLabel(self):\n result = self.content[:12]\n if result == \"\":\n if self.tags:\n result = str(self.tags.first)\n return result", "def bb_label(hit):\n try:\n labelid = hit.group(1)\n label = Label.objects.get(id=labelid)\n T = loader.get_template('webview/t/label.html')\n C = Context({ 'L' : label })\n return T.render(C)\n except:\n # Usually thrown if the ID is invalid or doesn't exist\n return '[label]%s[/label]' % (labelid)", "def get_label(repo, title, verbose=None):\n if verbose:\n print \"Checking for label...\"\n label = None\n label_text = None\n try:\n label_start = 1 + title.index('(')\n label_end = title.index(')')\n label_text = title[label_start:label_end]\n except ValueError, e:\n print \"Warning: This tile has no embeded label. {0}\".format(e)\n if label_text:\n try:\n label = [repo.get_label(label_text)]\n if verbose:\n print \"Found label: {0}\".format(label)\n except UnknownObjectException, e:\n print \"Error: The label '{0}' does not exist on \" \\\n \"Github. {1}\".format(label_text, e)\n return label", "def label(self):\r\n return self._text", "def _get_label(self):\n return self.label", "def get_label(self, ):\n return self.attrs.get(self.AttributeNames.LABEL, None)", "def get_label(settings):", "def Label(self, default=None):\n return self.data.get('label', default)", "def Label(self, default=None):\n return self.data.get('label', default)", "def label(self) -> str:\n return self[\"label\"]", "def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment", "def getMetaLabel(self, idx):\n return self.label_dict[idx].decode(\"utf-8\")", "def title_content(label=\"A title\"):\n return {'label':label}", "def bb_labelname(hit):\n try:\n real_name = hit.group(1)\n L = Label.objects.get(name=real_name)\n T = loader.get_template('webview/t/label.html')\n C = Context({ 'L' : L })\n return T.render(C)\n except:\n # This will throw if the requested label is spelt incorrectly, or doesnt exist\n return '<img src=\"/static/transmit.png\" alt=\"Invalid Label\" border=\"0\" /> %s' % (real_name)", "def _processLabel(self, kitti_label):\n label = {\n 'category': kitti_label['type'].lower(),\n 'box2D': kitti_label['bbox'].copy(),\n 'box3D': {\n 'location': {\n 'x': kitti_label['location']['x'],\n 'y': kitti_label['location']['y'] - kitti_label['dimensions']['height'] / 2.0, # move to center\n 'z': kitti_label['location']['z'],\n },\n 'dimensions': kitti_label['dimensions'].copy(),\n 'rotation_y': kitti_label['rotation_y'],\n },\n 'info': {\n 'truncated': kitti_label['truncated'],\n 'occluded': kitti_label['occluded'],\n },\n }\n if 'trackId' in kitti_label:\n # set trackId if given\n label['info']['trackId'] = kitti_label['trackId']\n return label", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name", "def tests_ti_document_get_label(self):\n super().group_get_label()", "def label(tree):\n return tree[0]", "def label(self) -> Optional[str]:\n return self._itempage.labels.get(\"en\", None)", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", 
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def get_label(self, key):\n return self.labels.get(key, None)", "def text(self):\n if hasattr(self,'label'):\n return str(self.label.text())\n else:\n return self.key", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def get_label(self, which_label: str, extra_label: str) -> str:\n result = self.row_dict.get(extra_label)\n if result:\n # We will use this label\n pass\n elif which_label == 'first_label':\n header = self.row_header\n first_label = next((i for i in header if i.startswith('label')),\n None)\n if first_label is None:\n raise LabelNotFoundError()\n result = self.row_dict[first_label]\n elif which_label in self.row_dict:\n result = self.row_dict[which_label]\n else:\n raise LabelNotFoundError()\n str_result = str(result)\n return str_result" ]
[ "0.68905085", "0.60365885", "0.60078704", "0.5971835", "0.592358", "0.5905867", "0.5823123", "0.58201313", "0.58201313", "0.57901335", "0.57656944", "0.5765235", "0.5754746", "0.57501554", "0.5739777", "0.57387817", "0.57319796", "0.57198286", "0.57072467", "0.5698205", "0.5698205", "0.5698205", "0.5698205", "0.5698205", "0.5698205", "0.5698205", "0.56695044", "0.5662329", "0.5650748", "0.56438273" ]
0.61446136
1
Get the KML note label; use the tag if it exists, or the content otherwise
def getLabel(self): result = self.content[:12] if result == "": if self.tags: result = str(self.tags.first) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLabel(self):\n return self.content[:12]", "def bb_label(hit):\n try:\n labelid = hit.group(1)\n label = Label.objects.get(id=labelid)\n T = loader.get_template('webview/t/label.html')\n C = Context({ 'L' : label })\n return T.render(C)\n except:\n # Usually thrown if the ID is invalid or doesn't exist\n return '[label]%s[/label]' % (labelid)", "def get_label(repo, title, verbose=None):\n if verbose:\n print \"Checking for label...\"\n label = None\n label_text = None\n try:\n label_start = 1 + title.index('(')\n label_end = title.index(')')\n label_text = title[label_start:label_end]\n except ValueError, e:\n print \"Warning: This tile has no embeded label. {0}\".format(e)\n if label_text:\n try:\n label = [repo.get_label(label_text)]\n if verbose:\n print \"Found label: {0}\".format(label)\n except UnknownObjectException, e:\n print \"Error: The label '{0}' does not exist on \" \\\n \"Github. {1}\".format(label_text, e)\n return label", "def label(self):\r\n return self._text", "def _get_label(self):\n return self.label", "def get_label(self, ):\n return self.attrs.get(self.AttributeNames.LABEL, None)", "def get_label(settings):", "def Label(self, default=None):\n return self.data.get('label', default)", "def Label(self, default=None):\n return self.data.get('label', default)", "def label(self) -> str:\n return self[\"label\"]", "def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment", "def getMetaLabel(self, idx):\n return self.label_dict[idx].decode(\"utf-8\")", "def title_content(label=\"A title\"):\n return {'label':label}", "def bb_labelname(hit):\n try:\n real_name = hit.group(1)\n L = Label.objects.get(name=real_name)\n T = loader.get_template('webview/t/label.html')\n C = Context({ 'L' : L })\n return T.render(C)\n except:\n # This will throw if the requested label is spelt incorrectly, or doesnt exist\n return '<img src=\"/static/transmit.png\" alt=\"Invalid Label\" border=\"0\" /> %s' % (real_name)", "def _processLabel(self, kitti_label):\n label = {\n 'category': kitti_label['type'].lower(),\n 'box2D': kitti_label['bbox'].copy(),\n 'box3D': {\n 'location': {\n 'x': kitti_label['location']['x'],\n 'y': kitti_label['location']['y'] - kitti_label['dimensions']['height'] / 2.0, # move to center\n 'z': kitti_label['location']['z'],\n },\n 'dimensions': kitti_label['dimensions'].copy(),\n 'rotation_y': kitti_label['rotation_y'],\n },\n 'info': {\n 'truncated': kitti_label['truncated'],\n 'occluded': kitti_label['occluded'],\n },\n }\n if 'trackId' in kitti_label:\n # set trackId if given\n label['info']['trackId'] = kitti_label['trackId']\n return label", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name", "def tests_ti_document_get_label(self):\n super().group_get_label()", "def label(tree):\n return tree[0]", "def label(self) -> Optional[str]:\n return self._itempage.labels.get(\"en\", None)", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, 
\"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")", "def get_label(self, key):\n return self.labels.get(key, None)", "def text(self):\n if hasattr(self,'label'):\n return str(self.label.text())\n else:\n return self.key", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def get_label(self, which_label: str, extra_label: str) -> str:\n result = self.row_dict.get(extra_label)\n if result:\n # We will use this label\n pass\n elif which_label == 'first_label':\n header = self.row_header\n first_label = next((i for i in header if i.startswith('label')),\n None)\n if first_label is None:\n raise LabelNotFoundError()\n result = self.row_dict[first_label]\n elif which_label in self.row_dict:\n result = self.row_dict[which_label]\n else:\n raise LabelNotFoundError()\n str_result = str(result)\n return str_result" ]
[ "0.61443484", "0.6037176", "0.6007812", "0.5971338", "0.59226274", "0.5905204", "0.5823537", "0.58204675", "0.58204675", "0.5789757", "0.57661796", "0.5764314", "0.575489", "0.5750842", "0.5740014", "0.5737503", "0.57311463", "0.5720551", "0.57075447", "0.56981486", "0.56981486", "0.56981486", "0.56981486", "0.56981486", "0.56981486", "0.56981486", "0.56685185", "0.5661127", "0.5651006", "0.5642852" ]
0.6890144
0
Overrides default queryset to only display parent items
def get_queryset(self, request):
    query = super(GipsyMenu, self).get_queryset(request)
    return query.filter(parent__isnull=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def queryset(self, request, queryset):\r\n # Compare the requested value to decide how to filter the queryset.\r\n if self.value():\r\n return queryset.filter(parent_id=self.value())\r\n return queryset", "def get_children_queryset(self):\n pass", "def get_query_set(self):\r\n return super(TopLevelManager, self).get_query_set().filter(parent=None, hidden=False)", "def parent(self, parent_object):\n lookup = get_parent_lookup_kwargs(parent_object)\n return self.filter(**lookup)", "def parent(self, parent_object, limit_parent_language=True):\n lookup = get_parent_lookup_kwargs(parent_object)\n\n # Filter the items by default, giving the expected \"objects for this parent\" items\n # when the parent already holds the language state.\n if limit_parent_language:\n language_code = get_parent_language_code(parent_object)\n if language_code:\n lookup[\"language_code\"] = language_code\n\n return self.filter(**lookup)", "def parent(self, parent_object, limit_parent_language=True):\n return self.all().parent(parent_object, limit_parent_language)", "def parents(self):\n addresses = self.parent_addresses\n q = models.Q()\n for address in addresses:\n q |= models.Q(address=address)\n\n if addresses:\n pages = Page.objects.filter(q)\n\n # Order the pages in a hierarchy\n pages = sorted(list(pages), key=lambda page: addresses.index(page.address))\n else:\n pages = []\n\n return pages", "def show_available_parents(self):\n self.categoryParent.clear()\n\n parents = self.orm.fetch_parents()\n self.categoryParent.addItems([p.name for p in parents])\n\n self.categoryParent.addItem('')\n self.categoryParent.setCurrentText('')", "def filter_ancestor(self, queryset, name, ancestor):\n\n return queryset.filter(\n parent__in=ancestor.get_descendants(include_self=True)\n )", "def get_queryset(self):\n parents_query_dict = self.get_parents_query_dict()\n questiongroup_id = parents_query_dict['questiongroup']\n return QuestionGroup_Questions.objects\\\n .filter(questiongroup_id=questiongroup_id)\\\n .order_by('sequence')", "def get_queryset(self):\n if getattr(self, 'swagger_fake_view', False):\n return NotesGroup.objects.none()\n\n return NotesGroup.objects.prefetch_related(\n \"parent\"\n ).filter(parent__members__in=[self.request.user])", "def queryset(self, request):\n qs = super(ChildAdmin, self).queryset(request)\n if request.user.is_superuser:\n \treturn qs\n if request.user.user_category == 'block':\n \treturn qs.filter(block=request.user.account.associated_with)\n if request.user.user_category == 'school':\n \treturn qs.filter(school=request.user.account.associated_with)\n if request.user.user_category == 'district':\n \treturn qs.filter(district=request.user.account.associated_with)\n # Register your models here.", "def get_parents_list(self):\n return []", "def queryset(self, request):\n qs = super(SiteAdmin, self).queryset(request)\n qs = Site.admin.select_related().filter(id__in=qs)\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs", "def parentItem(self):\n return None", "def getParents(self):\n return self.parents[:]", "def get_queryset(self):\n\t\treturn super(CourseDocument, self).get_queryset().select_related(\n\t\t 'belongs_to'\n\t\t)", "def parent_resources(cls):\n parent = cls.parent_resource\n parents = [parent]\n\n try:\n while True:\n parent = parent.parent_resource\n parents.append(parent)\n except AttributeError:\n pass\n\n parents.reverse()\n return parents", "def parent_comments_in_reverse_order(self):\n return 
self.exclude(parent__isnull=False).order_by(\"-created_at\")\\\n .select_related(\"user\")", "def get_queryset(self, *args, **kwargs):\n return CommentQuerySet(self.model, using=self._db).order_by(\n self.tree_id_attr,\n self.left_attr\n )", "def get_queryset(self):\n if getattr(self, 'use_this_queryset', None):\n return self.use_this_queryset\n return self.model().objects.all()", "def get_queryset(self):\n return Item.objects.filter(owner=self.request.user).order_by('-created').prefetch_related('tags')", "def get_filterable_queryset(self):\n site = self.get_site()\n\n if not site:\n return self.get_model_class().objects.none()\n\n queryset = self.get_model_class().objects.in_site(site).live()\n\n filterable_list_block = self.get_filterable_list_wagtail_block()\n if filterable_list_block is None:\n return queryset\n\n if filterable_list_block.value['filter_children']:\n queryset = queryset.child_of(self)\n elif filterable_list_block.value['filter_siblings']:\n queryset = queryset.sibling_of(self)\n\n return queryset", "def get_parents(self):\n return self.parents", "def fm_all_parents(self):\n return self._relation_lst[self.PARENT].copy()", "def get_queryset(self):\n return self._get_base_queryset().filter(deleted__isnull=True)", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.prefetch_related('work__writers')\n qs = qs.prefetch_related('artist')\n qs = qs.prefetch_related('record_label')\n return qs", "def filter_top_level(item):\n return item.parent_item is None", "def get_queryset(self):\n return None", "def get_queryset(self):\n return None" ]
[ "0.7645417", "0.69569004", "0.68567455", "0.6710352", "0.6638177", "0.6477407", "0.63991666", "0.63664937", "0.62955433", "0.6254188", "0.6236161", "0.6217325", "0.6079583", "0.6017169", "0.59940475", "0.58996534", "0.58132213", "0.57855225", "0.5774182", "0.5771834", "0.5758726", "0.575191", "0.5743892", "0.5742276", "0.57404333", "0.57374763", "0.5736643", "0.57245463", "0.5718302", "0.5718302" ]
0.75941396
1
Context manager that ignores all of the specified exceptions. This will be in the standard library starting with Python 3.4.
def ignored(*exceptions):
    try:
        yield
    except exceptions:
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def suppress(*exceptions):\n try:\n yield\n except exceptions:\n pass", "def suppress(*exceptions):\n try:\n yield\n except exceptions:\n pass", "def try_safety():\n try:\n yield\n except Exception as e:\n pass", "def ignored(*exceptions):\n import logging\n import pprint\n try:\n yield\n except exceptions:\n logging.warning(pprint.pformat(exceptions[0]))\n pass", "def unexpectedException(self):", "def try_reraise(*args: Any, **kwargs: Any) -> Any:\n try:\n yield\n except Exception: # pylint: disable=broad-except\n reraise(*args, **kwargs)", "def thread_exceptions():\n exceptions = []\n # Python 3.8+\n orig_hook = getattr(threading, 'excepthook', None)\n if orig_hook is not None:\n threading.excepthook = functools.partial(\n _thread_except_hook, exceptions,\n )\n try:\n yield exceptions\n finally:\n if orig_hook is not None:\n threading.excepthook = orig_hook", "def _wrap_exceptions(self):\n try:\n yield\n except OSError as err:\n if is_permission_err(err):\n raise AccessDenied(\n pid=None, name=self._name,\n msg=\"service %r is not querable (not enough privileges)\" %\n self._name)\n elif err.winerror in (cext.ERROR_INVALID_NAME,\n cext.ERROR_SERVICE_DOES_NOT_EXIST):\n raise NoSuchProcess(\n pid=None, name=self._name,\n msg=\"service %r does not exist)\" % self._name)\n else:\n raise", "def track_exceptions(f, caught, default=None):\n def _catch(_):\n caught.append(sys.exc_info())\n return default\n return excepts(Exception, f, _catch)", "def neutralContextManager():\n yield", "def catch_gracefully():\n def _outer(func):\n\n @functools.wraps(func)\n def _inner(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n if isinstance(e, IGNORE_EXCEPTIONS):\n raise\n else:\n logger.error(\"Catched exception %s when running %s\", e, func)\n logger.exception(e)\n\n return _inner\n\n return _outer", "def suppress_oserror(*errnos):\n try:\n yield\n except OSError as e:\n if e.errno not in errnos:\n raise e", "def except__else(self, exception: BaseException) -> typing.Any:\n raise exception", "def exception(self, *args, **kwargs):", "def unexpected_error(self, exception):", "def WrappedException(self) -> object:", "def from_none(exc):\n exc.__cause__ = None\n exc.__suppress_context__ = True\n return exc", "def report_unexpected_exception(self, *args, **kwargs):\n pass", "def __raise_clean_exception(exc_type, exc_value, exc_traceback):\n if exc_type.__name__ not in dir(napalm.exceptions) and \\\n exc_type.__name__ not in __builtins__.keys():\n epilog = (\"NAPALM didn't catch this exception. Please, fill a bugfix on \"\n \"https://github.com/napalm-automation/napalm/issues\\n\"\n \"Don't forget to include this traceback.\")\n print(epilog)\n raise exc_type, exc_value, exc_traceback", "def yield_and_raise(data, exc):\n yield from data\n raise exc", "def exception(self, e):\n pass", "def my_excepthook(tp,va,tb):\n # The API module names need to be augmented\n # with all other modules in GTC, as an error\n # may arise anywhere. `code` is the Python\n # module that provides the interpreter. `gtc`\n # is the module in which scripts are executed\n # so neither of these are accessible.\n\n modules = submodules + (\n 'context',\n 'lib',\n 'LU',\n 'uncertain_array',\n 'persistence',\n 'named_tuples',\n 'node',\n 'vector'\n # , 'cholesky','svd'\n )\n # These `outer_modules` provide the top level calling\n # context, which we need to remove.\n outer_modules = ('code','gtc')\n \n # Need to step through the traceback chain\n # until something in the GTC libraries is\n # found. 
The source for these is unavailable\n # to users, so we do not report beyond that.\n \n # `depth` will be set to avoid inaccessible\n # GTC modules\n outer = 0\n depth = 0\n t = tb\n while True: \n name = os.path.splitext(\n os.path.basename(t.tb_frame.f_code.co_filename)\n )[0]\n \n if name in outer_modules: outer += 1\n if name in modules: break\n \n depth += 1\n \n if t.tb_next is not None: \n t = t.tb_next\n else:\n break\n\n tb_text = traceback.format_exception(tp,va,tb,depth)\n\n # The first line is just the intro: `Traceback (most recent call last):`\n print(tb_text[0], file=sys.stderr) \n \n # Now skip any reference to the outer modules and also\n # skip any reference to GTC modules\n for l in tb_text[1+outer:]: \n print(l, file=sys.stderr)", "def suppress_exceptions(cls, value=True):\n cls._suppress_exceptions = value", "def main():\n cause_a_bunch_of_exceptions_to_happen()", "def assertNotRaises(self, exceptions, msg=None):\n try:\n yield\n except exceptions as e:\n self.fail(self._formatMessage(msg, f\"{e.__class__.__name__} raised.\"))", "def WrapNonExceptionThrows(self) -> bool:", "def multiprocess_except_hook(exctype, value, traceback):\n log.critical(\n 'Uncaught exception',\n exc_info=(exctype, value, traceback)\n )", "def test_context(self):\n\n with warnings.catch_warnings(record=True) as w:\n self.add_two(0)\n self.assertEqual(len(w), 1, \"Expected one warning, but got {} warnings.\".format(len(w)))\n\n with warnings.catch_warnings(record=True) as w:\n self.add_three(0)\n self.assertEqual(len(w), 0, \"Expected one warning, but got {} warnings.\".format(len(w)))\n\n with self.assertRaises(ZeroDivisionError, msg=\"Context manager should swallow unrelated exceptions\"), \\\n ImportAlarm(\"Unrelated\"):\n print(1/0)", "def patch_raise_exception() -> None:\n raise TrestleError('Forced raising of an errors')", "def handleExceptionsWrapper(*args, **kwargs):\n\n\t\t\t_exceptions__frame__ = True\n\n\t\t\ttry:\n\t\t\t\treturn object(*args, **kwargs)\n\t\t\texcept exceptions as error:\n\t\t\t\tfor handler in handlers:\n\t\t\t\t\thandler(error)" ]
[ "0.70430624", "0.7007877", "0.69663656", "0.68010443", "0.6427322", "0.6322548", "0.62421846", "0.62271523", "0.61808586", "0.60907197", "0.6061941", "0.60145235", "0.5924392", "0.59193337", "0.58716965", "0.5760053", "0.57316", "0.57221574", "0.56928086", "0.56844944", "0.56820786", "0.5661244", "0.56595695", "0.5654", "0.56364715", "0.55909055", "0.55869436", "0.55591667", "0.5556759", "0.55537367" ]
0.7026229
1
Create a Grid2DIterate (see Grid2DIterate.__new__) from a mask, where only unmasked pixels are included in the grid (if the grid is represented in 2D, masked values are (0.0, 0.0)). The mask's pixel_scales and origin properties are used to compute the grid (y,x) coordinates.
def from_mask(
    cls,
    mask: Mask2D,
    fractional_accuracy: float = 0.9999,
    relative_accuracy: Optional[float] = None,
    sub_steps: Optional[List[int]] = None,
) -> "Grid2DIterate":
    grid_slim = grid_2d_util.grid_2d_slim_via_mask_from(
        mask_2d=mask, pixel_scales=mask.pixel_scales, sub_size=1, origin=mask.origin
    )

    return Grid2DIterate(
        values=grid_slim,
        mask=mask.derive_mask.sub_1,
        fractional_accuracy=fractional_accuracy,
        relative_accuracy=relative_accuracy,
        sub_steps=sub_steps,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mask_grid(self):\n xg, yg = self._build_grid()\n mask = self._build_mask(xg, yg)\n mask = mask.reshape(xg.shape)\n\n return xg, yg, mask", "def via_mask_from(self, mask: Mask2D) -> Visuals2D:\r\n origin = self.origin_via_mask_from(mask=mask)\r\n mask_visuals = self.get(\"mask\", mask)\r\n border = self.get(\"border\", mask.derive_grid.border_sub_1.binned)\r\n\r\n return self.visuals + self.visuals.__class__(\r\n origin=origin, mask=mask_visuals, border=border\r\n )", "def origin_via_mask_from(self, mask: Mask2D) -> Grid2DIrregular:\r\n return self.get(\"origin\", Grid2DIrregular(values=[mask.origin]))", "def prepareMask(self, mask):\n\n # Make sure that the mask has the same\n # number of voxels as the atlas image.\n # Use nearest neighbour interpolation\n # for resampling, as it is most likely\n # that the mask is binary.\n try:\n mask, xform = resample.resample(\n mask, self.shape[:3], dtype=np.float32, order=0)\n\n except ValueError:\n raise MaskError('Mask has wrong number of dimensions')\n\n # TODO allow non-aligned mask - as long as it overlaps\n # in world coordinates, it should be allowed\n if not fslimage.Image(mask, xform=xform).sameSpace(self):\n raise MaskError('Mask is not in the same space as atlas')\n\n return mask", "def grid_2d_via_deflection_grid_from(\r\n self, deflection_grid: np.ndarray\r\n ) -> \"Grid2DIterate\":\r\n return Grid2DIterate(\r\n values=self - deflection_grid,\r\n mask=self.mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def mask(self, mask):\n return MaskedDistribution(self, mask)", "def flat_2D_grid(bounds, dx, dy):\n x = np.arange(bounds[0], bounds[1] + dx, dx)\n y = np.arange(bounds[2], bounds[3] + dy, dy)\n x_grid, y_grid = np.meshgrid(x, y)\n x_grid, y_grid = x_grid.flatten(), y_grid.flatten()\n\n return pd.DataFrame({'x': x_grid,\n 'y': y_grid,\n 'masked': np.zeros(x_grid.size, dtype='bool')})", "def padded_grid_from(self, kernel_shape_native: Tuple[int, int]) -> \"Grid2DIterate\":\r\n shape = self.mask.shape\r\n\r\n padded_shape = (\r\n shape[0] + kernel_shape_native[0] - 1,\r\n shape[1] + kernel_shape_native[1] - 1,\r\n )\r\n\r\n padded_mask = Mask2D.all_false(\r\n shape_native=padded_shape,\r\n pixel_scales=self.mask.pixel_scales,\r\n sub_size=self.mask.sub_size,\r\n )\r\n\r\n return Grid2DIterate.from_mask(\r\n mask=padded_mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def createmaskdf(mask_file):\n fh = netCDF4.Dataset(mask_file, 'r')\n lat = fh.variables['lat'][:]\n lon = fh.variables['lon'][:] + 360\n mask = fh.variables['mask'][:]\n lon, lat = np.meshgrid(lon, lat)\n mask_df = pd.DataFrame({'lat': lat.flatten(),\n 'lon': lon.flatten(),\n 'mask': mask.flatten()})\n # Retain only those entries with a mask value of 1\n mask_df = mask_df.loc[mask_df['mask'] == 1]\n # Drop unnecessary 'mask' column\n return mask_df.drop('mask', axis=1)", "def createmaskdf(mask_file):\r\n fh = netCDF4.Dataset(mask_file, 'r')\r\n lat = fh.variables['lat'][:]\r\n lon = fh.variables['lon'][:] + 360\r\n mask = fh.variables['mask'][:]\r\n lon, lat = np.meshgrid(lon, lat)\r\n mask_df = pd.DataFrame({'lat': lat.flatten(),\r\n 'lon': lon.flatten(),\r\n 'mask': mask.data.flatten()})\r\n # Retain only those entries with a mask value of 1\r\n mask_df = mask_df.loc[mask_df['mask'] == 1]\r\n # Drop unnecessary 'mask' column\r\n return mask_df.drop('mask', axis=1)", "def create_netcdf(pixels, out_path, scaled=False):\n dimensions = numpy.shape(pixels)\n if len(dimensions) == 
2:\n nrows, ncols = dimensions\n channels = 1\n else:\n nrows, ncols, channels = dimensions\n\n out_nc = Dataset(out_path, \"w\", format=\"NETCDF4\")\n out_nc.createDimension('band', channels) # only 1 band for mask\n out_nc.createDimension('x', ncols)\n out_nc.createDimension('y', nrows)\n\n mask = out_nc.createVariable('soil_mask','f8',('band', 'x', 'y'))\n mask[:] = pixels\n\n out_nc.close()", "def clip_mask(self, da_mask: xr.DataArray, mask: bool = False):\n if not isinstance(da_mask, xr.DataArray):\n raise ValueError(\"Mask should be xarray.DataArray type.\")\n if not self.identical_grid(da_mask):\n raise ValueError(\"Mask grid invalid.\")\n da_mask = da_mask != 0 # convert to boolean\n if not np.any(da_mask):\n raise ValueError(\"No valid values found in mask.\")\n # clip\n row_slice, col_slice = ndimage.find_objects(da_mask.values.astype(np.uint8))[0]\n obj_clip = self._obj.isel({self.x_dim: col_slice, self.y_dim: row_slice})\n if mask: # mask values and add mask coordinate\n mask_bin = da_mask.isel({self.x_dim: col_slice, self.y_dim: row_slice})\n obj_clip.coords[\"mask\"] = xr.Variable(self.dims, mask_bin.values)\n obj_clip = obj_clip.raster.mask(obj_clip.coords[\"mask\"])\n return obj_clip", "def testMask2D(self):\n\n # This mask, applied on an image filled with 1, should result in an image\n # filled with 8 (since we sum 4 elements per channel and there are 2 input\n # channels).\n mask = np.array([[1, 1, 1],\n [1, 0, 0],\n [0, 0, 0]], dtype=np.float32)\n inputs = tf.constant(1.0, shape=(1, 5, 5, 2))\n conv1 = snt.Conv2D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = np.array([[8] * 3] * 3)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)", "def _dilate_mask(mask, dilation_radius=5):\n disk = morphology.disk(dilation_radius, dtype=np.bool)\n dilated_mask = morphology.binary_dilation(\n np.squeeze(mask, axis=2), selem=disk)[..., np.newaxis]\n return dilated_mask", "def inflate_mask(mask):\n kernel = np.ones((12, 12), np.uint8)\n return cv2.dilate(mask, kernel, 1)", "def native(self) -> \"Grid2DIterate\":\r\n return Grid2DIterate(\r\n values=self,\r\n mask=self.mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n store_native=True,\r\n )", "def via_grid_from(self, grid: Grid2DLike) -> Visuals2D:\r\n if not isinstance(grid, Grid2D):\r\n return self.visuals\r\n\r\n origin = self.origin_via_mask_from(mask=grid.mask)\r\n\r\n return self.visuals + self.visuals.__class__(origin=origin)", "def get_contest_mask():\n return createmaskdf(\"data/fcstrodeo_nctemplates/fcstrodeo_mask.nc\")", "def mask(self, mask):\n ds_out = self._obj\n for var in self.vars:\n ds_out[var] = ds_out[var].raster.mask(mask)\n return ds_out", "def from_mask(cls, mask, name='other', affine=None):\n if affine is None:\n affine = np.eye(4)\n else:\n affine = np.asanyarray(affine)\n if affine.shape != (4, 4):\n raise ValueError(\n f'Affine transformation should be a 4x4 array or None, not {affine!r}'\n )\n\n mask = np.asanyarray(mask)\n if mask.ndim == 1:\n return cls.from_surface(np.where(mask != 0)[0], mask.size, name=name)\n elif mask.ndim == 3:\n voxels = np.array(np.where(mask != 0)).T\n return cls(name, voxel=voxels, affine=affine, volume_shape=mask.shape)\n else:\n raise ValueError(\n 'Mask should be either 1-dimensional (for 
surfaces) or '\n '3-dimensional (for volumes), not %i-dimensional' % mask.ndim\n )", "def mask_value(self, ds, mask_value=0):\n xg, yg, mask = self._mask_grid()\n ds = ds.reshape(xg.shape)\n ds[mask == 0] = mask_value\n return xg, yg, ds", "def blurring_grid_from(\r\n cls,\r\n mask: Mask2D,\r\n kernel_shape_native: Tuple[int, int],\r\n fractional_accuracy: float = 0.9999,\r\n relative_accuracy: Optional[float] = None,\r\n sub_steps: Optional[List[int]] = None,\r\n ) -> \"Grid2DIterate\":\r\n\r\n blurring_mask = mask.derive_mask.blurring_from(\r\n kernel_shape_native=kernel_shape_native\r\n )\r\n\r\n return cls.from_mask(\r\n mask=blurring_mask,\r\n fractional_accuracy=fractional_accuracy,\r\n relative_accuracy=relative_accuracy,\r\n sub_steps=sub_steps,\r\n )", "def binned(self) -> \"Grid2DIterate\":\r\n return Grid2DIterate(\r\n values=super().binned,\r\n mask=self.mask.derive_mask.sub_1,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def slim(self) -> \"Grid2DIterate\":\r\n return Grid2DIterate(\r\n values=self,\r\n mask=self.mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def extratropics_unmasked(res='4x5', mask2D=False):\n\n # Create a mask of 1s for chosen area and or 0s elsewhere\n m = np.zeros(get_dims4res(res))\n lats = np.concatenate((np.arange(-89, -26, 1), np.arange(26, 90, 1)))\n lats = [get_gc_lat(i, res=res) for i in lats]\n for i in lats:\n m[:, i, :] = 1\n # Create a np.ma mask\n m = np.ma.masked_not_equal(m, 1)\n\n # Return 2D or 3D?\n if mask2D:\n return m[..., 0].mask\n else:\n return m.mask", "def mask(self):\n mask = np.full(self._grid.shape, False)\n mask[np.ix_(self._lat_indices, self._lon_indices)] = True\n return mask", "def _griddata(self):\n res = self.cfg.resolution\n\n # Get area of data\n xmin, xmax = np.nanmin(self.x), np.nanmax(self.x)\n ymin, ymax = np.nanmin(self.y), np.nanmax(self.y)\n\n # Add padding\n width = xmax-xmin\n height = ymax-ymin\n pad = np.amax([self.cfg.grid_pad_fraction*width, self.cfg.grid_pad_fraction*height])\n xmin = np.floor(xmin - pad)\n xmax = np.ceil(xmax + pad)\n ymin = np.floor(ymin - pad)\n ymax = np.ceil(ymax + pad)\n\n # Create Grid and no data mask\n self.lrx = np.arange(xmin, xmax+res, res)\n self.lry = np.arange(ymin, ymax+res, res)\n self.dem_x, self.dem_y = np.meshgrid(self.lrx, self.lry)\n self.nonan = np.where(np.logical_or(np.isfinite(self.x), np.isfinite(self.y)))\n\n # Create regular grid\n gridding_algorithm = self.cfg.griddata[\"algorithm\"]\n if gridding_algorithm == \"scipy.griddata\":\n self.dem_z = griddata((self.x[self.nonan].flatten(), self.y[self.nonan].flatten()),\n self.als.elevation[self.nonan].flatten(),\n (self.dem_x, self.dem_y),\n **self.cfg.griddata[\"keyw\"])\n else:\n raise NotImplementedError(\"Gridding algorithm: %s\" % gridding_algorithm)\n\n self.dem_z = np.ma.array(self.dem_z)\n self.dem_mask = np.zeros(self.dem_z.shape, dtype=np.bool)", "def locate_droplets_in_mask(grid: GridBase, mask: np.ndarray) -> Emulsion:\n if isinstance(grid, CartesianGridBase):\n return _locate_droplets_in_mask_cartesian(grid, mask)\n elif isinstance(grid, SphericalSymGridBase):\n return _locate_droplets_in_mask_spherical(grid, mask)\n elif isinstance(grid, CylindricalSymGrid):\n return _locate_droplets_in_mask_cylindrical(grid, mask)\n elif isinstance(grid, GridBase):\n raise NotImplementedError(f\"Locating droplets is not possible for grid {grid}\")\n else:\n raise ValueError(f\"Invalid grid {grid}\")", "def _source_mask(self, 
ilens):\n x_masks = make_non_pad_mask(ilens)\n return x_masks.unsqueeze(-2)", "def gridding_2d(points, values, img_shape, method='linear'):\n xi = np.linspace(np.min(points),\n np.max(points),\n img_shape[0],\n endpoint=False)\n yi = np.linspace(np.min(points),\n np.max(points),\n img_shape[1],\n endpoint=False)\n grid_x, grid_y = np.meshgrid(xi, yi)\n return griddata(points,\n values,\n (grid_x, grid_y),\n method=method,\n fill_value=0)" ]
[ "0.640892", "0.62864256", "0.617731", "0.6063297", "0.6046105", "0.5762346", "0.5672607", "0.5672142", "0.55856997", "0.5567239", "0.5490645", "0.5472396", "0.5471098", "0.5451633", "0.5439843", "0.54312015", "0.542528", "0.54130954", "0.5345164", "0.5322865", "0.5314616", "0.52817714", "0.5266248", "0.5253095", "0.52131003", "0.52094615", "0.5191652", "0.51852345", "0.5179952", "0.51240146" ]
0.7251152
0
Setup a blurring grid from a mask, where a blurring grid consists of all pixels that are masked (and therefore have their values set to (0.0, 0.0)), but are close enough to the unmasked pixels that their values will be convolved into those unmasked pixels. This is used when computing images from light profile objects. See Grid2D.blurring_grid_from for a full description of a blurring grid. This method creates the blurring grid as a Grid2DIterate.
def blurring_grid_from(
    cls,
    mask: Mask2D,
    kernel_shape_native: Tuple[int, int],
    fractional_accuracy: float = 0.9999,
    relative_accuracy: Optional[float] = None,
    sub_steps: Optional[List[int]] = None,
) -> "Grid2DIterate":
    blurring_mask = mask.derive_mask.blurring_from(
        kernel_shape_native=kernel_shape_native
    )

    return cls.from_mask(
        mask=blurring_mask,
        fractional_accuracy=fractional_accuracy,
        relative_accuracy=relative_accuracy,
        sub_steps=sub_steps,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mask_grid(self):\n xg, yg = self._build_grid()\n mask = self._build_mask(xg, yg)\n mask = mask.reshape(xg.shape)\n\n return xg, yg, mask", "def blurred_image_2d_from_grid_and_psf(self, grid, psf, blurring_grid):\r\n\r\n if not self.has_light_profile:\r\n return np.zeros(shape=grid.shape_slim)\r\n\r\n image = self.image_2d_from_grid(grid=grid)\r\n\r\n blurring_image = self.image_2d_from_grid(grid=blurring_grid)\r\n\r\n return psf.convolved_array_from_array_and_mask(\r\n array=image.binned.native + blurring_image.binned.native, mask=grid.mask\r\n )", "def blurring_grid_via_kernel_shape_from(\r\n self, kernel_shape_native: Tuple[int, int]\r\n ) -> \"Grid2DIterate\":\r\n\r\n return Grid2DIterate.blurring_grid_from(\r\n mask=self.mask,\r\n kernel_shape_native=kernel_shape_native,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def blurred_image_2d_from_grid_and_convolver(self, grid, convolver, blurring_grid):\r\n\r\n if not self.has_light_profile:\r\n return np.zeros(shape=grid.shape_slim)\r\n\r\n image = self.image_2d_from_grid(grid=grid)\r\n\r\n blurring_image = self.image_2d_from_grid(grid=blurring_grid)\r\n\r\n return convolver.convolve_image(image=image, blurring_image=blurring_image)", "def prepareMask(self, mask):\n\n # Make sure that the mask has the same\n # number of voxels as the atlas image.\n # Use nearest neighbour interpolation\n # for resampling, as it is most likely\n # that the mask is binary.\n try:\n mask, xform = resample.resample(\n mask, self.shape[:3], dtype=np.float32, order=0)\n\n except ValueError:\n raise MaskError('Mask has wrong number of dimensions')\n\n # TODO allow non-aligned mask - as long as it overlaps\n # in world coordinates, it should be allowed\n if not fslimage.Image(mask, xform=xform).sameSpace(self):\n raise MaskError('Mask is not in the same space as atlas')\n\n return mask", "def blurred_images_of_planes_from_grid_and_psf(self, grid, psf, blurring_grid):\r\n\r\n traced_grids_of_planes = self.traced_grids_of_planes_from_grid(grid=grid)\r\n traced_blurring_grids_of_planes = self.traced_grids_of_planes_from_grid(\r\n grid=blurring_grid\r\n )\r\n return [\r\n plane.blurred_image_2d_from_grid_and_psf(\r\n grid=traced_grids_of_planes[plane_index],\r\n psf=psf,\r\n blurring_grid=traced_blurring_grids_of_planes[plane_index],\r\n )\r\n for (plane_index, plane) in enumerate(self.planes)\r\n ]", "def bbox2mask(self, shape, margin, bbox_shape, times):\r\n bboxs = []\r\n for i in range(times):\r\n bbox = self.random_bbox(shape, margin, bbox_shape)\r\n bboxs.append(bbox)\r\n height = shape\r\n width = shape\r\n mask = np.zeros((height, width), np.float32)\r\n for bbox in bboxs:\r\n h = int(bbox[2] * 0.1) + np.random.randint(int(bbox[2] * 0.2 + 1))\r\n w = int(bbox[3] * 0.1) + np.random.randint(int(bbox[3] * 0.2) + 1)\r\n mask[(bbox[0] + h) : (bbox[0] + bbox[2] - h), (bbox[1] + w) : (bbox[1] + bbox[3] - w)] = 1.\r\n return mask.reshape((1, ) + mask.shape).astype(np.float32)", "def bbox2mask(self, shape, margin, bbox_shape, times):\r\n bboxs = []\r\n for i in range(times):\r\n bbox = self.random_bbox(shape, margin, bbox_shape)\r\n bboxs.append(bbox)\r\n height = shape\r\n width = shape\r\n mask = np.zeros((height, width), np.float32)\r\n for bbox in bboxs:\r\n h = int(bbox[2] * 0.1) + np.random.randint(int(bbox[2] * 0.2 + 1))\r\n w = int(bbox[3] * 0.1) + np.random.randint(int(bbox[3] * 0.2) + 1)\r\n mask[(bbox[0] + h) : (bbox[0] + bbox[2] - h), (bbox[1] + w) : (bbox[1] + bbox[3] - w)] = 1.\r\n return mask.reshape((1, 
) + mask.shape).astype(np.float32)", "def flat_2D_grid(bounds, dx, dy):\n x = np.arange(bounds[0], bounds[1] + dx, dx)\n y = np.arange(bounds[2], bounds[3] + dy, dy)\n x_grid, y_grid = np.meshgrid(x, y)\n x_grid, y_grid = x_grid.flatten(), y_grid.flatten()\n\n return pd.DataFrame({'x': x_grid,\n 'y': y_grid,\n 'masked': np.zeros(x_grid.size, dtype='bool')})", "def _build_multiband_mask(data, tractor, filt2pixscale, fill_value=0.0,\n threshmask=0.01, r50mask=0.05, maxshift=10,\n relmaxshift=0.1,\n sigmamask=3.0, neighborfactor=1.0, verbose=False):\n import numpy.ma as ma\n from copy import copy\n from skimage.transform import resize\n from legacyhalos.mge import find_galaxy\n from legacyhalos.misc import srcs2image, ellipse_mask\n\n import matplotlib.pyplot as plt\n from astropy.visualization import simple_norm\n\n bands, refband = data['bands'], data['refband']\n #residual_mask = data['residual_mask']\n\n #nbox = 5\n #box = np.arange(nbox)-nbox // 2\n #box = np.meshgrid(np.arange(nbox), np.arange(nbox))[0]-nbox//2\n\n xobj, yobj = np.ogrid[0:data['refband_height'], 0:data['refband_width']]\n\n # If the row-index of the central galaxy is not provided, use the source\n # nearest to the center of the field.\n if 'galaxy_indx' in data.keys():\n galaxy_indx = np.atleast_1d(data['galaxy_indx'])\n else:\n galaxy_indx = np.array([np.argmin((tractor.bx - data['refband_height']/2)**2 +\n (tractor.by - data['refband_width']/2)**2)])\n data['galaxy_indx'] = np.atleast_1d(galaxy_indx)\n data['galaxy_id'] = ''\n\n #print('Import hack!')\n #norm = simple_norm(img, 'log', min_percent=0.05, clip=True)\n #import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm\n\n ## Get the PSF sources.\n #psfindx = np.where(tractor.type == 'PSF')[0]\n #if len(psfindx) > 0:\n # psfsrcs = tractor.copy()\n # psfsrcs.cut(psfindx)\n #else:\n # psfsrcs = None\n\n def tractor2mge(indx, factor=1.0):\n # Convert a Tractor catalog entry to an MGE object.\n class MGEgalaxy(object):\n pass\n\n default_majoraxis = tractor.diam_init[indx] * 60 / 2 / filt2pixscale[refband] # [pixels]\n default_pa = tractor.pa_init[indx]\n default_ba = tractor.ba_init[indx]\n #default_theta = (270 - default_pa) % 180\n #default_eps = 1 - tractor.ba_init[indx]\n\n #if tractor.sga_id[indx] > -1:\n if tractor.type[indx] == 'PSF' or tractor.shape_r[indx] < 2:\n pa = tractor.pa_init[indx]\n ba = tractor.ba_init[indx]\n # take away the extra factor of 2 we put in in read_sample()\n r50 = tractor.diam_init[indx] * 60 / 2 / 2\n if r50 < 5:\n r50 = 5.0 # minimum size, arcsec\n majoraxis = factor * r50 / filt2pixscale[refband] # [pixels]\n #majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n else:\n ee = np.hypot(tractor.shape_e1[indx], tractor.shape_e2[indx])\n ba = (1 - ee) / (1 + ee)\n pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[indx], tractor.shape_e1[indx]) / 2))\n pa = pa % 180\n\n # can be zero (or very small) if fit as a PSF or REX\n if tractor.shape_r[indx] > 1:\n majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]\n else:\n majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n\n mgegalaxy = MGEgalaxy()\n \n mgegalaxy.xmed = tractor.by[indx]\n mgegalaxy.ymed = tractor.bx[indx]\n mgegalaxy.xpeak = tractor.by[indx]\n mgegalaxy.ypeak = tractor.bx[indx]\n\n # never use the Tractor geometry (only the centroid)\n # https://portal.nersc.gov/project/cosmo/temp/ioannis/virgofilaments-html/215/NGC5584/NGC5584.html\n if 
True:\n mgegalaxy.eps = 1-ba\n mgegalaxy.pa = pa\n mgegalaxy.theta = (270 - pa) % 180\n mgegalaxy.majoraxis = majoraxis\n else:\n mgegalaxy.eps = 1 - default_ba\n mgegalaxy.pa = default_pa\n mgegalaxy.theta = (270 - default_pa) % 180\n mgegalaxy.majoraxis = default_majoraxis\n\n # always restore all pixels within the nominal / initial size of the galaxy\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis,\n # default_majoraxis * (1-default_eps), \n # np.radians(default_theta-90), xobj, yobj)\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis, default_majoraxis, 0.0, xobj, yobj)\n\n objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n mgegalaxy.majoraxis,\n mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n # central 10% pixels can override the starmask\n objmask_center = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n 0.1*mgegalaxy.majoraxis,\n 0.1*mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n return mgegalaxy, objmask, objmask_center\n\n # Now, loop through each 'galaxy_indx' from bright to faint.\n data['mge'] = []\n for ii, central in enumerate(galaxy_indx):\n print('Determing the geometry for galaxy {}/{}.'.format(\n ii+1, len(galaxy_indx)))\n\n # [1] Determine the non-parametric geometry of the galaxy of interest\n # in the reference band. First, subtract all models except the galaxy\n # and galaxies \"near\" it. Also restore the original pixels of the\n # central in case there was a poor deblend.\n largeshift = False\n mge, centralmask, centralmask2 = tractor2mge(central, factor=1.0)\n #plt.clf() ; plt.imshow(centralmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask.png') ; pdb.set_trace()\n\n iclose = np.where([centralmask[np.int(by), np.int(bx)]\n for by, bx in zip(tractor.by, tractor.bx)])[0]\n \n srcs = tractor.copy()\n srcs.cut(np.delete(np.arange(len(tractor)), iclose))\n model = srcs2image(srcs, data['{}_wcs'.format(refband.lower())],\n band=refband.lower(),\n pixelized_psf=data['{}_psf'.format(refband.lower())])\n\n img = data[refband].data - model\n img[centralmask] = data[refband].data[centralmask]\n\n mask = np.logical_or(ma.getmask(data[refband]), data['residual_mask'])\n #mask = np.logical_or(data[refband].mask, data['residual_mask'])\n\n # restore the central pixels but not the masked stellar pixels\n centralmask[np.logical_and(data['starmask'], np.logical_not(centralmask2))] = False\n mask[centralmask] = False\n\n img = ma.masked_array(img, mask)\n ma.set_fill_value(img, fill_value)\n #if ii == 1:\n # pdb.set_trace()\n\n mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=False)#, plot=True) ; plt.savefig('cosmo-www/tmp/junk-mge.png')\n #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask.png')\n ##plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Did the galaxy position move? 
If so, revert back to the Tractor geometry.\n if np.abs(mgegalaxy.xmed-mge.xmed) > maxshift or np.abs(mgegalaxy.ymed-mge.ymed) > maxshift:\n print('Large centroid shift (x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed, mge.xmed, mge.ymed))\n print(' Reverting to the default geometry and the Tractor centroid.')\n largeshift = True\n mgegalaxy = copy(mge)\n\n radec_med = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals\n radec_peak = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals\n mge = {\n 'largeshift': largeshift,\n 'ra': tractor.ra[central], 'dec': tractor.dec[central],\n 'bx': tractor.bx[central], 'by': tractor.by[central],\n #'mw_transmission_g': tractor.mw_transmission_g[central],\n #'mw_transmission_r': tractor.mw_transmission_r[central],\n #'mw_transmission_z': tractor.mw_transmission_z[central],\n 'ra_moment': radec_med[0], 'dec_moment': radec_med[1],\n #'ra_peak': radec_med[0], 'dec_peak': radec_med[1]\n }\n for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):\n mge[key] = np.float32(getattr(mgegalaxy, key))\n if key == 'pa': # put into range [0-180]\n mge[key] = mge[key] % np.float32(180)\n data['mge'].append(mge)\n\n #if False:\n # #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # plt.clf() ; mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=True, plot=True)\n # plt.savefig('/mnt/legacyhalos-data/debug.png')\n\n # [2] Create the satellite mask in all the bandpasses. Use srcs here,\n # which has had the satellites nearest to the central galaxy trimmed\n # out.\n print('Building the satellite mask.')\n satmask = np.zeros(data[refband].shape, bool)\n for filt in bands:\n # do not let GALEX and WISE contribute to the satellite mask\n if data[filt].shape != satmask.shape:\n continue\n \n cenflux = getattr(tractor, 'flux_{}'.format(filt.lower()))[central]\n satflux = getattr(srcs, 'flux_{}'.format(filt.lower()))\n if cenflux <= 0.0:\n #raise ValueError('Central galaxy flux is negative!')\n print('Central galaxy flux is negative! Proceed with caution...')\n #pdb.set_trace()\n \n satindx = np.where(np.logical_or(\n (srcs.type != 'PSF') * (srcs.shape_r > r50mask) *\n (satflux > 0.0) * ((satflux / cenflux) > threshmask),\n srcs.ref_cat == 'R1'))[0]\n #satindx = np.where(srcs.ref_cat == 'R1')[0]\n #if np.isin(central, satindx):\n # satindx = satindx[np.logical_not(np.isin(satindx, central))]\n if len(satindx) == 0:\n #raise ValueError('All satellites have been dropped!')\n #print('Warning! 
All satellites have been dropped from band {}!'.format(filt))\n print('Note: no satellites to mask in band {}.'.format(filt))\n else:\n satsrcs = srcs.copy()\n #satsrcs = tractor.copy()\n satsrcs.cut(satindx)\n satimg = srcs2image(satsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n thissatmask = satimg > sigmamask*data['{}_sigma'.format(filt.lower())]\n #if filt == 'FUV':\n # plt.clf() ; plt.imshow(thissatmask, origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # #plt.clf() ; plt.imshow(data[filt], origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n if satmask.shape != satimg.shape:\n thissatmask = resize(thissatmask*1.0, satmask.shape, mode='reflect') > 0\n\n satmask = np.logical_or(satmask, thissatmask)\n #if True:\n # import matplotlib.pyplot as plt\n # plt.clf() ; plt.imshow(np.log10(satimg), origin='lower') ; plt.savefig('debug.png')\n # plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('debug.png')\n ## #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # pdb.set_trace()\n\n #print(filt, np.sum(satmask), np.sum(thissatmask))\n\n #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask.png')\n \n # [3] Build the final image (in each filter) for ellipse-fitting. First,\n # subtract out the PSF sources. Then update the mask (but ignore the\n # residual mask). Finally convert to surface brightness.\n #for filt in ['W1']:\n for filt in bands:\n thismask = ma.getmask(data[filt])\n if satmask.shape != thismask.shape:\n _satmask = (resize(satmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n _centralmask = (resize(centralmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n mask = np.logical_or(thismask, _satmask)\n mask[_centralmask] = False\n else:\n mask = np.logical_or(thismask, satmask)\n mask[centralmask] = False\n #if filt == 'r':\n # #plt.imshow(_satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt))\n # pdb.set_trace()\n\n varkey = '{}_var'.format(filt.lower())\n imagekey = '{}_masked'.format(filt.lower())\n psfimgkey = '{}_psfimg'.format(filt.lower())\n thispixscale = filt2pixscale[filt]\n if imagekey not in data.keys():\n data[imagekey], data[varkey], data[psfimgkey] = [], [], []\n\n img = ma.getdata(data[filt]).copy()\n \n # Get the PSF sources.\n psfindx = np.where((tractor.type == 'PSF') * (getattr(tractor, 'flux_{}'.format(filt.lower())) / cenflux > threshmask))[0]\n if len(psfindx) > 0 and filt.upper() != 'W3' and filt.upper() != 'W4': \n #if len(psfindx) > 0 and filt.upper() != 'NUV' and filt.upper() != 'FUV' and filt.upper() != 'W3' and filt.upper() != 'W4':\n psfsrcs = tractor.copy()\n psfsrcs.cut(psfindx)\n else:\n psfsrcs = None\n \n if psfsrcs:\n psfimg = srcs2image(psfsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n if False:\n #import fitsio ; fitsio.write('junk-psf-{}.fits'.format(filt.lower()), data['{}_psf'.format(filt.lower())].img, clobber=True)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n im = ax1.imshow(np.log10(img), origin='lower') ; fig.colorbar(im, ax=ax1)\n im = 
ax2.imshow(np.log10(psfimg), origin='lower') ; fig.colorbar(im, ax=ax2)\n im = ax3.imshow(np.log10(data['{}_psf'.format(filt.lower())].img), origin='lower') ; fig.colorbar(im, ax=ax3)\n im = ax4.imshow(img-psfimg, origin='lower') ; fig.colorbar(im, ax=ax4)\n plt.savefig('desi-users/ioannis/tmp/qa-psf-{}.png'.format(filt.lower()))\n if filt == 'r':# or filt == 'r':\n pdb.set_trace()\n img -= psfimg\n else:\n psfimg = np.zeros((2, 2), 'f4')\n\n data[psfimgkey].append(psfimg)\n \n img = ma.masked_array((img / thispixscale**2).astype('f4'), mask) # [nanomaggies/arcsec**2]\n var = data['{}_var_'.format(filt.lower())] / thispixscale**4 # [nanomaggies**2/arcsec**4]\n\n # Fill with zeros, for fun--\n ma.set_fill_value(img, fill_value)\n #if ii == 0 and filt == 'r': #filt == 'W1' or \n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt.lower()))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt.lower()))\n ##### plt.clf() ; plt.imshow(thismask, origin='lower') ; plt.savefig('junk-thismask-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n \n data[imagekey].append(img)\n data[varkey].append(var)\n\n #test = data['r_masked'][0]\n #plt.clf() ; plt.imshow(np.log(test.clip(test[mgegalaxy.xpeak, mgegalaxy.ypeak]/1e4)), origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Cleanup?\n for filt in bands:\n del data[filt]\n del data['{}_var_'.format(filt.lower())]\n\n return data", "def from_mask(\r\n cls,\r\n mask: Mask2D,\r\n fractional_accuracy: float = 0.9999,\r\n relative_accuracy: Optional[float] = None,\r\n sub_steps: Optional[List[int]] = None,\r\n ) -> \"Grid2DIterate\":\r\n\r\n grid_slim = grid_2d_util.grid_2d_slim_via_mask_from(\r\n mask_2d=mask, pixel_scales=mask.pixel_scales, sub_size=1, origin=mask.origin\r\n )\r\n\r\n return Grid2DIterate(\r\n values=grid_slim,\r\n mask=mask.derive_mask.sub_1,\r\n fractional_accuracy=fractional_accuracy,\r\n relative_accuracy=relative_accuracy,\r\n sub_steps=sub_steps,\r\n )", "def via_mask_from(self, mask: Mask2D) -> Visuals2D:\r\n origin = self.origin_via_mask_from(mask=mask)\r\n mask_visuals = self.get(\"mask\", mask)\r\n border = self.get(\"border\", mask.derive_grid.border_sub_1.binned)\r\n\r\n return self.visuals + self.visuals.__class__(\r\n origin=origin, mask=mask_visuals, border=border\r\n )", "def _batched_mask_to_box(masks: \"torch.Tensor\"):\n # torch.max below raises an error on empty inputs, just skip in this case\n\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to Cxheightxwidth\n shape = masks.shape\n height, width = shape[-2:]\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + height * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + width * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = 
(right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n out = out.reshape(*shape[:-2], 4)\n return out", "def _griddata(self):\n res = self.cfg.resolution\n\n # Get area of data\n xmin, xmax = np.nanmin(self.x), np.nanmax(self.x)\n ymin, ymax = np.nanmin(self.y), np.nanmax(self.y)\n\n # Add padding\n width = xmax-xmin\n height = ymax-ymin\n pad = np.amax([self.cfg.grid_pad_fraction*width, self.cfg.grid_pad_fraction*height])\n xmin = np.floor(xmin - pad)\n xmax = np.ceil(xmax + pad)\n ymin = np.floor(ymin - pad)\n ymax = np.ceil(ymax + pad)\n\n # Create Grid and no data mask\n self.lrx = np.arange(xmin, xmax+res, res)\n self.lry = np.arange(ymin, ymax+res, res)\n self.dem_x, self.dem_y = np.meshgrid(self.lrx, self.lry)\n self.nonan = np.where(np.logical_or(np.isfinite(self.x), np.isfinite(self.y)))\n\n # Create regular grid\n gridding_algorithm = self.cfg.griddata[\"algorithm\"]\n if gridding_algorithm == \"scipy.griddata\":\n self.dem_z = griddata((self.x[self.nonan].flatten(), self.y[self.nonan].flatten()),\n self.als.elevation[self.nonan].flatten(),\n (self.dem_x, self.dem_y),\n **self.cfg.griddata[\"keyw\"])\n else:\n raise NotImplementedError(\"Gridding algorithm: %s\" % gridding_algorithm)\n\n self.dem_z = np.ma.array(self.dem_z)\n self.dem_mask = np.zeros(self.dem_z.shape, dtype=np.bool)", "def generate_mask(self, thresh=50, b_ground=None):\n img = self.load_image()\n thresh = np.zeros(img.shape, \"uint8\")\n if b_ground is not None:\n img = img - b_ground\n thresh[img > 25] = 255\n mask = ndimage.morphology.binary_dilation(thresh).astype(\"uint8\")\n self.mask = 255*mask", "def linear_box_blur(grid):\n \n blurred_grid = grid\n \n for i in range(GRID_WIDTH):\n for j in range(GRID_HEIGHT):\n # Average value of surrounding tiles\n total = 0\n num_totaled = 0\n \n for k in range(len(DX)):\n # Coords of tile to add into average\n x = i + DX[k]\n y = j + DY[k]\n \n # Only average it if on the grid\n if 0 <= x < GRID_WIDTH and 0 <= y < GRID_HEIGHT:\n total += grid[x][y]\n num_totaled += 1\n \n # Compute average\n blurred_grid[i][j] = total / num_totaled\n \n return blurred_grid", "def apply_mask(image, mask):\n image = image.astype(np.uint8)\n image = np.array(image)\n \n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n cv2.blur(image[:, :, c],(40,40)),\n image[:, :, c])\n return image", "def _make_mask(data, mask_bounds):\n # For each set of bounds add to the conditional.\n mask = False\n for lat_bounds, lon_bounds in mask_bounds:\n mask |= _add_to_mask(data, lat_bounds, lon_bounds)\n return mask", "def create_grid(self):\n # Domain definition\n network = pp.FractureNetwork2d(self.frac_pts.T, self.frac_edges.T, domain=self.box)\n gb = network.mesh(self.mesh_args) \n pp.contact_conditions.set_projections(gb)\n\n self.gb = gb\n self.Nd = self.gb.dim_max()\n self._Nd = self.gb.dim_max()\n g2d = self.gb.grids_of_dimension(2)[0]\n self.min_face = np.copy(self.mesh_size) #np.min(g2d.face_areas)\n self.min_cell = np.min(g2d.cell_volumes)\n self.p, self.t = analysis.adjustmesh(g2d, self.tips, self.GAP)\n self.displacement = self.p*0\n self.fa_no = g2d.face_nodes.indices.reshape((2, g2d.num_faces), order='f').T \n return gb", "def locate_droplets_in_mask(grid: GridBase, mask: np.ndarray) -> Emulsion:\n if isinstance(grid, CartesianGridBase):\n return _locate_droplets_in_mask_cartesian(grid, mask)\n elif isinstance(grid, 
SphericalSymGridBase):\n return _locate_droplets_in_mask_spherical(grid, mask)\n elif isinstance(grid, CylindricalSymGrid):\n return _locate_droplets_in_mask_cylindrical(grid, mask)\n elif isinstance(grid, GridBase):\n raise NotImplementedError(f\"Locating droplets is not possible for grid {grid}\")\n else:\n raise ValueError(f\"Invalid grid {grid}\")", "def blurred_images_of_planes_from_grid_and_convolver(\r\n self, grid, convolver, blurring_grid\r\n ):\r\n\r\n traced_grids_of_planes = self.traced_grids_of_planes_from_grid(grid=grid)\r\n traced_blurring_grids_of_planes = self.traced_grids_of_planes_from_grid(\r\n grid=blurring_grid\r\n )\r\n\r\n return [\r\n plane.blurred_image_2d_from_grid_and_convolver(\r\n grid=traced_grids_of_planes[plane_index],\r\n convolver=convolver,\r\n blurring_grid=traced_blurring_grids_of_planes[plane_index],\r\n )\r\n for (plane_index, plane) in enumerate(self.planes)\r\n ]", "def gen_obs_grid(self):\n\n topX, topY, botX, botY = self.get_view_exts()\n\n grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size)\n\n # for i in range(self.agent_dir + 1):\n # grid = grid.rotate_left()\n\n # Process occluders and visibility\n # Note that this incurs some performance cost\n if not self.see_through_walls:\n vis_mask = grid.process_vis(agent_pos=(self.agent_view_size // 2,\n self.agent_view_size // 2))\n else:\n vis_mask = np.ones(shape=(grid.width, grid.height), dtype=np.bool)\n\n # Make it so the agent sees what it's carrying\n # We do this by placing the carried object at the agent's position\n # in the agent's partially observable view\n agent_pos = grid.width // 2, grid.height // 2\n if self.carrying:\n grid.set(*agent_pos, self.carrying)\n else:\n grid.set(*agent_pos, None)\n\n return grid, vis_mask", "def _batched_mask_to_box_tf(masks: \"tf.Tensor\"):\n\n if tf.size(masks) == 0:\n return tf.zeros([*masks.shape[:-2], 4])\n\n # Normalize shape to Cxheightxwidth\n shape = shape_list(masks)\n height, width = shape[-2:]\n\n # Get top and bottom edges\n in_height = tf.reduce_max(masks, axis=-1)\n in_height_coords = in_height * tf.range(height)[None, :]\n bottom_edges = tf.reduce_max(in_height_coords, axis=-1)\n in_height_coords = in_height_coords + height * (~in_height)\n top_edges = tf.reduce_min(in_height_coords, axis=-1)\n\n # Get left and right edges\n in_width, _ = tf.reduce_max(masks, axis=-2)\n in_width_coords = in_width * tf.range(width)[None, :]\n right_edges, _ = tf.reduce_max(in_width_coords, axis=-1)\n in_width_coords = in_width_coords + width * (~in_width)\n left_edges, _ = tf.reduce_min(in_width_coords, axis=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = tf.stack([left_edges, top_edges, right_edges, bottom_edges], axis=-1)\n out = out * tf.expand_dims(~empty_filter, -1)\n\n # Return to original shape\n out = tf.reshape(out, *shape[:-2], 4)\n return out", "def _get_masks(self, global_mask):\n\n # Get needed attributes\n args = ('threshold', 'n_orders', 'throughput', 'mask_trace_profile', 'wave_map', 'trace_profile')\n needed_attr = self.get_attributes(*args)\n threshold, n_orders, throughput, mask_trace_profile, wave_map, trace_profile = needed_attr\n\n # Convert list to array (easier for coding)\n mask_trace_profile = np.array(mask_trace_profile)\n\n # Mask pixels not covered by the wavelength grid.\n mask_wave = np.array([self.get_mask_wave(i_order) for i_order in 
range(n_orders)])\n\n # Apply user defined mask.\n if global_mask is None:\n mask_ord = np.any([mask_trace_profile, mask_wave], axis=0)\n else:\n mask = [global_mask for _ in range(n_orders)] # For each orders\n mask_ord = np.any([mask_trace_profile, mask_wave, mask], axis=0)\n\n # Find pixels that are masked in each order.\n general_mask = np.all(mask_ord, axis=0)\n\n # Mask pixels if mask_trace_profile not masked but mask_wave is.\n # This means that an order is contaminated by another\n # order, but the wavelength range does not cover this part\n # of the spectrum. Thus, it cannot be treated correctly.\n is_contaminated = np.array([tr_profile_ord > threshold for tr_profile_ord in trace_profile])\n general_mask |= (np.any(mask_wave, axis=0)\n & np.all(is_contaminated, axis=0))\n\n # Apply this new general mask to each order.\n mask_ord = (mask_wave | general_mask[None, :, :])\n\n return general_mask, mask_ord", "def generate_fg_mask(self, image_bg, image_fg, blur, closing, thresh):\n blur_dims = (2 * blur + 1, 2 * blur + 1)\n bg_blur = cv2.GaussianBlur(image_bg, blur_dims, 0)\n fg_blur = cv2.GaussianBlur(image_fg, blur_dims, 0)\n\n # mask = ||template - frame||^2 > threshold\n diff = cv2.absdiff(bg_blur, fg_blur)\n mask = np.sum(diff**2, axis=2) ** (1.0 / 2) > thresh\n mask = np.array(mask, dtype=np.uint8)\n\n # Fill holes\n if closing > 0:\n element = cv2.getStructuringElement(cv2.MORPH_RECT, (closing, closing))\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, element)\n\n return mask", "def create_mask(masking_positions, img, cells):\n left, right, top, bottom = masking_positions\n left += 1\n right += 1\n top += 1\n bottom += 1\n mask = np.ones((img.shape[0], img.shape[1]))*255\n\n # Compute corresponding positions and put zeros in the background part\n left = (img.shape[1]//cells[0])*left\n mask[:, :left] = 0\n right = img.shape[1]-(img.shape[1]//cells[0])*right\n mask[:, right:] = 0\n top = (img.shape[0]//cells[1])*top\n mask[:top, :] = 0\n bottom = img.shape[0]-(img.shape[0]//cells[0])*bottom\n mask[bottom:, :] = 0\n\n masks = mask.astype(np.uint8)\n return mask", "def mask(self, mask):\n return MaskedDistribution(self, mask)", "def __set_mask_regions(self):\n self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n [299,719], [100, 550], [640, 425]]]))", "def partial_grid(grid, center, observable_depth):\n\n i, j = center\n offset = observable_depth\n\n mask = np.ones_like(grid, dtype=bool)\n mask[max(0, i - offset): i + offset + 1, max(0, j - offset): j + offset + 1] = False\n\n _grid = np.array(grid, copy=True)\n _grid[mask] = -1\n return _grid", "def blur_mask(mask, blur_kernel, threshold=0.1):\n k = pyfits.getdata(blur_kernel)\n k = k / k.sum()\n mask = hconvolve.hconvolve(mask, k)\n mask = np.where(mask >= threshold, 1, 0).astype('int')\n return mask" ]
[ "0.6808554", "0.6480119", "0.60403734", "0.5912199", "0.5645802", "0.5559395", "0.555485", "0.555485", "0.5522488", "0.5516855", "0.5453732", "0.5431576", "0.5390356", "0.5356915", "0.5345018", "0.5330812", "0.53234375", "0.53123385", "0.5288054", "0.5263885", "0.52208006", "0.52144223", "0.52067024", "0.5190168", "0.5182154", "0.51795185", "0.51740235", "0.51711255", "0.5163671", "0.51451105" ]
0.6980832
0
Return a ``Grid2D`` where the data is stored in its ``slim`` representation, which is an ndarray of shape [total_unmasked_pixels*sub_size**2, 2]. If it is already stored in its ``slim`` representation it is returned as it is. If not, it is mapped from ``native`` to ``slim`` and returned as a new ``Grid2D``.
def slim(self) -> "Grid2DIterate":
    return Grid2DIterate(
        values=self,
        mask=self.mask,
        fractional_accuracy=self.fractional_accuracy,
        sub_steps=self.sub_steps,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def padded_grid_from(self, kernel_shape_native: Tuple[int, int]) -> \"Grid2DIterate\":\r\n shape = self.mask.shape\r\n\r\n padded_shape = (\r\n shape[0] + kernel_shape_native[0] - 1,\r\n shape[1] + kernel_shape_native[1] - 1,\r\n )\r\n\r\n padded_mask = Mask2D.all_false(\r\n shape_native=padded_shape,\r\n pixel_scales=self.mask.pixel_scales,\r\n sub_size=self.mask.sub_size,\r\n )\r\n\r\n return Grid2DIterate.from_mask(\r\n mask=padded_mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def blurring_grid_via_kernel_shape_from(\r\n self, kernel_shape_native: Tuple[int, int]\r\n ) -> \"Grid2DIterate\":\r\n\r\n return Grid2DIterate.blurring_grid_from(\r\n mask=self.mask,\r\n kernel_shape_native=kernel_shape_native,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def grid(self) -> aa.Grid2D:\r\n return self.analysis.dataset.grid", "def via_grid_from(self, grid: Grid2DLike) -> Visuals2D:\r\n if not isinstance(grid, Grid2D):\r\n return self.visuals\r\n\r\n origin = self.origin_via_mask_from(mask=grid.mask)\r\n\r\n return self.visuals + self.visuals.__class__(origin=origin)", "def blurring_grid_from(\r\n cls,\r\n mask: Mask2D,\r\n kernel_shape_native: Tuple[int, int],\r\n fractional_accuracy: float = 0.9999,\r\n relative_accuracy: Optional[float] = None,\r\n sub_steps: Optional[List[int]] = None,\r\n ) -> \"Grid2DIterate\":\r\n\r\n blurring_mask = mask.derive_mask.blurring_from(\r\n kernel_shape_native=kernel_shape_native\r\n )\r\n\r\n return cls.from_mask(\r\n mask=blurring_mask,\r\n fractional_accuracy=fractional_accuracy,\r\n relative_accuracy=relative_accuracy,\r\n sub_steps=sub_steps,\r\n )", "def image_2d_from(\r\n self, grid: aa.type.Grid2DLike, operated_only: Optional[bool] = None\r\n ) -> np.ndarray:\r\n\r\n hermite_y = hermite(n=self.n_y)\r\n hermite_x = hermite(n=self.n_x)\r\n\r\n y = grid[:, 0]\r\n x = grid[:, 1]\r\n\r\n shapelet_y = hermite_y(y / self.beta)\r\n shapelet_x = hermite_x(x / self.beta)\r\n\r\n return (\r\n shapelet_y\r\n * shapelet_x\r\n * np.exp(-0.5 * (y**2 + x**2) / (self.beta**2))\r\n / self.beta\r\n / (\r\n np.sqrt(\r\n 2 ** (self.n_x + self.n_y)\r\n * (np.pi)\r\n * factorial(self.n_y)\r\n * factorial(self.n_x)\r\n )\r\n )\r\n )", "def mesh_grid_from(\r\n self,\r\n source_plane_data_grid: Optional[Grid2D] = None,\r\n source_plane_mesh_grid: Optional[Grid2D] = None,\r\n sparse_index_for_slim_index: Optional[np.ndarray] = None,\r\n ) -> Mesh2DRectangular:\r\n return Mesh2DRectangular.overlay_grid(\r\n shape_native=self.shape, grid=source_plane_data_grid\r\n )", "def _bridge_bidirectional_hidden(self, hidden):\n num_layers = hidden.size(0) // 2\n _, batch_size, hidden_size = hidden.size()\n return hidden.view(num_layers, 2, batch_size, hidden_size)\\\n .transpose(1, 2).contiguous().view(num_layers, batch_size, hidden_size * 2)", "def mobile_net_v2(inputs):\n\n layer = _conv_block(inputs, 32, (3, 3), strides=(1, 1))\n\n for i in [16, 24, 32, 64, 96, 160, 320]:\n layer = _inverted_residual_block(layer, i, (3, 3), t=1, strides=1, n=1)\n\n layer = _conv_block(layer, 300, (1, 1), strides=(1, 1))\n layer = GlobalAveragePooling2D()(layer)\n layer = Reshape((1, 1, 300))(layer)\n layer = Dropout(0.3, name='Dropout')(layer)\n\n return layer", "def dualGrid(self):\n return self._dual_grid( )", "def _mask_grid(self):\n xg, yg = self._build_grid()\n mask = self._build_mask(xg, yg)\n mask = mask.reshape(xg.shape)\n\n return xg, yg, mask", "def blurred_image_2d_from_grid_and_psf(self, grid, psf, 
blurring_grid):\r\n\r\n if not self.has_light_profile:\r\n return np.zeros(shape=grid.shape_slim)\r\n\r\n image = self.image_2d_from_grid(grid=grid)\r\n\r\n blurring_image = self.image_2d_from_grid(grid=blurring_grid)\r\n\r\n return psf.convolved_array_from_array_and_mask(\r\n array=image.binned.native + blurring_image.binned.native, mask=grid.mask\r\n )", "def clip_grid(grid, xr, yr, extra_m=5000):\n\n min_x = np.min(xr)\n min_y = np.min(yr)\n max_x = np.max(xr)\n max_y = np.max(yr)\n\n mask_x = np.logical_and(grid.x['data'] > min_x - extra_m,\n grid.x['data'] < max_x + extra_m)\n mask_y = np.logical_and(grid.y['data'] > min_y - extra_m,\n grid.y['data'] < max_y + extra_m)\n\n grid.x['data'] = grid.x['data'][mask_x]\n grid.y['data'] = grid.y['data'][mask_y]\n for f in grid.fields.keys():\n nz = len(grid.fields[f]['data']) # Nb of z levels\n grid.fields[f]['data'] = grid.fields[f]['data'][np.ix_(range(nz),\n mask_y, mask_x)]\n grid.nx = len(grid.x['data'])\n grid.ny = len(grid.y['data'])\n return grid", "def real_space_mask(self) -> aa.Mask2D:\r\n return self.max_log_likelihood_fit.dataset.real_space_mask", "def create_grid(self):\n return [[0] * self.width for _ in range(self.height)]", "def read_grid2d(grid_file):\n labels = []\n with grid_file.open('r') as f:\n for row in f.readlines():\n labels.append([x.strip() for x in row.split('\\t')])\n\n labels = array(labels)\n grid2d = make_grid(labels.shape[0], labels.shape[1])\n grid2d['label'] = labels\n return grid2d", "def walls_and_gridlines2_d(self):\n return self.container['walls_and_gridlines2_d']", "def SH_unmasked(res='4x5', mask2D=False):\n # Create a dummy array of ones, with all locations masked\n m = np.ma.array(np.ones(get_dims4res(res)), mask=True)\n if res == '4x5':\n lats = np.arange(-89, 0, 1)\n if res == '2x2.5':\n lats = np.arange(-90, 0, 1)\n print('CHECK (SH) mask for non 4x5 resolutions')\n lats = [get_gc_lat(i, res=res) for i in lats]\n for i in lats:\n m[:, i, :].mask = False\n # Return 2D or 3D?\n if mask2D:\n return m[..., 0].mask\n else:\n return m.mask", "def _build_raw_grid(self):\n self._raw_grid = parse_ascii_grid(self._ascii_grid)\n self.width = self._raw_grid.shape[0]\n self.height = self._raw_grid.shape[1]\n # If a start position has been specified, add it to grid.\n if self._agent_default_pos is not None:\n assert len(self._agent_default_pos) == 2\n x, y = self._agent_default_pos\n self._raw_grid[x, y] = 's'\n # If a goal position has been specified, add it to the grid.\n if self._goal_default_pos is not None:\n assert len(self._goal_default_pos) == 2\n x, y = self._goal_default_pos\n self._raw_grid[x, y] = 'g'", "def NH_unmasked(res='4x5', mask2D=False):\n # Create a dummy array of ones, with all locations masked\n m = np.ma.array(np.ones(get_dims4res(res)), mask=True)\n if res == '4x5':\n lats = np.arange(1, 91, 1)\n elif res == '2x2.5':\n lats = np.arange(0, 89, 1)\n print('CHECK (NH) mask for non 4x5 resolutions')\n lats = [get_gc_lat(i, res=res) for i in lats]\n for i in lats:\n m[:, i, :].mask = False\n # Return 2D or 3D?\n if mask2D:\n return m[..., 0].mask\n else:\n return m.mask", "def stripToGrid(pixelCount, columnCount):\n rowCount = int(pixelCount/columnCount)\n grid = [[0 for x in range(rowCount)] for y in range(columnCount)]\n\n pixel = 0\n for y in range(rowCount):\n for x in range(columnCount): \n column = x if y%2 == 0 else columnCount-1-x\n grid[column][y] = pixel \n pixel += 1 \n\n return grid", "def uniform(\r\n cls,\r\n shape_native: Tuple[int, int],\r\n pixel_scales: 
ty.PixelScales,\r\n origin: Tuple[float, float] = (0.0, 0.0),\r\n fractional_accuracy: float = 0.9999,\r\n relative_accuracy: Optional[float] = None,\r\n sub_steps: Optional[List[int]] = None,\r\n ) -> \"Grid2DIterate\":\r\n\r\n pixel_scales = geometry_util.convert_pixel_scales_2d(pixel_scales=pixel_scales)\r\n\r\n grid_slim = grid_2d_util.grid_2d_slim_via_shape_native_from(\r\n shape_native=shape_native,\r\n pixel_scales=pixel_scales,\r\n sub_size=1,\r\n origin=origin,\r\n )\r\n\r\n return Grid2DIterate.no_mask(\r\n values=grid_slim,\r\n shape_native=shape_native,\r\n pixel_scales=pixel_scales,\r\n fractional_accuracy=fractional_accuracy,\r\n relative_accuracy=relative_accuracy,\r\n sub_steps=sub_steps,\r\n origin=origin,\r\n )", "def wrapper(profile, grid, *args, **kwargs):\r\n\r\n if isinstance(grid, grids.Grid2DIterate):\r\n mask = grid.mask.mask_new_sub_size_from_mask(\r\n mask=grid.mask, sub_size=max(grid.sub_steps)\r\n )\r\n grid_compute = grids.Grid2D.from_mask(mask=mask)\r\n result_list = func(profile, grid_compute, *args, **kwargs)\r\n result_list = [\r\n grid_compute.structure_from_result(result=result)\r\n for result in result_list\r\n ]\r\n result_list = [result.slim_binned for result in result_list]\r\n return grid.grid.structure_list_from_result_list(result_list=result_list)\r\n elif isinstance(grid, grids.Grid2DInterpolate):\r\n return func(profile, grid, *args, **kwargs)\r\n elif isinstance(grid, grids.Grid2DIrregular):\r\n result_list = func(profile, grid, *args, **kwargs)\r\n return grid.structure_list_from_result_list(result_list=result_list)\r\n elif isinstance(grid, grids.Grid2D):\r\n result_list = func(profile, grid, *args, **kwargs)\r\n return grid.structure_list_from_result_list(result_list=result_list)\r\n\r\n if not isinstance(grid, grids.Grid2DIrregular) and not isinstance(\r\n grid, grids.Grid2D\r\n ):\r\n return func(profile, grid, *args, **kwargs)", "def southpole_unmasked(res='4x5', mask2D=False):\n\n # Create a mask of 1s for chosen area and or 0s elsewhere\n m = np.zeros(get_dims4res(res))\n # adjust for resolution at grid start points at 62\n if res == '4x5':\n lats = np.arange(-89, -62, 1) # define S pole as > 60S\n else:\n lats = np.arange(-89, -60, 1) # define S pole as > 60S\n# lats = np.arange(-89, -80,1 ) # define S pole as > 80S\n lats = [get_gc_lat(i, res=res) for i in lats]\n for i in lats:\n m[:, i, :] = 1\n\n # Create a np.ma mask\n m = np.ma.masked_not_equal(m, 1)\n\n # Return 2D or 3D?\n if mask2D:\n return m[..., 0].mask\n else:\n return m.mask", "def regular_grid(self, width, height, overlap=0.0, units='native'):\n\n if not self.src:\n raise RuntimeError('source not set or failed to open')\n\n if units == 'pixels':\n dims = width * self.src.res[0], height * self.src.res[1]\n elif units == 'native':\n dims = width, height\n else:\n raise ValueError('units must be \"native\" or \"pixels\"')\n\n gdf = grid.regular_grid(*self.src.bounds, *dims, overlap=overlap)\n gdf.crs = self.src.crs\n return gdf", "def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid", "def gridding_2d(points, values, img_shape, method='linear'):\n xi = np.linspace(np.min(points),\n np.max(points),\n img_shape[0],\n endpoint=False)\n yi = np.linspace(np.min(points),\n np.max(points),\n img_shape[1],\n endpoint=False)\n grid_x, grid_y = np.meshgrid(xi, yi)\n return griddata(points,\n values,\n (grid_x, grid_y),\n method=method,\n fill_value=0)", "def iterated_grid_from(\r\n self, func: Callable, cls: 
object, grid_lower_sub_2d: Grid2D\r\n ) -> Grid2D:\r\n\r\n if not np.any(grid_lower_sub_2d):\r\n return grid_lower_sub_2d.slim\r\n\r\n iterated_grid = np.zeros(shape=(self.shape_native[0], self.shape_native[1], 2))\r\n\r\n threshold_mask_lower_sub = self.mask\r\n\r\n for sub_size in self.sub_steps[:-1]:\r\n grid_higher_sub = self.grid_at_sub_size_from(\r\n func=func, cls=cls, mask=threshold_mask_lower_sub, sub_size=sub_size\r\n )\r\n\r\n threshold_mask_higher_sub = self.threshold_mask_via_grids_from(\r\n grid_lower_sub_2d=grid_lower_sub_2d, grid_higher_sub_2d=grid_higher_sub\r\n )\r\n\r\n iterated_grid = self.iterated_grid_jit_from(\r\n iterated_grid=iterated_grid,\r\n threshold_mask_higher_sub=threshold_mask_higher_sub,\r\n threshold_mask_lower_sub=threshold_mask_lower_sub,\r\n grid_higher_sub_2d=grid_higher_sub,\r\n )\r\n\r\n if threshold_mask_higher_sub.is_all_true:\r\n iterated_grid_1d = grid_2d_util.grid_2d_slim_from(\r\n mask=self.mask, grid_2d_native=iterated_grid, sub_size=1\r\n )\r\n\r\n return Grid2D(values=iterated_grid_1d, mask=self.mask.derive_mask.sub_1)\r\n\r\n grid_lower_sub_2d = grid_higher_sub\r\n threshold_mask_lower_sub = threshold_mask_higher_sub\r\n\r\n grid_higher_sub = self.grid_at_sub_size_from(\r\n func=func,\r\n cls=cls,\r\n mask=threshold_mask_lower_sub,\r\n sub_size=self.sub_steps[-1],\r\n )\r\n\r\n iterated_grid_2d = iterated_grid + grid_higher_sub.binned.native\r\n\r\n iterated_grid_1d = grid_2d_util.grid_2d_slim_from(\r\n mask=self.mask, grid_2d_native=iterated_grid_2d, sub_size=1\r\n )\r\n\r\n return Grid2D(values=iterated_grid_1d, mask=self.mask.derive_mask.sub_1)", "def square_grid(\n bbox: List[float], n_cells: Union[int, float], options: Dict = {},\n) -> FeatureCollection:\n\n return rectangle_grid(bbox, n_cells, n_cells, options)", "def _grid_around_star(self, x0, y0, data):\n lenx, leny = data.shape\n xmin, xmax = max(x0 - self._box / 2, 0), min(x0 + self._box / 2 + 1, lenx - 1)\n ymin, ymax = max(y0 - self._box / 2, 0), min(y0 + self._box / 2 + 1, leny - 1)\n return np.mgrid[int(xmin) : int(xmax), int(ymin) : int(ymax)]" ]
[ "0.5512386", "0.54331136", "0.52063835", "0.5182092", "0.51143974", "0.4989602", "0.48805118", "0.4852229", "0.4849482", "0.481266", "0.48116904", "0.48049447", "0.4779547", "0.47336814", "0.4730924", "0.47078955", "0.4703412", "0.46861225", "0.46810302", "0.46566904", "0.46423447", "0.46254125", "0.46219644", "0.46113518", "0.45960662", "0.45902815", "0.45813093", "0.45595598", "0.45498818", "0.45488882" ]
0.5710812
0
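The ``slim`` vs ``native`` storage described in the row above applies to any masked 2D structure: ``native`` keeps the full [rows, columns, 2] layout, while ``slim`` keeps one (y, x) row per unmasked (sub-)pixel. A standalone numpy sketch of that mapping, with a made-up mask and coordinates chosen only for illustration (this is not the PyAutoArray implementation):

import numpy as np

# True = masked, False = unmasked; the shape and values here are invented.
mask = np.array([[True,  True,  True],
                 [True,  False, False],
                 [True,  True,  True]])

# "native" storage: the full (3, 3, 2) array of (y, x) coordinates.
ys, xs = np.meshgrid(np.arange(3), np.arange(3), indexing="ij")
native = np.stack([ys, xs], axis=-1).astype(float)

# "slim" storage: one (y, x) row per unmasked pixel, in row-major order.
slim = native[~mask]              # shape (total_unmasked_pixels, 2)

# Mapping slim back to native scatters the rows into the 2D layout.
native_again = np.zeros_like(native)
native_again[~mask] = slim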
Returns a new Grid2DIterate from this grid, where the (y,x) coordinates of this grid have a grid of (y,x) values, termed the deflection grid, subtracted from them to determine the new grid of (y,x) values. This is used by PyAutoLens to perform grid raytracing.
def grid_2d_via_deflection_grid_from(
    self, deflection_grid: np.ndarray
) -> "Grid2DIterate":
    return Grid2DIterate(
        values=self - deflection_grid,
        mask=self.mask,
        fractional_accuracy=self.fractional_accuracy,
        sub_steps=self.sub_steps,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grid(self) -> aa.Grid2D:\r\n return self.analysis.dataset.grid", "def slim(self) -> \"Grid2DIterate\":\r\n return Grid2DIterate(\r\n values=self,\r\n mask=self.mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def via_grid_from(self, grid: Grid2DLike) -> Visuals2D:\r\n if not isinstance(grid, Grid2D):\r\n return self.visuals\r\n\r\n origin = self.origin_via_mask_from(mask=grid.mask)\r\n\r\n return self.visuals + self.visuals.__class__(origin=origin)", "def dualGrid(self):\n return self._dual_grid( )", "def native(self) -> \"Grid2DIterate\":\r\n return Grid2DIterate(\r\n values=self,\r\n mask=self.mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n store_native=True,\r\n )", "def image_2d_from(\r\n self, grid: aa.type.Grid2DLike, operated_only: Optional[bool] = None\r\n ) -> np.ndarray:\r\n\r\n hermite_y = hermite(n=self.n_y)\r\n hermite_x = hermite(n=self.n_x)\r\n\r\n y = grid[:, 0]\r\n x = grid[:, 1]\r\n\r\n shapelet_y = hermite_y(y / self.beta)\r\n shapelet_x = hermite_x(x / self.beta)\r\n\r\n return (\r\n shapelet_y\r\n * shapelet_x\r\n * np.exp(-0.5 * (y**2 + x**2) / (self.beta**2))\r\n / self.beta\r\n / (\r\n np.sqrt(\r\n 2 ** (self.n_x + self.n_y)\r\n * (np.pi)\r\n * factorial(self.n_y)\r\n * factorial(self.n_x)\r\n )\r\n )\r\n )", "def _griddata(self):\n res = self.cfg.resolution\n\n # Get area of data\n xmin, xmax = np.nanmin(self.x), np.nanmax(self.x)\n ymin, ymax = np.nanmin(self.y), np.nanmax(self.y)\n\n # Add padding\n width = xmax-xmin\n height = ymax-ymin\n pad = np.amax([self.cfg.grid_pad_fraction*width, self.cfg.grid_pad_fraction*height])\n xmin = np.floor(xmin - pad)\n xmax = np.ceil(xmax + pad)\n ymin = np.floor(ymin - pad)\n ymax = np.ceil(ymax + pad)\n\n # Create Grid and no data mask\n self.lrx = np.arange(xmin, xmax+res, res)\n self.lry = np.arange(ymin, ymax+res, res)\n self.dem_x, self.dem_y = np.meshgrid(self.lrx, self.lry)\n self.nonan = np.where(np.logical_or(np.isfinite(self.x), np.isfinite(self.y)))\n\n # Create regular grid\n gridding_algorithm = self.cfg.griddata[\"algorithm\"]\n if gridding_algorithm == \"scipy.griddata\":\n self.dem_z = griddata((self.x[self.nonan].flatten(), self.y[self.nonan].flatten()),\n self.als.elevation[self.nonan].flatten(),\n (self.dem_x, self.dem_y),\n **self.cfg.griddata[\"keyw\"])\n else:\n raise NotImplementedError(\"Gridding algorithm: %s\" % gridding_algorithm)\n\n self.dem_z = np.ma.array(self.dem_z)\n self.dem_mask = np.zeros(self.dem_z.shape, dtype=np.bool)", "def copy(self) -> 'Grid':\n return Grid(self.size, [cell.copy() for cell in self.cells],\n ([line.copy() for line in self.leylines],\n [line.copy() for line in self.rights],\n [line.copy() for line in self.lefts]))", "def create_grid(self):\n\n # initial point of the path\n self.p0 = np.array([self.start_x, self.start_y])\n self.pf = np.array([self.end_x, self.end_y]) # final point of the path\n\n self.l1 = np.linalg.norm(np.subtract(self.pf, self.p0))\n self.l2 = 0.2 * self.l1\n\n # we define the 2 unit vectors self.d1 and self.d2 in which directions we are moving along\n # then we apply to them the size of the desired displacement\n self.d1 = np.subtract(self.pf, self.p0) / \\\n (np.linalg.norm(np.subtract(self.pf, self.p0)))\n\n self.d2 = np.array([-self.d1[1], self.d1[0]])\n self.d2 = self.d2/(np.linalg.norm(self.d2))\n\n # we create our grid moving with self.d1 and self.d2\n # each point of the grid has the following properties:\n # position, distance to self.d1, distance 
to self.pf, velocity\n n_displacements_2 = 0\n self.lines_list = []\n\n # variables for making easier the plotting afterwards\n self.x_list_grid = []\n self.y_list_grid = []\n time_n = 1\n\n while True:\n current_point = np.subtract(\n self.p0, self.d2*(n_displacements_2*self.displacement))\n current_length_2 = np.linalg.norm(\n np.subtract(current_point, self.p0))\n if current_length_2 > self.l2/2:\n if time_n == 2:\n break\n else:\n time_n += 1\n self.lines_list = list(reversed(self.lines_list))\n self.d2 = -1 * self.d2\n n_displacements_2 = 1\n current_point = np.subtract(\n self.p0, self.d2*(n_displacements_2*self.displacement))\n\n line_points = []\n self.x_list_grid.append(current_point[0])\n self.y_list_grid.append(current_point[1])\n\n # TODO: add fish real prediction\n line_points.append({\n \"position\": current_point.tolist(),\n \"distance_to_l1\": n_displacements_2*self.displacement,\n \"distance_to_pf\": np.linalg.norm(np.subtract(self.pf, current_point)),\n \"fish\": random.random()\n })\n\n initial_point_1 = current_point\n while True:\n current_point = np.sum(\n [current_point, self.d1 * self.displacement], axis=0)\n current_length_1 = np.linalg.norm(\n np.subtract(current_point, initial_point_1))\n if current_length_1 >= self.l1:\n current_point = self.pf - self.d2*n_displacements_2*self.displacement\n self.x_list_grid.append(current_point[0])\n self.y_list_grid.append(current_point[1])\n # TODO: add fish real prediction\n line_points.append({\n \"position\": current_point.tolist(),\n \"distance_to_l1\": n_displacements_2*self.displacement,\n \"distance_to_pf\": np.linalg.norm(np.subtract(self.pf, current_point)),\n \"fish\": random.random()\n })\n break\n\n self.x_list_grid.append(current_point[0])\n self.y_list_grid.append(current_point[1])\n\n # TODO: add fish real prediction\n line_points.append({\n \"position\": current_point.tolist(),\n \"distance_to_l1\": n_displacements_2*self.displacement,\n \"distance_to_pf\": np.linalg.norm(np.subtract(self.pf, current_point)),\n \"fish\": random.random()\n })\n\n self.lines_list.append(line_points)\n n_displacements_2 += 1", "def copy_grid (grid):\r\n return copy.deepcopy(grid)", "def copy_grid (grid):\r\n import copy\r\n g=copy.deepcopy(grid)\r\n return g", "def SubtractGrids(A, B):\n if (A.xllcorner,A.yllcorner) == (B.xllcorner,B.yllcorner) and (A.ncols,A.nrows)==(B.ncols,B.nrows):\n maxVal = max( numpy.max(A.data), numpy.max(B.data))\n Ax = numpy.where(A.data != A.nodata, A.data+maxVal, 0.0)\n Bx = numpy.where(B.data != B.nodata, B.data+maxVal, 0.0)\n C = A.data - B.data\n #C = numpy.where(C != 0.0, C-2.*maxVal, 0.0)\n #C = numpy.where(C < 0.0, C+maxVal, C)\n #C = numpy.where(C != 0.0, C, A.nodata)\n New = grid(C, A.xllcorner, A.yllcorner, A.cellsize, 'subtract.grd', A.nodata)\n return New\n else:\n return \"Error: grid mismatch\"", "def get_relative_grid(self, xgbl, ygbl):\r\n self.rx = xgbl - self.x0\r\n self.ry = ygbl - self.y0\r\n\r\n # flatten the coordinates:\r\n self.rx = self.rx.ravel()\r\n self.ry = self.ry.ravel()", "def discretizespace(self):\n \n # Grid\n self.xgriddim = ( self.x0_n , self.x1_n , self.x2_n )\n \n self.xd = [ None , None , None ]\n self.xd[0] = np.linspace( self.DS.x_lb[0] , self.DS.x_ub[0] , self.x0_n )\n self.xd[1] = np.linspace( self.DS.x_lb[1] , self.DS.x_ub[1] , self.x1_n )\n self.xd[2] = np.linspace( self.DS.x_lb[2] , self.DS.x_ub[2] , self.x2_n )\n \n self.x_grid2node = np.zeros( ( self.x0_n , self.x1_n , self.x2_n ) , dtype = int ) # grid of corresponding index\n \n # 1-D List of nodes\n 
self.nodes_n = self.x0_n * self.x1_n * self.x2_n\n self.nodes_state = np.zeros(( self.nodes_n , self.DS.n ), dtype = float ) # Number of nodes x state dimensions\n self.nodes_index = np.zeros(( self.nodes_n , self.DS.n ), dtype = int ) # Number of nodes x state dimensions", "def grid(self):\n return self.__grid", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def mirror(self):\n for l in range(self.numRows):\n mirrorGrid = []\n for l in range(self.numCols):\n row = self.grid[l][::-1]\n mirrorGrid.append(row)\n return mirrorGrid", "def gen_grid(delta):\n outline = get_continent_data_from_file()\n xmin, ymin, xmax, ymax = outline.total_bounds\n dx, dy = delta\n xgrid, ygrid = np.meshgrid(np.arange(xmin, xmax, dx), np.arange(ymin, ymax, dy))\n xgrid, ygrid = xgrid.flatten(), ygrid.flatten()\n grid = gpd.GeoDataFrame(geometry=([Polygon([\n [x - dx, y - dy],\n [x - dx, y],\n [x, y],\n [x, y - dy]]) for x, y in zip(xgrid, ygrid)\n ]), crs=outline.crs)\n\n grid_clip = gpd.clip(grid, outline).reset_index(drop=True)\n grid_clip = grid_clip[grid_clip.geom_type != 'LineString']\n grid_clip = grid_clip[grid_clip.geom_type != 'Point']\n grid_clip = grid_clip[grid_clip.geom_type != 'MultiLineString']\n grid_clip['grid_index'] = grid_clip.index\n grid_clip.grid_index = grid_clip['grid_index'].apply(str)\n return grid_clip", "def discretizespace(self):\n \n # Grid\n self.xgriddim = ( self.x0_n , self.x1_n )\n \n self.xd = [ None , None ]\n self.xd[0] = np.linspace( self.DS.x_lb[0] , self.DS.x_ub[0] , self.x0_n )\n self.xd[1] = np.linspace( self.DS.x_lb[1] , self.DS.x_ub[1] , self.x1_n )\n \n self.x_grid2node = np.zeros( ( self.x0_n , self.x1_n ) , dtype = int ) # grid of corresponding index\n \n # 1-D List of nodes\n self.nodes_n = self.x0_n * self.x1_n\n self.nodes_state = np.zeros(( self.nodes_n , self.DS.n ), dtype = float ) # Number of nodes x state dimensions\n self.nodes_index = np.zeros(( self.nodes_n , self.DS.n ), dtype = int ) # Number of nodes x state dimensions", "def binned(self) -> \"Grid2DIterate\":\r\n return Grid2DIterate(\r\n values=super().binned,\r\n mask=self.mask.derive_mask.sub_1,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def grid_inflation(self):\n for obs in self.obstacle_list:\n\n inflation_x1 = round((obs[0][0]-self._inflation_radius)/self.step_size)\n\n inflation_y2 = round((obs[0][1] + obs[2] +self._inflation_radius)/self.step_size)\n\n inflation_x2 = round((obs[0][0] + obs[1] +self._inflation_radius)/self.step_size)\n\n inflation_y1 = round((obs[0][1] -self._inflation_radius)/self.step_size)\n\n self.grid[1, inflation_x1:inflation_x2+1,\n inflation_y1:inflation_y2+1] = INFLATION_COST\n\n # border inflation\n self.grid[1, 0:self.gridwidth+1, 0:round(self._inflation_radius/self.step_size)+1] = INFLATION_COST\n self.grid[1, 0:self.gridwidth+1, self.gridheight-round(self._inflation_radius / self.step_size):self.gridheight+1] = INFLATION_COST\n self.grid[1, 0:round(self._inflation_radius/self.step_size)+1, 0:self.gridheight+1] = INFLATION_COST\n self.grid[1, self.gridwidth-round(self._inflation_radius/self.step_size):self.gridwidth+1, 0:self.gridheight+1] = INFLATION_COST\n\n # if NEED_DRAW_INFLATED_GRID:\n # for i in range(self.gridwidth):\n # plt.scatter(i,0)\n # plt.scatter(i,self.gridheight)\n # for j in range(self.gridheight):\n # plt.scatter(0,j)\n # 
plt.scatter(self.gridwidth,j)\n # if self.grid[i, j] != 0:\n # plt.scatter(i,j)\n # plt.show()\n\n return self.grid", "def from_mask(\r\n cls,\r\n mask: Mask2D,\r\n fractional_accuracy: float = 0.9999,\r\n relative_accuracy: Optional[float] = None,\r\n sub_steps: Optional[List[int]] = None,\r\n ) -> \"Grid2DIterate\":\r\n\r\n grid_slim = grid_2d_util.grid_2d_slim_via_mask_from(\r\n mask_2d=mask, pixel_scales=mask.pixel_scales, sub_size=1, origin=mask.origin\r\n )\r\n\r\n return Grid2DIterate(\r\n values=grid_slim,\r\n mask=mask.derive_mask.sub_1,\r\n fractional_accuracy=fractional_accuracy,\r\n relative_accuracy=relative_accuracy,\r\n sub_steps=sub_steps,\r\n )", "def grid(self):\n return self._grid", "def copy( self ):\n New = grid(self.data, self.xllcorner, self.yllcorner, self.cellsize, 'copy-'+self.name, self.nodata)\n return New", "def regrid(self, nx=None, ny=None, factor=1):\n\n if nx is not None:\n factor = nx / self.nx\n if ny is not None:\n factor = ny / self.ny\n\n nx = self.nx * factor\n ny = self.ny * factor\n dx = self.dx / factor\n dy = self.dy / factor\n\n x0 = self.corner_grid.x0\n y0 = self.corner_grid.y0\n args = dict(nxny=(nx, ny), dxdy=(dx, dy), x0y0=(x0, y0),\n proj=self.proj, pixel_ref='corner')\n g = Grid(**args)\n if self.pixel_ref == 'center':\n g = g.center_grid\n return g", "def _create_grid(self):\n\n # Check if hull dimensions are sensible for deck-dimensions (rows & lanes)\n grid = np.zeros((self.rows, self.lanes), dtype=np.int)\n if self.rows > self.hull_catheti_length and self.lanes >= self.hull_catheti_length * 2:\n for i in range(self.hull_catheti_length):\n t = (self.hull_catheti_length - i)\n grid[i] += np.hstack([-np.ones(t, dtype=np.int), np.zeros(self.lanes - t, dtype=np.int)])\n grid[i] += np.hstack([np.zeros(self.lanes - t, dtype=np.int), -np.ones(t, dtype=np.int)])\n else:\n logging.getLogger(__name__).error(\"Ship hull does not match grid dimensions -> return without hull\")\n return grid", "def update_grid(self):\n # Check to see if we have moved squares\n _new_grid = self.calc_grid()\n if _new_grid == self._grid:\n return\n # Remove from old square and add to new square\n self.target._grid[self._grid][self._type].discard(self)\n self.target._grid[_new_grid][self._type].add(self)\n # Update coordinates\n self._grid = _new_grid", "def make_grid(self):\n\n\t\tinit_grid = (self.grid_width//2, self.grid_height//2)\n\t\tgrid_list = []\n\n\t\tfor i in range(self.canv_width//self.grid_width):\n\t\t\tfor j in range(self.canv_height//self.grid_height):\n\t\t\t\tif j == 0 or j%2 ==0:\n\t\t\t\t\tgrid_list.append((init_grid[0]+i*self.grid_width, init_grid[1]+j*self.grid_height))\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tgrid_list.append((grid_list[-1][0]+(self.grid_width//2), init_grid[1]+j*self.grid_height))\n\n\t\treturn grid_list", "def get_grid(self, grid_idx):\n end_idx = self.sample_idx[grid_idx]\n start_idx = self.sample_idx[grid_idx-1] if grid_idx != 0 else 0\n grid = self.im_data[start_idx]\n label = self.label_data[start_idx:end_idx]\n state = self.state_data[start_idx:end_idx]\n goal = self.find_goal(grid[1])\n return grid, state, label, goal", "def create(x: Bounds, y: Bounds, grid_spacing):\n # Calculate grid bounds\n x0, y0 = Grid.bounding_box(x, y, grid_spacing)\n # print(f\"Grid.create: bounding box: x: {x0} y: {y0}\" )\n\n # Generate vectors of grid centers\n # Cell center offset\n cell_center_offset = grid_spacing/2\n x_vals = np.arange(x0.min + cell_center_offset, x0.max, grid_spacing)\n y_vals = np.arange(y0.max - cell_center_offset, y0.min, 
-grid_spacing)\n\n return x_vals, y_vals" ]
[ "0.6189202", "0.6080999", "0.600252", "0.5957412", "0.5948264", "0.5944243", "0.58593035", "0.5677554", "0.56470937", "0.5636628", "0.5587175", "0.5562993", "0.5488128", "0.54877734", "0.54263955", "0.54227567", "0.53559494", "0.5355037", "0.53536683", "0.53376454", "0.5331281", "0.5324082", "0.5315396", "0.53144586", "0.53098947", "0.5296877", "0.5285886", "0.5272047", "0.5260329", "0.5230757" ]
0.794933
0
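The deflection-grid subtraction in the row above is an element-wise operation: each image-plane (y, x) coordinate has its deflection angle subtracted to give the corresponding source-plane coordinate. A minimal numpy sketch of that lens-equation step; the deflection values here are invented rather than computed from a real mass profile:

import numpy as np

# Image-plane (y, x) coordinates of four pixels, in slim form (values made up).
image_plane_grid = np.array([[ 0.1,  0.1],
                             [ 0.1, -0.1],
                             [-0.1,  0.1],
                             [-0.1, -0.1]])

# Deflection angles (alpha_y, alpha_x) at those pixels. In PyAutoLens these would
# come from a mass profile's deflections_yx_2d_from(); here they are invented.
deflection_grid = np.full_like(image_plane_grid, 0.05)

# The lens equation: source-plane coordinates = image-plane coordinates - deflections.
source_plane_grid = image_plane_grid - deflection_grid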
Returns the blurring grid from a grid and creates it as a Grid2DIterate, via an input 2D kernel shape. For a full description of blurring grids, check out blurring_grid_from.
def blurring_grid_via_kernel_shape_from(
    self, kernel_shape_native: Tuple[int, int]
) -> "Grid2DIterate":
    return Grid2DIterate.blurring_grid_from(
        mask=self.mask,
        kernel_shape_native=kernel_shape_native,
        fractional_accuracy=self.fractional_accuracy,
        sub_steps=self.sub_steps,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def blurred_image_2d_from_grid_and_psf(self, grid, psf, blurring_grid):\r\n\r\n if not self.has_light_profile:\r\n return np.zeros(shape=grid.shape_slim)\r\n\r\n image = self.image_2d_from_grid(grid=grid)\r\n\r\n blurring_image = self.image_2d_from_grid(grid=blurring_grid)\r\n\r\n return psf.convolved_array_from_array_and_mask(\r\n array=image.binned.native + blurring_image.binned.native, mask=grid.mask\r\n )", "def blurred_image_2d_from_grid_and_convolver(self, grid, convolver, blurring_grid):\r\n\r\n if not self.has_light_profile:\r\n return np.zeros(shape=grid.shape_slim)\r\n\r\n image = self.image_2d_from_grid(grid=grid)\r\n\r\n blurring_image = self.image_2d_from_grid(grid=blurring_grid)\r\n\r\n return convolver.convolve_image(image=image, blurring_image=blurring_image)", "def blurring_grid_from(\r\n cls,\r\n mask: Mask2D,\r\n kernel_shape_native: Tuple[int, int],\r\n fractional_accuracy: float = 0.9999,\r\n relative_accuracy: Optional[float] = None,\r\n sub_steps: Optional[List[int]] = None,\r\n ) -> \"Grid2DIterate\":\r\n\r\n blurring_mask = mask.derive_mask.blurring_from(\r\n kernel_shape_native=kernel_shape_native\r\n )\r\n\r\n return cls.from_mask(\r\n mask=blurring_mask,\r\n fractional_accuracy=fractional_accuracy,\r\n relative_accuracy=relative_accuracy,\r\n sub_steps=sub_steps,\r\n )", "def grid_2d_via_deflection_grid_from(\r\n self, deflection_grid: np.ndarray\r\n ) -> \"Grid2DIterate\":\r\n return Grid2DIterate(\r\n values=self - deflection_grid,\r\n mask=self.mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def image_2d_from(\r\n self, grid: aa.type.Grid2DLike, operated_only: Optional[bool] = None\r\n ) -> np.ndarray:\r\n\r\n hermite_y = hermite(n=self.n_y)\r\n hermite_x = hermite(n=self.n_x)\r\n\r\n y = grid[:, 0]\r\n x = grid[:, 1]\r\n\r\n shapelet_y = hermite_y(y / self.beta)\r\n shapelet_x = hermite_x(x / self.beta)\r\n\r\n return (\r\n shapelet_y\r\n * shapelet_x\r\n * np.exp(-0.5 * (y**2 + x**2) / (self.beta**2))\r\n / self.beta\r\n / (\r\n np.sqrt(\r\n 2 ** (self.n_x + self.n_y)\r\n * (np.pi)\r\n * factorial(self.n_y)\r\n * factorial(self.n_x)\r\n )\r\n )\r\n )", "def blurred_images_of_planes_from_grid_and_convolver(\r\n self, grid, convolver, blurring_grid\r\n ):\r\n\r\n traced_grids_of_planes = self.traced_grids_of_planes_from_grid(grid=grid)\r\n traced_blurring_grids_of_planes = self.traced_grids_of_planes_from_grid(\r\n grid=blurring_grid\r\n )\r\n\r\n return [\r\n plane.blurred_image_2d_from_grid_and_convolver(\r\n grid=traced_grids_of_planes[plane_index],\r\n convolver=convolver,\r\n blurring_grid=traced_blurring_grids_of_planes[plane_index],\r\n )\r\n for (plane_index, plane) in enumerate(self.planes)\r\n ]", "def blurred_images_of_planes_from_grid_and_psf(self, grid, psf, blurring_grid):\r\n\r\n traced_grids_of_planes = self.traced_grids_of_planes_from_grid(grid=grid)\r\n traced_blurring_grids_of_planes = self.traced_grids_of_planes_from_grid(\r\n grid=blurring_grid\r\n )\r\n return [\r\n plane.blurred_image_2d_from_grid_and_psf(\r\n grid=traced_grids_of_planes[plane_index],\r\n psf=psf,\r\n blurring_grid=traced_blurring_grids_of_planes[plane_index],\r\n )\r\n for (plane_index, plane) in enumerate(self.planes)\r\n ]", "def g2Dto1Dgrid(g2D, grid, average_grid=False):\n\n g2D = np.array(g2D)\n grid = np.array(grid)\n\n g1D_dic = DictList() # hash table of radii and values at radii\n\n for i in range(g2D.shape[0]):\n for j in range(g2D.shape[1]):\n g1D_dic[grid[i, j]] += [g2D[i, j]]\n\n g1D = np.array(list(map(\n 
lambda radius: [radius, np.mean(g1D_dic[radius])],\n sorted(g1D_dic))))\n\n if not(average_grid): return g1D\n\n g2D_cylindrical = np.zeros(grid.shape)\n for radius, mean_g in zip(*np.transpose(g1D)):\n for i, j in zip(*np.where(grid == radius)):\n g2D_cylindrical[i, j] = mean_g\n\n return g1D, g2D_cylindrical", "def via_grid_from(self, grid: Grid2DLike) -> Visuals2D:\r\n if not isinstance(grid, Grid2D):\r\n return self.visuals\r\n\r\n origin = self.origin_via_mask_from(mask=grid.mask)\r\n\r\n return self.visuals + self.visuals.__class__(origin=origin)", "def galaxy_blurred_image_dict_from_grid_and_convolver(\r\n self, grid, convolver, blurring_grid\r\n ) -> {g.Galaxy: np.ndarray}:\r\n\r\n galaxy_blurred_image_dict = dict()\r\n\r\n traced_grids_of_planes = self.traced_grids_of_planes_from_grid(grid=grid)\r\n\r\n traced_blurring_grids_of_planes = self.traced_grids_of_planes_from_grid(\r\n grid=blurring_grid\r\n )\r\n\r\n for (plane_index, plane) in enumerate(self.planes):\r\n blurred_images_of_galaxies = plane.blurred_images_of_galaxies_from_grid_and_convolver(\r\n grid=traced_grids_of_planes[plane_index],\r\n convolver=convolver,\r\n blurring_grid=traced_blurring_grids_of_planes[plane_index],\r\n )\r\n for (galaxy_index, galaxy) in enumerate(plane.galaxies):\r\n galaxy_blurred_image_dict[galaxy] = blurred_images_of_galaxies[\r\n galaxy_index\r\n ]\r\n\r\n return galaxy_blurred_image_dict", "def wrapper(profile, grid, *args, **kwargs):\r\n\r\n if isinstance(grid, grids.Grid2DIterate):\r\n mask = grid.mask.mask_new_sub_size_from_mask(\r\n mask=grid.mask, sub_size=max(grid.sub_steps)\r\n )\r\n grid_compute = grids.Grid2D.from_mask(mask=mask)\r\n result_list = func(profile, grid_compute, *args, **kwargs)\r\n result_list = [\r\n grid_compute.structure_from_result(result=result)\r\n for result in result_list\r\n ]\r\n result_list = [result.slim_binned for result in result_list]\r\n return grid.grid.structure_list_from_result_list(result_list=result_list)\r\n elif isinstance(grid, grids.Grid2DInterpolate):\r\n return func(profile, grid, *args, **kwargs)\r\n elif isinstance(grid, grids.Grid2DIrregular):\r\n result_list = func(profile, grid, *args, **kwargs)\r\n return grid.structure_list_from_result_list(result_list=result_list)\r\n elif isinstance(grid, grids.Grid2D):\r\n result_list = func(profile, grid, *args, **kwargs)\r\n return grid.structure_list_from_result_list(result_list=result_list)\r\n\r\n if not isinstance(grid, grids.Grid2DIrregular) and not isinstance(\r\n grid, grids.Grid2D\r\n ):\r\n return func(profile, grid, *args, **kwargs)", "def blurImage2(in_image: np.ndarray, kernel_size: np.ndarray) -> np.ndarray:\r\n gaussian_kernel = cv2.getGaussianKernel(kernel_size[0], sigma=0)\r\n out_img = cv2.filter2D(in_image, -1, gaussian_kernel)\r\n return out_img", "def convert_blur_kernel(state_ros, state_nv, level):\n # They are all the same\n state_ros[f\"convs.{2*level}.conv.blur.kernel\"] = 4*state_nv[\"synthesis.b4.resample_filter\"]\n state_ros[f\"to_rgbs.{level}.upsample.kernel\"] = 4*state_nv[\"synthesis.b4.resample_filter\"]", "def blurImage1(in_image: np.ndarray, kernel_size: np.ndarray) -> np.ndarray:\r\n size = kernel_size[0]\r\n sigma = 1\r\n x, y = np.mgrid[-size:size + 1, -size:size + 1]\r\n normal = 1 / (2.0 * np.pi * sigma ** 2)\r\n g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2))) * normal\r\n in_image = cv2.filter2D(in_image, -1, g)\r\n return in_image", "def grid(self) -> aa.Grid2D:\r\n return self.analysis.dataset.grid", "def read_grid2d(grid_file):\n labels = []\n 
with grid_file.open('r') as f:\n for row in f.readlines():\n labels.append([x.strip() for x in row.split('\\t')])\n\n labels = array(labels)\n grid2d = make_grid(labels.shape[0], labels.shape[1])\n grid2d['label'] = labels\n return grid2d", "def linear_box_blur(grid):\n \n blurred_grid = grid\n \n for i in range(GRID_WIDTH):\n for j in range(GRID_HEIGHT):\n # Average value of surrounding tiles\n total = 0\n num_totaled = 0\n \n for k in range(len(DX)):\n # Coords of tile to add into average\n x = i + DX[k]\n y = j + DY[k]\n \n # Only average it if on the grid\n if 0 <= x < GRID_WIDTH and 0 <= y < GRID_HEIGHT:\n total += grid[x][y]\n num_totaled += 1\n \n # Compute average\n blurred_grid[i][j] = total / num_totaled\n \n return blurred_grid", "def padded_grid_from(self, kernel_shape_native: Tuple[int, int]) -> \"Grid2DIterate\":\r\n shape = self.mask.shape\r\n\r\n padded_shape = (\r\n shape[0] + kernel_shape_native[0] - 1,\r\n shape[1] + kernel_shape_native[1] - 1,\r\n )\r\n\r\n padded_mask = Mask2D.all_false(\r\n shape_native=padded_shape,\r\n pixel_scales=self.mask.pixel_scales,\r\n sub_size=self.mask.sub_size,\r\n )\r\n\r\n return Grid2DIterate.from_mask(\r\n mask=padded_mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def get_blur_kernel(n):\n return [1/n**2] * n**2", "def get_kernel(kernel_size, blur=1 / 20, halo=.001):\n\n # generate x and y grids\n x, y = np.mgrid[0:kernel_size * 2 + 1, 0:kernel_size * 2 + 1]\n\n center = kernel_size + 1 # center pixel\n r = np.sqrt((x - center) ** 2 + (y - center) ** 2) # distance from center\n\n # now compute the kernel. This function is a bit arbitrary.\n # adjust this to get the effect you want.\n kernel = np.exp(-r / kernel_size / blur) + (1 - r / r[center, 0]).clip(0) * halo\n return kernel", "def interp_2D(data_grid, info=ProcessInfo()):\n print('Interpolating data...')\n meth = info.INTERP_METHOD # Get method for interpolating\n print(meth)\n\n xy_grid = np.nonzero(data_grid)\n z_grid = data_grid[xy_grid]\n grid_x, grid_y = np.mgrid[0:1000:1000j, 0:1000:1000j]\n interp_grid = interpolate.griddata(xy_grid, z_grid, (grid_x, grid_y), method=meth)\n\n return interp_grid", "def interp_grid(\n old_model_obj,\n new_model_obj,\n shift_east=0,\n shift_north=0,\n pad=1,\n dim=\"2d\",\n smooth_kernel=None,\n):\n\n if dim == \"2d\":\n north, east = np.broadcast_arrays(\n old_model_obj.grid_north[:, None] + shift_north,\n old_model_obj.grid_east[None, :] + shift_east,\n )\n\n # 2) do a 2D interpolation for each layer, much faster\n new_res = np.zeros(\n (\n new_model_obj.grid_north.shape[0],\n new_model_obj.grid_east.shape[0],\n new_model_obj.grid_z.shape[0],\n )\n )\n\n for zz in range(new_model_obj.grid_z.shape[0]):\n try:\n old_zz = np.where(old_model_obj.grid_z >= new_model_obj.grid_z[zz])[0][\n 0\n ]\n except IndexError:\n old_zz = -1\n\n print \"New depth={0:.2f}; old depth={1:.2f}\".format(\n new_model_obj.grid_z[zz], old_model_obj.grid_z[old_zz]\n )\n\n new_res[:, :, zz] = spi.griddata(\n (north.ravel(), east.ravel()),\n old_model_obj.res_model[:, :, old_zz].ravel(),\n (new_model_obj.grid_north[:, None], new_model_obj.grid_east[None, :]),\n method=\"linear\",\n )\n\n new_res[0:pad, pad:-pad, zz] = new_res[pad, pad:-pad, zz]\n new_res[-pad:, pad:-pad, zz] = new_res[-pad - 1, pad:-pad, zz]\n new_res[:, 0:pad, zz] = (\n new_res[:, pad, zz].repeat(pad).reshape(new_res[:, 0:pad, zz].shape)\n )\n new_res[:, -pad:, zz] = (\n new_res[:, -pad - 1, zz]\n .repeat(pad)\n .reshape(new_res[:, -pad:, zz].shape)\n 
)\n\n if smooth_kernel is not None:\n new_res[:, :, zz] = smooth_2d(new_res[:, :, zz], smooth_kernel)\n\n elif dim == \"3d\":\n # 1) first need to make x, y, z have dimensions (nx, ny, nz), similar to res\n north, east, vert = np.broadcast_arrays(\n old_model_obj.grid_north[:, None, None],\n old_model_obj.grid_east[None, :, None],\n old_model_obj.grid_z[None, None, :],\n )\n\n # 2) next interpolate ont the new mesh (3D interpolation, slow)\n new_res = spi.griddata(\n (north.ravel(), east.ravel(), vert.ravel()),\n old_model_obj.res_model.ravel(),\n (\n new_model_obj.grid_north[:, None, None],\n new_model_obj.grid_east[None, :, None],\n new_model_obj.grid_z[None, None, :],\n ),\n method=\"linear\",\n )\n\n print \"Shape of new res = {0}\".format(new_res.shape)\n return new_res", "def GetKernel(self) -> \"itkFlatStructuringElement2 const &\":\n return _itkClosingByReconstructionImageFilterPython.itkClosingByReconstructionImageFilterIF2IF2SE2_GetKernel(self)", "def blur_spatial(im, kernel_size):\n kernel = gaus_kernel_calc(kernel_size)\n\n return scipy.signal.convolve2d(im, kernel, 'same').astype(np.float64)", "def slim(self) -> \"Grid2DIterate\":\r\n return Grid2DIterate(\r\n values=self,\r\n mask=self.mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n )", "def grid_evaluation(self,F1,F2,n_samp=5000):\n assert len(F1.shape)==1, 'input must be 1d ndarray'\n assert len(F2.shape)==1, 'input must be 1d ndarray'\n assert (F1.shape == F2.shape)\n n1 = len(F1)\n n2 = len(F2)\n f1,f2 = np.meshgrid(F1,F2)\n B = self.bern(f1.flatten(),f2.flatten(),n_samp=n_samp).reshape(n1,n2)\n return B", "def _TwoDMeshGrid(self, num_points, lattice_sizes, input_dims):\n if input_dims != 2:\n raise ValueError(\"2-d mesh grid is possible only for 2-d lattice. Lattice\"\n \" dimension given: %s\" % input_dims)\n return test_utils.two_dim_mesh_grid(\n num_points=num_points,\n x_min=0.0,\n y_min=0.0,\n x_max=lattice_sizes - 1.0,\n y_max=lattice_sizes - 1.0)", "def blur(image, kernel_size=(7, 7)):\n image = cv2.blur(image, kernel_size)\n return image", "def blur(image, kernel_size=(7, 7)):\n image = cv2.blur(image, kernel_size)\n return image", "def filter2D(img, kernel = (5,5)):\n\ttmp = img.copy()\n\tk = np.ones((kernel[0], kernel[1]), np.float32) / (kernel[0]*kernel[1])\n\tdst = cv2.filter2D(tmp, -1, k)\n\treturn dst" ]
[ "0.7138937", "0.7138747", "0.69914216", "0.63918656", "0.6009551", "0.5876295", "0.5850692", "0.5623698", "0.5560852", "0.5542704", "0.5464336", "0.54251134", "0.5424638", "0.5364959", "0.5306676", "0.52881783", "0.5256264", "0.5230065", "0.5176634", "0.51338965", "0.506968", "0.5061655", "0.5045858", "0.5045113", "0.5022292", "0.50089705", "0.5006474", "0.49832112", "0.49832112", "0.4962904" ]
0.7158281
0
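Conceptually, the blurring grid in the row above covers the masked pixels whose flux a PSF kernel can scatter into the unmasked region, i.e. the ring obtained by dilating the unmasked region with the kernel footprint and keeping only the pixels that are themselves masked. A rough standalone sketch with numpy and scipy, illustrating the idea rather than the PyAutoArray implementation:

import numpy as np
from scipy.ndimage import binary_dilation

# True = masked, False = unmasked (the pixels that are actually fitted).
mask = np.ones((7, 7), dtype=bool)
mask[2:5, 2:5] = False

# Every pixel a 3x3 PSF kernel can reach from an unmasked pixel...
reached = binary_dilation(~mask, structure=np.ones((3, 3), dtype=bool))

# ...that is itself masked belongs to the blurring region.
blurring_region = reached & mask
print(np.argwhere(blurring_region))   # the ring of pixels around the unmasked square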
Returns the resulting iterated array, by mapping it to 1D and then passing it back as an ``Array2D`` structure.
def return_iterated_array_result(self, iterated_array: Array2D) -> Array2D:
    iterated_array_1d = array_2d_util.array_2d_slim_from(
        mask_2d=self.mask, array_2d_native=iterated_array, sub_size=1
    )

    return Array2D(values=iterated_array_1d, mask=self.mask.derive_mask.sub_1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getArray2d(self):\n\t\treturn self.array2d", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def transform(self, x: Array2D) -> Array2D:", "def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def getShortArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def __iter__(self):\n for k in range(len(self)):\n yield np.ravel(self.xyz[k, :])", "def getLongArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def array (self, length, width):\n\t\treturn [[0 for i in range(width)] for j in range(length)] #List comprehensions (Works like two for loops)", "def iflatten(self):\n return _((e for es in self.array for e in es))", "def make_2d(x):\n return x.reshape((1, len(x)))", "def f1to2(x):\n assert_equal(x.ndim, 1)\n return (x[::-1] * x[1:,None]).view(cls)", "def _to_array2(self, maps, norb):\n nstate = len(maps[(0,)])\n arrays = numpy.zeros((norb, nstate, 3), dtype=numpy.int32)\n for i in range(norb):\n for k, data in enumerate(maps[(i,)]):\n arrays[i, k, 0] = data[0]\n arrays[i, k, 1] = data[1]\n arrays[i, k, 2] = data[2]\n return arrays", "def getBooleanArray2D(self) -> typing.List[typing.List[bool]]:\n ...", "def getDoubleArray2D(self) -> typing.List[typing.List[float]]:\n ...", "def __iter__(self):\n return self.array", "def to_1d_array(self):\n return reshape_fns.to_1d(self._obj, raw=True)", "def __array__(self):\n return self.to_array()", "def getFloatArray2D(self) -> typing.List[typing.List[float]]:\n ...", "def _to_array1(self, maps, norb):\n nstate = len(maps[(0, 1)])\n nlt = norb * (norb + 1) // 2\n arrays = numpy.zeros((nlt, nstate, 3), dtype=numpy.int32)\n for i in range(norb):\n for j in range(i + 1, norb):\n ijn = i + j * (j + 1) // 2\n for k, data in enumerate(maps[(i, j)]):\n arrays[ijn, k, 0] = data[0]\n arrays[ijn, k, 1] = data[1]\n arrays[ijn, k, 2] = data[2]\n return arrays", "def ndarray(self):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n data = self.clear().data.collect()\n\n result = np.zeros(self._shape, dtype=self._dtype)\n\n for e in data:\n result[e[0], e[1]] = e[2]\n\n return result", "def flatten(self):\n\n if self.ndim == 1:\n return self.copy()\n\n return ArrayCoordinates1d(self.coordinates.flatten(), **self.properties)", "def copy(self):\n\n return ArrayCoordinates1d(self.coordinates, **self.properties)", "def n2m(a):\n if not isinstance(a, np.ndarray): a = np.array(a)\n return multiprocessing.Array(a.dtype.char, a.flat, lock=False), tuple(a.shape), a.dtype.char, isinstance(a, np.matrix)", "def getByteArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def iterated_result_from(\r\n self, func: Callable, cls: object\r\n ) -> Union[Array2D, Grid2D]:\r\n result_sub_1_1d = func(cls, self.grid)\r\n result_sub_1_2d = self.grid.structure_2d_from(\r\n result=result_sub_1_1d\r\n ).binned.native\r\n\r\n if len(result_sub_1_2d.shape) == 2:\r\n return self.iterated_array_from(\r\n func=func, cls=cls, array_lower_sub_2d=result_sub_1_2d\r\n )\r\n elif len(result_sub_1_2d.shape) == 3:\r\n return self.iterated_grid_from(\r\n func=func, cls=cls, grid_lower_sub_2d=result_sub_1_2d\r\n )", "def __array__(self):\n return np.asarray(self.data)", "def as_numpy_array_2D(self):\n wx = []\n wy = []\n for wp in self.waypoints:\n wx.append(wp.location.x)\n wy.append(wp.location.y)\n return np.array([wx, wy])", "def to_numpy(self) -> np.ndarray:\n return np.stack([x.to_numpy() for x in self])", "def 
_asarray1d(arr, copy=False):\n if copy:\n return asarray(arr).flatten()\n else:\n return asarray(arr).ravel()", "def array(self):\n return np.asarray(self)" ]
[ "0.68928725", "0.67711896", "0.6701381", "0.6635095", "0.61929125", "0.61611664", "0.6145428", "0.61135894", "0.6046715", "0.6029555", "0.6016695", "0.5990024", "0.59658796", "0.59533745", "0.59407365", "0.5928865", "0.5895125", "0.5891846", "0.58794075", "0.5870737", "0.5867125", "0.5861292", "0.5830309", "0.58219075", "0.581258", "0.57428735", "0.5725504", "0.5722666", "0.5721656", "0.5714117" ]
0.7220839
0
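At sub-size 1 the ``native`` to ``slim`` mapping used in the row above amounts to reading off the values at the unmasked pixels in row-major order. A toy numpy version of that step, with an invented mask and values (it is not the library's array_2d_slim_from):

import numpy as np

mask = np.array([[True,  False, True],
                 [False, False, False],
                 [True,  False, True]])

iterated_native = np.arange(9, dtype=float).reshape(3, 3)   # pretend iterated values
iterated_slim = iterated_native[~mask]                       # one value per unmasked pixel
print(iterated_slim)                                         # [1. 3. 4. 5. 7.]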
Iterate over a function that returns a grid of values until it meets a specified fractional accuracy. The function returns a result on a pixel grid where evaluating it on more points on a higher resolution sub-grid, followed by binning, leads to a more precise evaluation of the function. For the fractional accuracy of the grid to be met, both the y and x values must meet it. The function is first called for a sub-grid size of 1 and a higher resolution grid. The ratio of values gives the fractional accuracy of each function evaluation. Pixels which do not meet the fractional accuracy are iteratively re-evaluated on higher resolution sub-grids. This is repeated until all pixels meet the fractional accuracy or the highest sub-size specified in the sub_steps attribute is computed. If the function returns all zeros, the iteration is terminated early, given that all levels of sub-gridding will return zeros. This occurs when a function is missing optional objects that contribute to the calculation. An example use case of this function is when a "deflections_yx_2d_from" method in PyAutoLens's ``MassProfile`` module is computed, which by evaluating the function on a higher resolution sub-grid samples the analytic mass profile at more points and thus more precisely.
def iterated_grid_from(
    self, func: Callable, cls: object, grid_lower_sub_2d: Grid2D
) -> Grid2D:
    if not np.any(grid_lower_sub_2d):
        return grid_lower_sub_2d.slim

    iterated_grid = np.zeros(shape=(self.shape_native[0], self.shape_native[1], 2))

    threshold_mask_lower_sub = self.mask

    for sub_size in self.sub_steps[:-1]:
        grid_higher_sub = self.grid_at_sub_size_from(
            func=func, cls=cls, mask=threshold_mask_lower_sub, sub_size=sub_size
        )

        threshold_mask_higher_sub = self.threshold_mask_via_grids_from(
            grid_lower_sub_2d=grid_lower_sub_2d, grid_higher_sub_2d=grid_higher_sub
        )

        iterated_grid = self.iterated_grid_jit_from(
            iterated_grid=iterated_grid,
            threshold_mask_higher_sub=threshold_mask_higher_sub,
            threshold_mask_lower_sub=threshold_mask_lower_sub,
            grid_higher_sub_2d=grid_higher_sub,
        )

        if threshold_mask_higher_sub.is_all_true:
            iterated_grid_1d = grid_2d_util.grid_2d_slim_from(
                mask=self.mask, grid_2d_native=iterated_grid, sub_size=1
            )

            return Grid2D(values=iterated_grid_1d, mask=self.mask.derive_mask.sub_1)

        grid_lower_sub_2d = grid_higher_sub
        threshold_mask_lower_sub = threshold_mask_higher_sub

    grid_higher_sub = self.grid_at_sub_size_from(
        func=func,
        cls=cls,
        mask=threshold_mask_lower_sub,
        sub_size=self.sub_steps[-1],
    )

    iterated_grid_2d = iterated_grid + grid_higher_sub.binned.native

    iterated_grid_1d = grid_2d_util.grid_2d_slim_from(
        mask=self.mask, grid_2d_native=iterated_grid_2d, sub_size=1
    )

    return Grid2D(values=iterated_grid_1d, mask=self.mask.derive_mask.sub_1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)", "def defint(func, start, stop, accuracy=0.0001, strict=False):\n if strict:\n step = accuracy\n endpoint = int((stop-start)/accuracy)+1\n else:\n start += accuracy\n step = (stop-start)*accuracy\n endpoint = int(1/accuracy)\n out = 0\n for x in xrange(0, endpoint):\n point = func(start)\n if x == 0 or x == endpoint:\n point /= 2\n out += step*point\n start += step\n return out", "def test_bounds_respected_func_called(\n self, check_bounds_respected):\n self.controller.problem.value_ranges = {'test': (0, 1)}\n self.controller.minimizer = \"deriv_free_algorithm\"\n\n _ = loop_over_hessians(self.controller,\n options=self.options,\n grabbed_output=self.grabbed_output,\n checkpointer=self.cp)\n check_bounds_respected.assert_called()", "def recursion_loop(pulls, discount, grid_n):\n\n r_grid = np.linspace(0, 1, grid_n)\n gittins, values = initial_approximation(pulls, discount, grid_n)\n n = pulls - 2 # Note that the 2 comes from (1) the initial approximation and (2) python indexing\n while n >= 1:\n g, v = recursion_step(values[:n + 1, n, :], r_grid, discount)\n values[:n, n - 1] = v\n gittins[:n, n - 1] = g\n n -= 1\n return gittins, values", "def iterate(self, batch_size=8, func=None):\n raise NotImplementedError()", "def evaluate(bounds , func):\n if len(bounds) != 2:\n raise ValueError(\"Bounds should contain 2 elements, found %d.\" % len(bounds))\n\n a = bounds[0]\n b = bounds[1]\n ya = func(a)\n yb = func((a+b)/2.)\n yc = func(b)\n I = (b-a) * (ya + 4. * yb + yc) / 6.\n return I", "def eval_func_on_grid(f, re, im, N):\n l = re[1] - re[0]\n h = im[1] - im[0]\n resL = N*l #horizontal resolution\n resH = N*h #vertical resolution\n x = np.linspace(re[0], re[1],resL)\n y = np.linspace(im[0], im[1], resH)\n x, y = np.meshgrid(x,y)\n z = x + 1j*y\n w = f(z)\n return w", "def find_all_zeros(min_re, max_re, min_im, max_im, fn,\r\n grid_points, iterations, reduction_factor,\r\n plot_full_region, show_progress):\r\n # Check arguments\r\n assert reduction_factor > 1 and max_re > min_re and max_im > min_im\r\n assert (max_re.imag == 0 and min_re.imag == 0\r\n and max_im.imag == 0 and min_im.imag == 0)\r\n # Edge-point rejection (see below) relies on the following assumption:\r\n assert grid_points > 2 * reduction_factor\r\n \r\n \r\n if plot_full_region:\r\n \r\n def inverse_fn(z):\r\n \"\"\" 1 / fn(z) \"\"\"\r\n f = fn(z)\r\n return inf if f == 0 else 1/f\r\n \r\n def contour_int(z, d_re, d_im):\r\n \"\"\"\r\n Approximate the contour integral of inverse_fn around a point z,\r\n using a rectangle of half-width d_re (in real direction) and\r\n half-height d_im. 
Just a nice plot that makes zeros stand out.\r\n \"\"\"\r\n assert d_re.imag == 0 and d_im.imag == 0 and d_re > 0 and d_im > 0\r\n below = inverse_fn(z - 1j * d_im)\r\n above = inverse_fn(z + 1j * d_im)\r\n left = inverse_fn(z - d_re)\r\n right = inverse_fn(z + d_re)\r\n return (below * (2 * d_re) + right * (2j * d_im)\r\n + above * (-2 * d_re) + left * (-2j * d_im))\r\n \r\n res, re_step = np.linspace(min_re, max_re, num=100, retstep=True)\r\n ims, im_step = np.linspace(min_im, max_im, num=100, retstep=True)\r\n \r\n fig = plt.figure()\r\n direct_plot = fig.add_subplot(111)\r\n data = [[math.log10(abs(fn(re + 1j * im))) for re in res] for im in ims]\r\n direct_plot.imshow(data, extent=(min_re * nu.um, max_re * nu.um,\r\n min_im * nu.um, max_im * nu.um),\r\n origin='lower')\r\n direct_plot.set_xlabel('Re(kx) [rad/um]')\r\n direct_plot.set_ylabel('Im(kx) [rad/um]')\r\n direct_plot.set_title('log(|fn(z)|) -- Looking for minima (blue)')\r\n\r\n fig = plt.figure()\r\n contour_plot = fig.add_subplot(111)\r\n data = [[-math.log10(abs(contour_int(re + 1j * im, re_step, im_step)))\r\n for re in res] for im in ims]\r\n contour_plot.imshow(data, extent=(min_re * nu.um, max_re * nu.um,\r\n min_im * nu.um, max_im * nu.um),\r\n origin='lower')\r\n contour_plot.set_xlabel('Re(kx) [rad/um]')\r\n contour_plot.set_ylabel('Im(kx) [rad/um]')\r\n contour_plot.set_title(\r\n '-log(|contour integral of 1/fn(z) around a little rectangle|)\\n'\r\n + ' -- This plot highlights zeros in fn(z), but also lines of\\n'\r\n + 'discontinuity (where top or bottom kz is pure-imaginary)')\r\n \r\n # \"regions\" is a list where each entry has the form\r\n # [min_re, max_re, min_im, max_im]. Each entry describes a region in which we\r\n # are seeking local minima.\r\n regions = [[min_re, max_re, min_im, max_im]]\r\n \r\n region_width_re = max_re - min_re\r\n region_width_im = max_im - min_im\r\n \r\n for iteration_number in range(iterations):\r\n # all_local_mins will be a list of (x, y) for every local minimum in\r\n # every region. 
This is used to generate the next iteration.\r\n all_local_mins = []\r\n for region_index in range(len(regions)):\r\n min_re_now, max_re_now, min_im_now, max_im_now = regions[region_index]\r\n results_grid = []\r\n re_list, re_step = np.linspace(min_re_now, max_re_now, num=grid_points, retstep=True)\r\n im_list, im_step = np.linspace(min_im_now, max_im_now, num=grid_points, retstep=True)\r\n fn_to_minimize = lambda z : abs(fn(z))\r\n \r\n results_grid = [[fn_to_minimize(re + 1j * im) for im in im_list]\r\n for re in re_list]\r\n results_grid = np.array(results_grid)\r\n # local_mins will be a list of (i,j) where (re_list[i], im_list[j])\r\n # is a local minimum on the results_grid\r\n local_mins = []\r\n for i in range(grid_points):\r\n for j in range(grid_points):\r\n is_min = all(results_grid[i2, j2] >= results_grid[i,j]\r\n for i2 in [i-1, i, i+1]\r\n for j2 in [j-1, j, j+1]\r\n if (0 <= i2 < grid_points\r\n and 0 <= j2 < grid_points))\r\n if is_min:\r\n local_mins.append((i,j))\r\n # local_mins_OK is the subset of local_mins that passes the\r\n # the edge-rejection test.\r\n # The edge-rejection test says that after the 0'th iteration, any\r\n # point at an edge is probably not a true minimum.\r\n \r\n local_mins_OK = []\r\n for (i,j) in local_mins:\r\n z_now = re_list[i] + 1j * im_list[j]\r\n if iteration_number >= 2 and (i == 0 or j == 0 or\r\n i == grid_points-1 or j == grid_points-1):\r\n # Rejecting an edge point...\r\n if show_progress:\r\n print('----')\r\n print('Deleting edge point: region #'\r\n + str(region_index+1) + ' (i,j)=', (i,j),\r\n ' kx in rad/um=',\r\n z_now / nu.um**-1,\r\n ' fn(z)=', fn(z_now))\r\n else:\r\n local_mins_OK.append((i,j))\r\n \r\n # Add local_mins_OK entries into all_local_mins\r\n for (i,j) in local_mins_OK:\r\n all_local_mins.append(re_list[i] + 1j * im_list[j])\r\n \r\n if show_progress:\r\n print('----')\r\n print('iter #' + str(iteration_number)\r\n + ' , region #' + str(region_index+1) + ' of ' + str(len(regions))\r\n + ' , ' + str(len(local_mins_OK)) + ' minima')\r\n if len(local_mins_OK) > 0:\r\n print('For each, here is ((i, j), kx in rad/um, fn(kx)):')\r\n print([((i, j), (re_list[i] + 1j * im_list[j]) / nu.um**-1,\r\n fn(re_list[i] + 1j * im_list[j]))\r\n for (i,j) in local_mins_OK])\r\n\r\n # Now we've gone through every region.\r\n # Delete redundant minima that showed up in overlapping regions.\r\n all_local_mins_norepeat = []\r\n def is_repeat(z1, z2):\r\n return ((abs((z1 - z2).real) <= 0.5 * re_step) and\r\n (abs((z1 - z2).imag) <= 0.5 * im_step))\r\n for z_now in all_local_mins:\r\n if not any(is_repeat(z_now, z) for z in all_local_mins_norepeat):\r\n all_local_mins_norepeat.append(z_now)\r\n if show_progress:\r\n num_deleted = len(all_local_mins) - len(all_local_mins_norepeat)\r\n if num_deleted > 0:\r\n print('----')\r\n print('After iter #' + str(iteration_number)\r\n + ', deleted ' + str(num_deleted) + ' redundant point(s)')\r\n\r\n all_local_mins = all_local_mins_norepeat\r\n \r\n if show_progress:\r\n print('----')\r\n print('** After iter #' + str(iteration_number) + ', we have '\r\n + str(len(all_local_mins)) + ' candidate minima')\r\n \r\n region_width_re /= reduction_factor\r\n region_width_im /= reduction_factor\r\n \r\n regions = [[z.real - region_width_re / 2, z.real + region_width_re / 2,\r\n z.imag - region_width_im / 2, z.imag + region_width_im / 2]\r\n for z in all_local_mins]\r\n \r\n # Done with main algorithm. Show the discovered minima on the plots as\r\n # white X's. 
Note: Zeros outside the plot region will not be seen here,\r\n # but the function still returns them.\r\n if plot_full_region:\r\n # Keep the image filling the plot area\r\n direct_plot.autoscale(False)\r\n contour_plot.autoscale(False)\r\n for z in all_local_mins:\r\n direct_plot.plot(z.real * nu.um, z.imag * nu.um, 'wx')\r\n contour_plot.plot(z.real * nu.um, z.imag * nu.um, 'wx')\r\n return all_local_mins", "def _compute_general_continued_fraction(\n max_iterations,\n numerator_denominator_args_list,\n tolerance=None,\n partial_numerator_fn=None,\n partial_denominator_fn=None,\n dtype=tf.float32,\n name=None):\n with tf.name_scope(name or 'continued_fraction'):\n dtype = dtype_util.common_dtype(\n numerator_denominator_args_list, dtype)\n\n if (partial_numerator_fn is None) and (partial_denominator_fn is None):\n raise ValueError('Expect one of `partial_numerator_fn` and '\n '`partial_denominator_fn` to be set.')\n\n def _continued_fraction_one_step(\n unused_should_stop,\n numerator,\n previous_numerator,\n denominator,\n previous_denominator,\n iteration_count):\n partial_denominator = 1.\n if partial_denominator_fn:\n partial_denominator = partial_denominator_fn(\n iteration_count, *numerator_denominator_args_list)\n new_numerator = partial_denominator * numerator\n new_denominator = partial_denominator * denominator\n\n partial_numerator = 1.\n if partial_numerator_fn:\n partial_numerator = partial_numerator_fn(\n iteration_count, *numerator_denominator_args_list)\n new_numerator = new_numerator + partial_numerator * previous_numerator\n new_denominator = (\n new_denominator + partial_numerator * previous_denominator)\n\n should_stop_next = iteration_count > max_iterations\n\n if tolerance is not None:\n # We can use a more efficient computation when the partial numerators\n # are 1.\n if partial_numerator_fn is None:\n # We now want to compute to relative error between the fraction at\n # this iteration, vs. 
the previous iteration.\n # Let h_i be the numerator and k_i the denominator, and a_i be the\n # i-th term.\n # h_i / k_i - h_{i-1} / k_{i-1} =\n # (h_i * k_{i - 1} - h_{i - 1} * k_i) / (k_i * k_{i - 1}) =\n # ((a_i h_{i - 1} + h_{i - 2}) * k_{i - 1} -\n # (a_i k_{i - 1} + k_{i - 2}) * h_{i - 1}) / (k_i * k_{i - 1}) =\n # -(h_{i - 1} * k_{i - 2} - h_{i - 2} * k_{i - 1}) / (k_i * k_{i - 1})\n # This suggests we should prove something about the numerator\n # inductively, and indeed\n # (h_i * k_{i - 1} - h_{i - 1} * k_i) = (-1)**i\n delta = tf.math.reciprocal(new_denominator * denominator)\n # We actually need to compute the difference of fractions.\n else:\n delta = new_numerator / new_denominator - numerator / denominator\n\n converged = tf.math.abs(delta) <= tolerance\n should_stop_next = tf.reduce_all(converged) | should_stop_next\n return (should_stop_next,\n new_numerator,\n numerator,\n new_denominator,\n denominator,\n iteration_count + 1.)\n\n # This is to infer the correct shape of tensors\n if partial_denominator_fn:\n term = partial_denominator_fn(1., *numerator_denominator_args_list)\n else:\n term = partial_numerator_fn(1., *numerator_denominator_args_list)\n\n zeroth_numerator = tf.ones_like(term, dtype=dtype)\n zeroth_denominator = tf.zeros_like(term, dtype=dtype)\n first_numerator = tf.zeros_like(term, dtype=dtype)\n first_denominator = tf.ones_like(term, dtype=dtype)\n\n results = tf.while_loop(\n cond=lambda stop, *_: ~stop,\n body=_continued_fraction_one_step,\n loop_vars=(\n False,\n first_numerator,\n zeroth_numerator,\n first_denominator,\n zeroth_denominator,\n tf.cast(1., dtype=dtype)))\n return results[1] / results[3]", "def iterated_result_from(\r\n self, func: Callable, cls: object\r\n ) -> Union[Array2D, Grid2D]:\r\n result_sub_1_1d = func(cls, self.grid)\r\n result_sub_1_2d = self.grid.structure_2d_from(\r\n result=result_sub_1_1d\r\n ).binned.native\r\n\r\n if len(result_sub_1_2d.shape) == 2:\r\n return self.iterated_array_from(\r\n func=func, cls=cls, array_lower_sub_2d=result_sub_1_2d\r\n )\r\n elif len(result_sub_1_2d.shape) == 3:\r\n return self.iterated_grid_from(\r\n func=func, cls=cls, grid_lower_sub_2d=result_sub_1_2d\r\n )", "def geometric_progression_for_stepsize(\n self, x, update, dist, decision_function, current_iteration\n ):\n epsilon = dist / np.sqrt(current_iteration)\n while True:\n updated = np.clip(x + epsilon * update, self.clip_min, self.clip_max)\n success = decision_function(updated[None])[0]\n if success:\n break\n else:\n epsilon = epsilon / 2.0 # pragma: no cover\n\n return epsilon", "def _fp_evaluate(sequence, iteration, tolerance):\n return np.abs(sequence[iteration] - sequence[iteration - 1]) < tolerance", "def integrate_simpson_adaptive(f, lower, upper, accuracy=1e-15, detailed=False):\n a = lower # Lower limit of integration\n b = upper # Upper limit of integration\n Tol = accuracy # Error tolerance\n error = 1.0 # Error needs to be initialized as something\n N = 1 # Start with 1 slice\n\n # I0 is the approximation with a single slice\n I0 = ((b-a)/6) * (f(a) + 4 * f((b - a)/2) + f(b)) # Area of first and last slice\n\n # Do you want detailed results?\n if detailed is True:\n print(\"N = \", N, \", I = \", I0, sep=\"\")\n\n while error > Tol:\n\n N *= 2\n w = (b - a) / N # Width of each slice\n\n I = 0.5 * I0\n for i in range(1, N, 2):\n I += -f(a + w * i) * w / 3\n for i in range(0, N):\n I += (2 / 3) * f(a + w * (i + 1 / 2)) * w\n\n error = abs(I - I0)/15\n\n I0 = I\n\n # Do you want detailed results?\n if detailed is True:\n 
print(\"N = \", N, \", I = \", I, \", Error = \", error, sep=\"\")\n\n return I, N, error", "def test_bounds_respected_func_not_called(\n self, check_bounds_respected):\n self.controller.problem.value_ranges = {'test': (0, 1)}\n self.controller.minimizer = \"deriv_free_algorithm\"\n self.controller.flag_expected = [3]\n\n _ = loop_over_hessians(self.controller,\n options=self.options,\n grabbed_output=self.grabbed_output,\n checkpointer=self.cp)\n check_bounds_respected.assert_not_called()", "def FindGrid(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def search_true_instance(f, a, b, precision_digits=3, maxiter=10, log=None):\n log = logging.getLogger('search_true_instance')\n\n values_searched = [a, b]\n log.debug(\"Starting exploratory search in [%s, %s]\" % (a, b))\n\n for iter_i in range(maxiter):\n # First test halfway, point then 1/4 and 3/4, then 1/8, 3/8, 5/8, 7/8, etc.\n fractions = 2**(iter_i + 1)\n search_points = [round_to_digits(a + (b - a)*fr, precision_digits)\n for fr in np.arange(1, fractions, 2)/fractions]\n log.debug(\"Searching %s - %s (%d points)\" % (search_points[0], search_points[-1], len(search_points)))\n\n for x_i, x in enumerate(search_points):\n if f(x):\n values_searched = np.array(values_searched)\n return x, np.max(values_searched[values_searched < x]), np.min(values_searched[values_searched > x])\n else:\n values_searched.append(x)\n\n if len(search_points) > 1 and np.any(np.diff(search_points) == 0):\n raise SearchFailedException(\"No true value found in search region [%s, %s], \"\n \"but search depth now lower than precision digits (%s). \"\n \"Iteration %d.\" % (a, b, precision_digits, iter_i))\n\n raise ValueError(\"Exploratory search failed to converge or terminate - bug? excessive precision?\")", "def instability_bf(funcs, step = 10, maximum = 300, guess = 0, tolerance=0.01):\n if guess < maximum:\n s = 1 # to increase\n else:\n s = -1 # to decrease\n step = s*abs(step) # correct step\n # offset to ensure that data moves to maximum even if actual data is stable\n offset = [f(maximum) for f in funcs]\n val_prev = np.array([f(guess-step) for f in funcs]+offset) # first y values with offset\n acc = 0 # accumulator to interchange when to add offset and when not\n while s*(maximum-guess)>0: # check approximation to maximum\n val = [f(guess) for f in funcs] # get y values\n if acc%2: # interchange\n val = np.array(val+offset) # values with offset\n else:\n val = np.array(val+val) # just values\n # np.repeat(np.mean(val),val.size)\n # check minimization\n if np.allclose(val, val_prev, tolerance, tolerance): # it means instability\n return True, guess # success!\n guess += step # updata step\n acc += 1 # update accumulator\n val_prev = val # update previous data\n return False, guess # not found or limit reached", "def iterate(self, update_func):\n self.pre_loop()\n logger.info('%s: start iterations', self.__class__.__name__)\n with recording_exit_reason(self.datastore):\n for gen_step in range(self.iterations):\n self.post_update(gen_step, update_func(gen_step))\n logger.info('%s: maximum iterations reached', self.__class__.__name__)", "def value_iteration_on_grid_world() -> PolicyAndValueFunction:\n result = get_value_iteration(grid_world, 0.99, 0.01)\n export_to_json(result.pi, 'value_iteration_grid_world')\n return result", "def wrapper(profile, grid, *args, **kwargs):\r\n\r\n if isinstance(grid, grids.Grid2DIterate):\r\n mask = grid.mask.mask_new_sub_size_from_mask(\r\n mask=grid.mask, sub_size=max(grid.sub_steps)\r\n )\r\n 
grid_compute = grids.Grid2D.from_mask(mask=mask)\r\n result_list = func(profile, grid_compute, *args, **kwargs)\r\n result_list = [\r\n grid_compute.structure_from_result(result=result)\r\n for result in result_list\r\n ]\r\n result_list = [result.slim_binned for result in result_list]\r\n return grid.grid.structure_list_from_result_list(result_list=result_list)\r\n elif isinstance(grid, grids.Grid2DInterpolate):\r\n return func(profile, grid, *args, **kwargs)\r\n elif isinstance(grid, grids.Grid2DIrregular):\r\n result_list = func(profile, grid, *args, **kwargs)\r\n return grid.structure_list_from_result_list(result_list=result_list)\r\n elif isinstance(grid, grids.Grid2D):\r\n result_list = func(profile, grid, *args, **kwargs)\r\n return grid.structure_list_from_result_list(result_list=result_list)\r\n\r\n if not isinstance(grid, grids.Grid2DIrregular) and not isinstance(\r\n grid, grids.Grid2D\r\n ):\r\n return func(profile, grid, *args, **kwargs)", "def iterate(self, update_func):\n if self.disc_param_save_on_error:\n update_func = param_file.wrap_with_save_on_error(\n self.gan.discriminator,\n self.datastore.path('disc_param', 'pre_error.npz'),\n self.datastore.path('disc_param', 'post_error.npz'),\n )(update_func)\n\n self.pre_loop()\n logger.info('%s: start iterations', self.__class__.__name__)\n with recording_exit_reason(self.datastore):\n for gen_step in range(self.iterations):\n self.post_update(gen_step, update_func(gen_step))\n logger.info('%s: maximum iterations reached', self.__class__.__name__)", "def integrate_trapz_adaptive(f, lower, upper, accuracy=1e-8, detailed=False):\n a = lower # Lower limit of integration\n b = upper # Upper limit of integration\n Tol = accuracy # Error tolerance\n error = 1 # Error needs to be initialized as something\n N = 1 # Start with 1 trapezoid\n\n # I0 is the approximation with a single trapezoid\n I0 = (f(a) + f(b))*(b-a)*0.5\n\n # Do you want detailed results?\n if detailed is True:\n print(\"N = \", N, \", I = \", I0, sep=\"\")\n\n while error > Tol: # Repeat until the error is less than the error tolerance\n\n N *= 2 # Double the slices each iteration\n w = (b - a)/N # Width of each trapezoid\n\n I = 0.5*I0\n for i in range(1, N, 2):\n I += f(a + i*w)*w\n\n error = abs(I - I0)/3\n\n I0 = I\n\n # Do you want detailed results?\n if detailed is True:\n print(\"N = \", N, \", I = \", I, \", Error = \", error, sep=\"\")\n\n return I, N, error", "def doubling_nc(f, xl: float, xr: float, n: int, tol: float, *params):\n # required local variables to return\n # S : computed value of the integral with required tolerance\n # N : number of steps for S\n # err : estimated error of S\n # iter : number of iterations (steps doubling)\n iter = 1\n N = 1\n\n if n % 2 != 0:\n m = n\n else:\n m = n - 1\n m += 1\n\n\n s1 = composite_quad(f, xl, xr, N, n, *params)\n\n # цикл по итерациям\n while iter != MAXITER+1:\n N *= 2\n\n s2 = composite_quad(f, xl, xr, N, n, *params)\n\n # берём погрешность из метода рунге\n estimate = np.abs(runge(s1, s2, 2, m))\n\n if estimate >= tol:\n s1 = s2\n else:\n return N, s2, estimate\n\n iter += 1\n\n if iter == MAXITER:\n print(\"Convergence not reached!\")\n return -1", "def integ(fun, a, b, tol, depth, endpt):\n c = 0.5*(a+b)\n approx1 = simp(fun, a, b)\n approx2 = simp(fun, a, c) + simp(fun, c, b)\n\n err_approx = (16/15)*abs(approx1 - approx2)\n\n if err_approx < tol or depth > MAX_DEPTH:\n endpt.append(a)\n return approx2\n\n integ_left = integ(fun, a, c, tol/2, depth+1, endpt)\n integ_right = integ(fun, c, b, tol/2, 
depth+1, endpt)\n return integ_left + integ_right", "def bisection(f, fu, point_a, point_b, point_c, point_d, lower_bound, upper_bound, length):\n n = 1\n theta = 0\n a = lower_bound\n b = upper_bound\n while n <= 100:\n theta = (a + b) / 2.0\n if -1e-6 < f(fu(point_a, point_b, point_c, theta), point_d) - length < 1e-6:\n # print 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n # print 'iteration', n\n return theta\n else:\n n = n + 1\n if f(fu(point_a, point_b, point_c, theta), point_d) - length > 0:\n b = theta\n else:\n a = theta\n\n print 'failedtheta', theta, 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n print 'iteration', n\n return False", "def bounded_aitken(f, x0, x1, y0, y1, x, yval, xtol, ytol):\n _abs = abs\n if y1 < 0.: x0, y0, x1, y1 = x1, y1, x0, y0\n dx1 = x1-x0\n dy = yval-y0\n if not (x0 < x < x1 or x1 < x < x0):\n x = x0 + dy*dx1/(y1-y0)\n yval_ub = yval + ytol\n yval_lb = yval - ytol\n while _abs(dx1) > xtol:\n y = f(x)\n if y > yval_ub:\n x1 = x\n y1 = y\n elif y < yval_lb:\n x0 = x\n y0 = y\n dy = yval-y\n else: \n return x\n dx0 = x1-x0\n g = x0 + dy*dx0/(y1-y0)\n if _abs(dx0) < xtol:\n return g\n \n y = f(g)\n if y > yval_ub:\n x1 = g\n y1 = y\n elif y < yval_lb:\n x0 = g\n y0 = y\n dy = yval-y\n else:\n return g\n dx1 = x1-x0\n gg = x0 + dy*dx1/(y1-y0)\n dxg = x - g\n try: x = x - dxg**2./(gg + dxg - g)\n except:\n # Add overshoot to prevent getting stuck\n x = gg + 0.1*(x1+x0-2*gg)*(dx1/dx0)**3. \n else:\n if not (x0 < x < x1 or x1 < x < x0):\n x = gg + 0.1*(x1+x0-2*gg)*(dx1/dx0)**3. \n return x", "def run_evaluation(self, epoch=0, global_step=0, verbose=True):\n\n # step-1, compute predictions on test set\n while True:\n try:\n preds_all = helper_utils.compute_predictions(\n self.session, self.meval, global_step, self.test_files, self.comet_exp\n )\n # If epoch trained without raising the below errors, break from loop.\n break\n except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:\n tf.logging.info('Retryable error caught: {}. 
Retrying.'.format(e))\n\n # step-2 evaluate on predictions\n results = helper_utils.eval_predictions(\n self.gt_depths, preds_all, global_step, min_depth=self.hparams.min_depth,\n max_depth=self.hparams.max_depth, verbose=verbose, comet_exp=self.comet_exp\n )\n return results, preds_all", "def run_epochs(self,\n fn: Callable[..., Optional[Dict[str, Any]]],\n data_generator: Iterable[ArrayTupleOrList],\n limit: Optional[int] = None,\n count: Optional[int] = None,\n metrics: Union[Sequence[str], type(ALL)] = NOT_SET,\n excludes: Sequence[str] = ()\n ) -> None:\n g = self.iter_epochs(limit=limit, count=count)\n try:\n for _ in g:\n self.run_batches(\n fn, data_generator, metrics=metrics, excludes=excludes)\n finally:\n g.close()", "def iterated_array_from(\r\n self, func: Callable, cls: object, array_lower_sub_2d: Array2D\r\n ) -> Array2D:\r\n\r\n if not np.any(array_lower_sub_2d):\r\n return array_lower_sub_2d.slim\r\n\r\n iterated_array = np.zeros(shape=self.shape_native)\r\n\r\n threshold_mask_lower_sub = self.mask\r\n\r\n for sub_size in self.sub_steps[:-1]:\r\n array_higher_sub = self.array_at_sub_size_from(\r\n func=func, cls=cls, mask=threshold_mask_lower_sub, sub_size=sub_size\r\n )\r\n\r\n try:\r\n threshold_mask_higher_sub = self.threshold_mask_via_arrays_from(\r\n array_lower_sub_2d=array_lower_sub_2d,\r\n array_higher_sub_2d=array_higher_sub,\r\n )\r\n\r\n iterated_array = self.iterated_array_jit_from(\r\n iterated_array=iterated_array,\r\n threshold_mask_higher_sub=threshold_mask_higher_sub,\r\n threshold_mask_lower_sub=threshold_mask_lower_sub,\r\n array_higher_sub_2d=array_higher_sub,\r\n )\r\n\r\n except ZeroDivisionError:\r\n return self.return_iterated_array_result(iterated_array=iterated_array)\r\n\r\n if threshold_mask_higher_sub.is_all_true:\r\n return self.return_iterated_array_result(iterated_array=iterated_array)\r\n\r\n array_lower_sub_2d = array_higher_sub\r\n threshold_mask_lower_sub = threshold_mask_higher_sub\r\n\r\n array_higher_sub = self.array_at_sub_size_from(\r\n func=func,\r\n cls=cls,\r\n mask=threshold_mask_lower_sub,\r\n sub_size=self.sub_steps[-1],\r\n )\r\n\r\n iterated_array_2d = iterated_array + array_higher_sub.binned.native\r\n\r\n return self.return_iterated_array_result(iterated_array=iterated_array_2d)", "def _iterate_over_factors(self, func, args):\n # TODO The user may prefer to provide the arguments as lists and receive them as\n # TODO lists, as this may be the form in which they are available. This should\n # TODO be allowed, rather than packing and unpacking them repeatedly.\n args_list, numerical_args = self._validate_and_prepare_args_for_iteration(args)\n\n out = [\n self._get_method(self.factors[i], func, args_list[i], numerical_args)\n for i in range(len(self.factors))\n ]\n if self._pool_outputs:\n return self._pool_outputs_from_function(out)\n return out" ]
[ "0.5490733", "0.51834524", "0.51618934", "0.51331407", "0.5127401", "0.5124928", "0.50899917", "0.5055024", "0.50248444", "0.49940372", "0.49591714", "0.49212068", "0.4916253", "0.48678684", "0.4826238", "0.48227394", "0.48163268", "0.48041344", "0.4802279", "0.47982275", "0.4794496", "0.47895396", "0.47815323", "0.47788888", "0.47770053", "0.4771724", "0.47674632", "0.47633922", "0.47529274", "0.47458693" ]
0.58399826
0
Load an image as a grayscale image.
def read_gray_scale_image(data_path): return cv2.imread(data_path, cv2.IMREAD_GRAYSCALE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_gray_scale(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n\r\n if im.mode != \"L\":\r\n im = im.convert(\"L\")\r\n\r\n return img", "def grayscale(filename):\r\n image = SimpleImage(filename)\r\n for pixel in image:\r\n luminosity = compute_luminosity(pixel.red, pixel.green, pixel.blue)\r\n pixel.red = luminosity\r\n pixel.green = luminosity\r\n pixel.blue = luminosity\r\n return image", "def load_image(image_file):\n \n return _cv2.imread(image_file,_cv2.IMREAD_GRAYSCALE);", "def load_gray_img(fileName, scale=None):\n\n #img = cv2.imread(fileName, cv2.IMREAD_GRAYSCALE)\n img = cv2.imread(fileName, cv2.IMREAD_COLOR)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n \n if(scale != None):\n img = cv2.resize(img,None,fx=scale,fy=scale)\n\n return img", "def imread(path, is_grayscale=False):\r\n\r\n if is_grayscale:\r\n return scipy.misc.imread(path, flatten=True).astype(np.float32)\r\n # img1=cv.imread(path).astype(np.float32)\r\n # return cv.cvtColor(img1,cv.COLOR_BGR2YCrCb)\r\n else:\r\n # img1=cv.imread(path).astype(np.float32)\r\n # return cv.cvtColor(img1,cv.COLOR_BGR2YCrCb)\r\n\r\n return scipy.misc.imread(path).astype(np.float32)", "def imread(path, is_grayscale=True):\n if is_grayscale:\n return imageio.imread(path, as_gray=True, pilmode='YCbCr').astype(np.float32)\n else:\n return imageio.imread(path, pilmode='YCbCr').astype(np.float32)", "def imread(path, is_grayscale=True):\n if is_grayscale:\n #flatten=True 以灰度图的形式读取 \n return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)\n else:\n return scipy.misc.imread(path, mode='YCbCr').astype(np.float)", "def imread(path, is_grayscale=True):\n if is_grayscale:\n return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)\n else:\n return scipy.misc.imread(path, mode='YCbCr').astype(np.float)", "def gray_scale_image(image, *args, **kwargs):\n # TODO: Implement the method\n\n gray_scale = num.dot(image, [0.298, 0.587, 0.114])\n\n return gray_scale", "def read_image_greyscale(path: str) -> np.ndarray:\n img = imread(path)\n if len(img.shape) > 2:\n img = np.dot(img[..., :3], [0.299, 0.587, 0.114])\n return img", "def imread(path, is_grayscale=True):\n if is_grayscale:\n # flatten=True: 形成單層的灰階通道\n return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)\n else:\n return scipy.misc.imread(path, mode='YCbCr').astype(np.float)", "def grayscale(img):\n\tif img is None:\n\t\tprint \"Img is None\"\n\t\tsys.exit()\n\tif len(img.shape) > 2:\n\t\treturn cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\telse:\n\t\treturn img", "def load_image_file(filename):\n return cv2.cvtColor(cv2.imread(filename,0),COLOR_GRAY2RGB)", "def read_image(image_path):\n return np.array(load_img(image_path, color_mode='grayscale')) / 255", "def grayscale_image(image):\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray_image = cv2.bitwise_not(gray_image)\n\n if DEBUG:\n cv2.imwrite(\"tmp/tmp_grayscale.png\", gray_image)\n\n return gray_image", "def getGrayscaleImage(imageRGB):\n return color.rgb2gray(imageRGB)", "def gray_scale_img(img):\n if len(img.shape) == 2:\n img_gray = img.copy()\n elif len(img.shape) == 3:\n if img.shape[2] == 1:\n img_gray = img[:, :, 0].copy()\n else:\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img_gray", "def rgb2grayscale(image):\r\n\r\n assert image.ndim == 3 and image.shape[2] == 3\r\n\r\n gray_image = np.dot(image, [0.2989, 0.5870, 0.1140]).astype(np.uint8)\r\n\r\n return gray_image", "def testGrayscale(name = \"smokey.gif\"):\n image 
= Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n grayscale(image)\n image.draw()", "def grayschale(img):\n original = skimage.io.imread(img)\n grayscale = rgb2gray(original)\n\n return grayscale", "def grayschale(img):\n original = skimage.io.imread(img)\n grayscale = rgb2gray(original)\n\n return grayscale", "def convert_grayscale(self):\n return self.image.convert(\"L\")", "def read_image(filename, grayscale=False):\n # Convert to grayscale\n if grayscale:\n return cv2.imread(filename, cv2.IMREAD_GRAYSCALE)\n return cv2.imread(filename, cv2.IMREAD_COLOR)", "def imgfile_to_grayscale(filename):\n img = cv2.imread(filename)\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "def grayscale_image(input_image):\n return cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)", "def read_image(filename, representation):\n image = imread(filename)\n image = image.astype(np.float64)\n image /= (COLOR_SIZE-1)\n\n if representation == RGB_NUMBER:\n return image\n image_gray = rgb2gray(image)\n return image_gray", "def load_img(path, grayscale=False, target_size=None):\n if grayscale:\n img = cv2.imread(path, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n else:\n img = cv2.imread(path)\n if target_size:\n img = cv2.resize(img, (target_size[1], target_size[0]))\n return img", "def rgb2gray(img):\r\n return 0.2989 * img[..., 0] + 0.587 * img[..., 1] + 0.114 * img[..., 2]", "def imgLoad(path, gray=False):\n\tif gray:\n\t\treturn to_tensor(Image.open(path).convert('L'))[None,...]\n\treturn to_tensor(Image.open(path))[None,...]", "def imgLoad(path, gray=False):\n\tif gray:\n\t\treturn to_tensor(Image.open(path).convert('L'))[None,...]\n\treturn to_tensor(Image.open(path))[None,...]" ]
[ "0.7677413", "0.72421336", "0.7167619", "0.71243984", "0.70681775", "0.70260924", "0.7020777", "0.7017632", "0.6979911", "0.6973627", "0.6949091", "0.69090724", "0.6899337", "0.6897582", "0.68768406", "0.6867652", "0.68171", "0.6773599", "0.6762041", "0.6756517", "0.6756517", "0.67416596", "0.67302126", "0.66988784", "0.66424066", "0.66328317", "0.6628256", "0.66272634", "0.66240627", "0.66240627" ]
0.72882986
1
Create the test context by specifying expectation and function.
def __init__(self, expected, test_func): self._f = test_func self._exp = expected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def context():\n\n class FakeContext:\n function_name = \"FUNCTION_NAME\"\n memory_limit_in_mb = 1024\n invoked_function_arn = \"INVOKED_FUNCTION_ARN\"\n aws_request_id = \"AWS_REQUEST_ID\"\n log_group_name = \"LOG_GROUP_NAME\"\n log_stream_name = \"LOG_STREAM_NAME\"\n\n def get_remaining_time_in_millis(self):\n # 5 minutes\n return 300000\n\n return FakeContext()", "def test_context(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit_3(func)), contextlib._GeneratorContextManager)", "def _create_input_test(test_src, tested_function, options=None):\n def do_test_expected(self):\n \"\"\"\n Execute a test by calling a tested_function on test_src data.\n \"\"\"\n self.maxDiff = None\n\n got = \"\"\n if 'error' in test_src:\n self.assertRaises(test_src['error'], tested_function,\n test_src['in'], options)\n else:\n want = test_src['out']\n got = tested_function(test_src['in'], options)\n logging.debug('got = type %s', type(got))\n logging.debug(\"test_src['out'] = %s\",\n unicode(test_src['out']))\n self.assertEqual(got, want, \"\"\"Result matches\n expected = %s\n\n observed = %s\n \"\"\" % (want, got))\n\n return do_test_expected", "def test_setup(funct):\n\n def decorated_setup():\n \"\"\"Decorated test setup.\"\"\"\n testdb.reload_db()\n funct()\n return decorated_setup", "def test_execute_with_context(self):\n pass", "def test(self, func):\r\n @wraps(func)\r\n def wrapper():\r\n with nested(self._contexts) as context:\r\n context = [c for c in context if c is not None]\r\n argc = len(inspect.getargspec(func)[0])\r\n args = []\r\n for arg in context:\r\n if type(arg) is tuple: # type() is intentional\r\n args.extend(arg)\r\n else:\r\n args.append(arg)\r\n func(*args[:argc])\r\n wrapper.__wrapped__ = func\r\n self._tests.append(wrapper)\r\n if self.replace_tests:\r\n return wrapper\r\n return func", "def setUp(self):\n # Mocking popen\n self.popen_patcher = patch(\"pyfreesurfer.wrapper.subprocess.Popen\")\n self.mock_popen = self.popen_patcher.start()\n mock_process = mock.Mock()\n attrs = {\n \"communicate.return_value\": (\"mock_OK\", \"mock_NONE\"),\n \"returncode\": 0\n }\n mock_process.configure_mock(**attrs)\n self.mock_popen.return_value = mock_process\n\n # Mocking set environ\n self.env_patcher = patch(\n \"pyfreesurfer.wrapper.FSWrapper._freesurfer_version_check\")\n self.mock_env = self.env_patcher.start()\n self.mock_env.return_value = {}\n\n # Define function parameters\n self.kwargs = {\n \"fsdir\": \"/my/path/mock_fsdir\",\n \"anatfile\": \"/my/path/mock_anat\",\n \"sid\": \"Lola\",\n \"reconstruction_stage\": \"all\",\n \"resume\": False,\n \"t2file\": \"/my/path/mock_t2\",\n \"flairfile\": None,\n \"fsconfig\": \"/my/path/mock_fsconfig\"\n }", "def test_main_secret_file_parameters(self, mock_context, mock_create_aws, mock_gen):\n context = ef_password.EFPWContext()\n context.env, context.service = self.env, self.service\n context.secret_file = self.secret_file\n context.match = 'password'\n mock_context.return_value = context\n mock_create_aws.return_value = {\"kms\": self.mock_kms}\n ef_password.main()\n mock_gen.assert_called_once_with(context.secret_file, context.match, context.service, context.env, mock_create_aws.return_value)", "def test_main(self, mock_context, mock_create_aws, mock_gen):\n context = ef_password.EFPWContext()\n context.env, context.service, context.length = self.env, self.service, 24\n mock_context.return_value = context\n mock_create_aws.return_value = {\"kms\": self.mock_kms}\n ef_password.main()\n 
mock_gen.assert_called_once_with(24)\n self.mock_kms.decrypt.assert_not_called()\n self.mock_kms.encrypt.assert_called_once_with(\n KeyId='alias/{}-{}'.format(self.env, self.service),\n Plaintext=\"mock_secret\".encode()\n )", "def create_context(cls):\n pass", "def mock_context(mocker, user):\n return {\"request\": mocker.Mock(user=user)}", "def setUp(self):\n\n self.directory = tempfile.mkdtemp(dir=os.getcwd())\n spirv_args = self.test.spirv_args\n # Instantiate placeholders in spirv_args\n self.test.spirv_args = [\n arg.instantiate_for_spirv_args(self)\n if isinstance(arg, PlaceHolder) else arg for arg in self.test.spirv_args\n ]\n # Get all shader files' names\n self.inputs = [arg for arg in spirv_args if isinstance(arg, PlaceHolder)]\n self.file_shaders = [arg.filename for arg in self.inputs]\n\n if 'environment' in get_all_variables(self.test):\n self.test.environment.write(self.directory)\n\n expectations = [\n v for v in get_all_variables(self.test)\n if v.startswith(EXPECTED_BEHAVIOR_PREFIX)\n ]\n # Instantiate placeholders in expectations\n for expectation_name in expectations:\n expectation = getattr(self.test, expectation_name)\n if isinstance(expectation, list):\n expanded_expections = [\n element.instantiate_for_expectation(self)\n if isinstance(element, PlaceHolder) else element\n for element in expectation\n ]\n setattr(self.test, expectation_name, expanded_expections)\n elif isinstance(expectation, PlaceHolder):\n setattr(self.test, expectation_name,\n expectation.instantiate_for_expectation(self))", "def setup_fixtures(func):\n func = pytest.mark.usefixtures('smtp', 'mock_access_request', 'dummy_access_request')(func)\n func = pytest.mark.parametrize('mock_access_request',\n [{\n 'during_registration': True,\n 'during_registration_required': True,\n 'personal_data': PERSONAL_DATA\n }],\n indirect=True)(func)\n return func", "def setup_fixtures(func):\n func = pytest.mark.usefixtures('smtp', 'mock_access_request', 'dummy_access_request')(func)\n func = pytest.mark.parametrize('mock_access_request',\n [{\n 'during_registration': True,\n 'during_registration_required': True,\n 'personal_data': PERSONAL_DATA\n }],\n indirect=True)(func)\n return func", "def test_context(self):\n session = MagicMock()\n artifact_svc = MagicMock()\n file_svc = MagicMock()\n current_user = MagicMock()\n step_logger_svc = MagicMock()\n context = ExtensionContext(session, artifact_svc, file_svc, current_user, step_logger_svc, None)\n self.assertIsNotNone(context)", "def test_context_creation_and_retrieval(self):\n tracer_id = 'd551573a-01dc-41b2-b197-ea8afb7fbac1'.replace('-', '')\n\n with new_context(tracer_id=tracer_id):\n context = get_context()\n nose.tools.eq_(tracer_id, str(context.tracer_id))", "def _setup_app_context_for_test():\n ctx = application.app_context()\n ctx.push()\n yield # tests will run here\n ctx.pop()", "def test(ctx):\n pass", "def inner_test():\n pass", "def inner_test():\n pass", "def instantiate_for_expectation(self, testcase):\n raise PlaceHolderException('Subclass should implement this function.')", "def test_request_context_create(self, mock_rc_init):\n mock_request = MagicMock()\n rc = get_request_context(mock_request)\n mock_rc_init.assert_called_with(mock_request)\n self.assertIsInstance(rc, RequestContext)", "def setup_with_context_manager(testcase, cm):\n val = cm.__enter__()\n testcase.addCleanup(cm.__exit__, None, None, None)\n return val", "def SetExpectations(self):\n pass", "def test_mock_context(testdir):\n testdir.makepyfile(\n \"\"\"\n import requests\n 
import requests.sessions\n import pytest\n\n def test_context(requests_mock):\n original = requests.sessions.HTTPAdapter\n with requests_mock.patch('/api/not_test') as patch:\n assert requests.sessions.HTTPAdapter is not original\n assert requests.sessions.HTTPAdapter is original\n \"\"\"\n )\n\n result = testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines([\"*::test_context PASSED*\"])\n assert result.ret == 0", "def _context(name, func):\n\tpush_aspect(name, func)\n\tyield\n\tpop_aspect(name)", "def create_test_action(context, **kw):\n action = get_test_action(context, **kw)\n action.create()\n return action", "def setUp(self):\n self.app = Flask(__name__)\n self.gh = mock.MagicMock()\n self.db = mock.MagicMock()\n self.sc = mock.MagicMock()\n self.testcommand = TeamCommand(self.db, self.gh, self.sc)\n self.help_text = self.testcommand.help\n self.maxDiff = None", "def createMakingTest(tx, query, personId, testId, date, hour, result):\n tx.run(query, personId=personId, testId=testId, date=date, hour=hour, result=result)", "def before_test(self, func, *args, **kwargs):\n pass" ]
[ "0.64805484", "0.5710913", "0.55915433", "0.55788904", "0.5570544", "0.5555014", "0.5532453", "0.55062556", "0.545592", "0.5408811", "0.53556156", "0.53464824", "0.5333984", "0.5333984", "0.5314075", "0.53116554", "0.53097886", "0.52955264", "0.52648", "0.52648", "0.5262759", "0.52569103", "0.5253705", "0.52529466", "0.52491343", "0.5221272", "0.521918", "0.5213194", "0.52099407", "0.5205999" ]
0.5939782
1
Takes as input two dictionaries containing node -> infection time and filters only the items that correspond to observer nodes, up to the max number of observers
def filter_diffusion_data(infected, obs, max_obs=np.inf): ### Filter only observer nodes obs_time = dict((k,v) for k,v in infected.items() if k in obs) ### If maximum number does not include every observer, we pick the max_obs closest ones if max_obs < len(obs_time): ### Sorting according to infection times & conversion to a dict of 2tuples node_time = sorted(obs_time.items(), key=operator.itemgetter(1), reverse=False) new_obs_time = {} ### Add max_obs closest ones (n, t) = node_time.pop() while len(new_obs_time) < max_obs: new_obs_time[n] = t (n, t) = node_time.pop() return new_obs_time else: return obs_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _FilterMMarks(self):\n\n to_remove = []\n tplus1 = datetime.datetime.now() - datetime.timedelta(hours=1)\n\n for (i, (m1, m2)) in enumerate(self._mmarks):\n if (m1.starttime < tplus1):\n to_remove.append(i)\n\n to_remove.reverse()\n for i in to_remove:\n self._mmarks.pop(i)", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def _filterTimes(self):\n print(self.tRange)\n idT = np.where((self.tRange[0] > np.array(self.rawD['Epoch'][:])) & \n (self.tRange[1] < np.array(self.rawD['Epoch'][:])))[0]\n #print(self.rawD['Epoch'][:100])\n print(idT)\n # Filter data\n for key in filter(lambda x: ('Epoch' in x or \n ('Counts' in x and x[-1] == 's')), self.rawD.keys()):\n self.d[key] = self.rawD[key].copy()[idT]\n return", "def filter_nodes(nodes, filters):\n\n # print(\"Filtering nodes...\")\n\n kept_nodes = dict() # New list of filtered nodes\n\n for node_name in nodes:\n # print(\"Checking node \" + node_name)\n node = nodes[node_name]\n keep = True # Keep the node by default\n\n # Filter on properties\n # ----------------------------------------------\n if 'one-of-props' in filters:\n keep = False\n for 
prop in node['properties']:\n for check in filters['one-of-props']:\n if prop == check:\n keep = True\n break\n if keep == True:\n break # Stop here if node kept\n\n if not keep:\n continue # Move to next node\n\n if 'mandatory-props' in filters:\n keep = False\n for check in filters['mandatory-props']:\n for prop in node['properties']:\n if prop == check:\n keep = True\n break\n\n if not keep:\n break # Stop here if node kept\n\n if not keep:\n continue # Move to next node\n\n if 'exclude-props' in filters:\n for check in filters['exclude-props']:\n for prop in node['properties']:\n if prop == check:\n keep = False\n break\n\n if not keep:\n break # Stop here if node kept\n\n if not keep:\n continue # Move to next node\n\n # Filter on state\n # ----------------------------------------------\n if 'mandatory-states' in filters:\n keep = False\n for check in filters['mandatory-states']:\n if node['state'] == check:\n keep = True\n break\n\n if not keep:\n continue # Move to next node\n\n if 'exclude-states' in filters:\n for check in filters['exclude-states']:\n if node['state'] == check:\n keep = False\n break\n\n if not keep:\n continue # Move to next node\n\n # print(\"Keeping \", node_name)\n\n kept_nodes[node_name] = node\n\n return kept_nodes", "def filter_time_match(file1, file2):\n freq1 = int(file1.split(\".\")[1].split(\"_\")[1].replace(\"M\", \"\"))\n freq2 = int(file2.split(\".\")[1].split(\"_\")[1].replace(\"M\", \"\"))\n df1, df2 = filter_overlapping_files_dfs(file1, file2)\n\n dt1 = pandas.to_datetime(df1[\"date\"] + \" \" + df1[\"hour\"])\n dt2 = pandas.to_datetime(df2[\"date\"] + \" \" + df2[\"hour\"])\n\n dt_delta = datetime.timedelta(minutes=freq2 - freq1)\n time_match_df1 = dt1.copy()\n time_match_df2 = dt2.copy()\n for idx, dt in dt2.items():\n match = dt1[(dt1 >= dt) & (dt1 <= dt + dt_delta)]\n time_match_df1[match.index] = idx\n time_match_df2[idx] = 0\n time_match_df2[idx] = tuple(match.index)\n\n time_match_df2[time_match_df2.apply(len) != 10]\n return time_match_df1, time_match_df2", "def filter(self, observable):", "def time_filter(records, seconds):\n delta = datetime.timedelta(seconds)\n records = iter(records)\n previous = next(records)\n yield previous\n current = None\n fields = ['host', 'type', 'user_agent', 'info']\n\n for record in records:\n current = record\n for field in fields:\n if current[field] != previous[field]:\n yield current\n break\n else:\n if previous['datetime'] + delta < current['datetime']:\n yield current\n\n previous = current", "def filter_rare_node(users, businesses, reviews, user_thresh, business_thresh, friend_thresh):\n continue_filter = True\n filtered_users = set()\n filtered_businesses = set()\n while continue_filter:\n continue_filter = False\n # filter step 1\n users_interact_ind = {}\n business_interact_ind = {}\n for review in reviews:\n user_id = review['user_id'] # a list\n business_id = review['business_id'] # a list\n users_interact_ind[user_id] = users_interact_ind.get(user_id, 0) + 1\n business_interact_ind[business_id] = business_interact_ind.get(business_id, 0) + 1\n\n filtered_review_users = set(u for u in users_interact_ind.keys() if users_interact_ind[u]>=user_thresh)\n filtered_review_businesses = set(b for b in business_interact_ind.keys() if business_interact_ind[b]>=business_thresh)\n \n # loop until users' reviews equal to filtered reviews\n if (filtered_users != filtered_review_users) or (filtered_businesses != filtered_review_businesses):\n continue_filter = True\n\n # filter step 2\n # filter user and 
business\n # make user_friends_dict, only those users with lots of friends can be included\n user_friends_dict = {}\n for user in users:\n user_id = user['user_id']\n if user_id not in filtered_review_users:\n continue\n if not user['friends']:\n continue\n filtered_friends = [friend.strip() for friend in user['friends'].split(',') if friend.strip() in filtered_review_users]\n if len(filtered_friends) >= friend_thresh:\n user_friends_dict[user_id] = filtered_friends # users with friends larger than friend_thresh\n\n continue_inside = True\n while continue_inside:\n friends = {}\n continue_inside = False\n for user, user_friends in user_friends_dict.items():\n filtered_friends = [friend for friend in user_friends if friend in user_friends_dict] # friend in user_friends_dict's keys\n if len(filtered_friends) >= friend_thresh:\n friends[user] = filtered_friends\n else:\n continue_inside = True\n user_friends_dict = deepcopy(friends) # this takes time\n\n filtered_users = set(user_friends_dict.keys())\n filtered_businesses_list = []\n\n for business in businesses:\n business_id = business['business_id']\n if business_id not in filtered_review_businesses:\n continue\n if not business['categories']:\n continue\n if not business['city']:\n continue\n filtered_businesses_list.append(business_id)\n filtered_businesses = set(filtered_businesses_list)\n\n filtered_review = []\n for review in reviews:\n if (review['user_id'] in filtered_users) and (review['business_id'] in filtered_businesses):\n filtered_review.append(review)\n reviews = deepcopy(filtered_review) # this takes time\n\n print(len(list(filtered_users)))\n print(len(list(filtered_businesses)))\n print(len(reviews))\n print('filter loop')\n\n print('filter complete')\n return filtered_users, filtered_businesses, filtered_review", "def _filter_node_map(\n node_map: 'collections.OrderedDict[str, p_pb2.PipelineNode]',\n from_node_ids: Collection[str], to_node_ids: Collection[str],\n skip_node_ids: Collection[str]\n) -> 'collections.OrderedDict[str, p_pb2.PipelineNode]':\n ancestors_of_to_nodes = _traverse(node_map, _Direction.UPSTREAM, to_node_ids)\n descendents_of_from_nodes = _traverse(node_map, _Direction.DOWNSTREAM,\n from_node_ids)\n nodes_to_keep = ancestors_of_to_nodes.intersection(\n descendents_of_from_nodes) - set(skip_node_ids)\n filtered_node_map = collections.OrderedDict()\n for node_id, node in node_map.items():\n if node_id in nodes_to_keep:\n filtered_node_map[node_id] = node\n return filtered_node_map", "def filter_events_before_infection(events, admittime, infection_time, preceding_time,\n datetime_pattern=DATETIME_PATTERN, time_key=\"charttime\"):\n admittime_datetime = datetime.strptime(admittime, datetime_pattern)\n infection_datetime = datetime.strptime(infection_time, datetime_pattern) - timedelta(hours=preceding_time)\n new_events = []\n for event in events:\n # Pega a data do evento e o transforma em datetime\n event_datetime = datetime.strptime(event[time_key], datetime_pattern)\n # Compara se o evento aconteceu entre a data de adimissão e a data de infecção (já alterada)\n if event_datetime > admittime_datetime and event_datetime <= infection_datetime:\n new_events.append(event)\n return new_events", "def filter_spot_duration(connected_data, min_len):\n filtered_data = {}\n spot_num = 1\n for spot in connected_data:\n if (connected_data[spot].shape[0] >= min_len):\n filtered_data[spot_num] = connected_data[spot]\n spot_num = spot_num + 1\n return filtered_data", "def _compute_observations(self):\n observations = 
{}\n for ts in self.ts_ids:\n if self.traffic_signals[ts].time_to_act() or self.traffic_signals[ts].regular_obs() :\n observations[ts] = self.traffic_signals[ts]._compute_observation()\n return observations", "def filter_times(timestamps, time_difference):\n timestamps = sorted(set(timestamps))\n\n filtered_timestamps = []\n for current_timestamp in timestamps:\n if not filtered_timestamps or current_timestamp - filtered_timestamps[-1] > time_difference:\n filtered_timestamps.append(current_timestamp)\n\n return filtered_timestamps", "def compare_nodes(n1, n2):\n if not isinstance(n1, dict):\n raise TypeError(\"Invalid n1! Expected dict, got %s instead\" %\n type(n1).__name__)\n if not isinstance(n2, dict):\n raise TypeError(\"Invalid n2! Expected dict, got %s instead\" %\n type(n2).__name__)\n\n if not Pharmacophore.check_node(n1):\n raise ValueError(\"Invalid n1!\")\n\n if not Pharmacophore.check_node(n2):\n raise ValueError(\"Invalid n2!\")\n\n c = n1[\"freq\"] + n2[\"freq\"]\n d1 = sum(n1[\"type\"].values())\n d2 = sum(n2[\"type\"].values())\n d = d1 + d2\n sim = 0.0\n t = {}\n\n for phar in PHARS:\n if phar in n1[\"type\"] and phar in n2[\"type\"]:\n sim += (n1[\"type\"][phar] + n2[\"type\"][phar]) / d\n t[phar] = n1[\"type\"][phar] + n2[\"type\"][phar]\n elif phar in n1[\"type\"]:\n t[phar] = n1[\"type\"][phar]\n elif phar in n2[\"type\"]:\n t[phar] = n2[\"type\"][phar]\n return sim * c, t", "def test_disjoint_edges(self):\n path = os.path.join(get_file_dir(), 'data', 'GO_edges_disjoint_from.json')\n with open(path, 'rt') as json_file:\n json_files = []\n for data in json_file:\n json_files.append(json.loads(data))\n for entry in json_files:\n if entry[\"id\"] == \"GO:0044848__GO:0051179__disjoint_from\":\n self.assertEqual(entry[\"from\"], \"GO_term/GO:0044848\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0051179\")", "def _hist_filter_ts(commands, start_time, end_time):\n for cmd in commands:\n if start_time <= cmd[1] < end_time:\n yield cmd", "def filter_by_date(items, start_time, end_time=None):\n start_time = parser.parse(start_time + \"UTC\").timestamp()\n if end_time:\n end_time = parser.parse(end_time + \"UTC\").timestamp()\n else:\n end_time = time.time()\n\n filtered_items = []\n for item in items:\n if 'time' in item:\n item_time = item['time']\n elif 'timestamp' in item:\n item_time = item['timestamp']\n timestamp = parser.parse(item_time + \"UTC\").timestamp()\n if end_time > timestamp > start_time:\n filtered_items.append(item)\n\n return filtered_items", "def getDownstream(node, distance, pInteractions):\n seenNodes = set([node])\n borderNodes = [node]\n frontierNodes = []\n for dist in range(distance):\n while len(borderNodes) > 0:\n currNode = borderNodes.pop()\n if currNode in pInteractions:\n for i in pInteractions[currNode].keys():\n if i not in seenNodes:\n seenNodes.update([i])\n frontierNodes.append(i)\n borderNodes = deepcopy(frontierNodes)\n frontierNodes = list()\n return(seenNodes)", "def available_processes(processes, time):\n return filter(lambda x: ((x['arrival_time'] <= time) and (x['remaining_time'] > 0)), processes)", "def filterEvents(intervals_dates,list_infected,distance):\n d=distance\n list_gpsevents=[]\n for z in range(len(intervals_dates)-1):\n print(\"Interval: \",intervals_dates[z], \"y\", intervals_dates[z+1])\n infected,uninfected=getTrazaTimestamp(intervals_dates[z],intervals_dates[z+1],GPSrecords,list_infected)\n events_gps = nearest_neighbor(infected, uninfected, d)\n events_gps = events_gps.drop(['geometry','closest_stop_geom'], 
axis=1)\n print(len(events_gps))\n if(len(events_gps)!=0):\n list_gpsevents.append(events_gps.reset_index(drop=True))\n else:\n events_gps=pd.DataFrame()\n list_gpsevents.append(events_gps)\n #GPSevents=pd.concat(list_gpsevents).reset_index(drop=True)\n #return GPSevents\n return list_gpsevents", "def graphformation(time_lower, time_upper):\n\tprm = param.Para()\n\ttry:\n\t\tdb_connection = mysql.connector.connect(\n\t\t host=prm.hostname,\n\t\t user=prm.username,\n\t\t passwd=prm.password,\n\t\t database= prm.dbname\n\t\t )\n\t\tdb_cursor = db_connection.cursor()\n\texcept:\n\t\tprint(\"Can't Connect to database, check credentials in parameter file\")\n\tquery = (\"SELECT * FROM identity \")\n\tdb_cursor.execute(query)\n\tdf1=pd.DataFrame(db_cursor.fetchall())\n\tdf1.columns= ['node','deviceid','student','rollno']\n\tdict_identity = dict(zip(df1.deviceid, df1.node))\n\trev_dict_identity = dict(zip(df1.node, df1.deviceid ))\n\tquery = (\"SELECT * FROM activity WHERE time BETWEEN '{}' AND '{}'\".format(time_lower,time_upper)) ## incomplete\n\tdb_cursor.execute(query)\n\tactivity_data = pd.DataFrame(db_cursor.fetchall())\n\tif activity_data.empty==False:\n\t\tactivity_data.columns=[\"sl_no\",\"time\",\"node\",\"latitude\",\"longitude\"]\n\telse:\n\t\tprint(\"No Activity in the selected Time Window\")\n\t\treturn\n\tnumnodes= len(df1)\n\tedges= []\n\tscore = {}\n\t#print(activity_data)\n\ttime_groups = activity_data.groupby('time')\n\twith open(r'C:\\Users\\HP\\Desktop\\project\\Contact_Graph\\bluetooth.txt') as json_file:\n\t\tdata1 = json.load(json_file)\n\tfor name, group in time_groups:\n\t\tscore_tmp = decayfunc(name,time_upper)\n\t\tgroup = group.sort_values('node')\n\t\tfor i in range(len(group)-1):\n\t\t\tnode1 = group.iloc[i,2]\n\t\t\t###########################\n\t\t\tlistnearby=[]\n\t\t\ttry:\n\t\t\t\tlistnearby = data1[rev_dict_identity[node1]][str(name)]\n\t\t\t\tlistnearby = [dict_identity[i] for i in listnearby if dict_identity[i]>node1]\n\t\t\t\tfor i in listnearby:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tscore[(node1,i)]+=1\n\t\t\t\t\texcept:\n\t\t\t\t\t\tscore[(node1,i)]=1\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t###########################\n\t\t\tfor j in range(i+1,len(group)):\n\t\t\t\tnode2 =group.iloc[j,2]\n\t\t\t\tif proximityfunc(group.iloc[i,3],group.iloc[i,4],group.iloc[j,3],group.iloc[j,4]) and node2 not in listnearby:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tscore[(group.iloc[i,2],group.iloc[j,2])]+=1\n\t\t\t\t\texcept:\n\t\t\t\t\t\tscore[(group.iloc[i,2],group.iloc[j,2])]=1\n\tnode_list = list(df1.node)\n\ttitle_list = list(df1.deviceid)\n\tedges_list = []\n\tfor edge,val in score.items():\n\t\tedges_list.append((int(edge[0]),int(edge[1]),float(val)))\n\n\treturn edges_list,node_list,title_list", "def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, 
returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with 
same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")", "def print_twchang(nodesdict):\n\tqstat_str = sub.check_output(\"qstat\", shell=True).decode(\"utf-8\")\n\tmatch_list = re.findall(r\"(\\d+).*?\\s+.*?\\s+(.*?)\\s+.*\", qstat_str) # [(jobname, username)]\n\ttwchangset = set([a[0] for a in match_list if a[1] == \"twchang\"])\n\tinlist = [node for node in nodesdict if twchangset & nodesdict[node][\"users\"]]\n\tprint(\"Warning, twchang already in {}\".format(\", \".join(inlist))) if inlist else 0", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = no_communication_skar(d, source, target, others)\n return uniques", "def filter_nodes(request):\n\tmodel_dict = {\"player\": {'qs': Player.objects.all(), 'SerClass': PlayerSerializer},}\n\n\tlist_country_query = [e for e in ['player_country', 'band_country', 'venue_country', 'festival_country', 'album_country'] if e in request.query_params.keys()]\n\t# check that the nodes in the list_country_query are also selected !\n\tlist_country_query = [e for e in list_country_query if e[:-8] in ['player']]\n\n\t# filter each country\n\tif list_country_query != []:\n\t\tfor country_q in list_country_query:\n\t\t\t\n\t\t\t# for player_countries: treat it as a list so you query several countries at once\n\t\t\tif (country_q == 'player_country') and (hasattr(request.query_params, \"getlist\")):\n\t\t\t\tquery_list_countries = request.query_params.getlist('player_country')\n\t\t\t\t# have several countries in the filter\n\t\t\t\tQ_query_filter = 
Q()\n\t\t\t\tfor ql in query_list_countries:\n\t\t\t\t\tQ_query_filter |= Q(country=ql)\n\t\t\t\tmodel_dict[country_q[:-8]]['qs'] = model_dict[country_q[:-8]]['qs'].filter(Q_query_filter)\n\n\t\t\t# for everything else: only filter one option at a time\n\t\t\telse:\n\t\t\t\tmodel_dict[country_q[:-8]]['qs'] = model_dict[country_q[:-8]]['qs'].filter(country=request.query_params[country_q])\n\telse:\n\t\tpass\n\n\tlist_filter_query = [e for e in ['instrument', 'active', 'name'] if e in request.query_params.keys()]\n\tif 'name' in request.query_params.keys():\n\t\tfor k, v in model_dict.items():\n\t\t\tv['qs'] = v['qs'].filter(name__startswith=request.query_params['name'])\n\t\n\tif 'instrument' in request.query_params.keys():\n\t\t# build the query filter so that you can query several instruments at once\n\t\tQ_query_filter = Q()\n\t\tfor ql in request.query_params.getlist('instrument'):\n\t\t\tQ_query_filter |= Q(instrument__name=ql)\n\t\tmodel_dict['player']['qs'] = model_dict['player']['qs'].filter(Q_query_filter)\n\n\tif 'active' in request.query_params.keys():\n\t\tfor k, v in model_dict.items():\n\t\t\tv['qs'] = v['qs'].filter(isactive=request.query_params['active'])\n\treturn model_dict", "def subdata(min_,dict_):\n list_ = []\n return [value for value,freq in dict_.items() if freq > min_]", "def test_subset_by_time(self):\n\n this_satellite_dict = satellite_io.subset_by_time(\n satellite_dict=copy.deepcopy(SATELLITE_DICT_ALL_EXAMPLES),\n desired_times_unix_sec=DESIRED_TIMES_UNIX_SEC\n )[0]\n\n self.assertTrue(compare_satellite_dicts(\n this_satellite_dict, SATELLITE_DICT_SUBSET_BY_TIME\n ))", "def _find_elements_without_transfer_topics(self, addresses: Sequence[str], from_block_number: int,\n to_block_number: int) -> List[Dict[str, Any]]:\n try:\n erc20_transfer_events = self.ethereum_client.erc20.get_total_transfer_history(from_block=from_block_number,\n to_block=to_block_number)\n except RequestException as e:\n raise self.FindRelevantElementsException('Request error retrieving erc20 events') from e\n\n filtered_events = []\n addresses_set = set(addresses) # Linear time `in` filtering\n for event in erc20_transfer_events:\n event_args = event.get('args')\n if event_args and (event_args.get('from') in addresses_set or event_args.get('to') in addresses_set):\n filtered_events.append(self._transform_transfer_event(event))\n return filtered_events", "def part_two():\n tasks = {}\n current_time = 0\n while G.nodes():\n # noinspection PyCallingNonCallable\n candidate_next_tasks = [task for task in G.nodes()\n if task not in tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[min_task_time]\n tasks = {k: v - min_task_time for k, v in tasks.items() if k != completed_task}\n G.remove_node(completed_task)\n return current_time", "def observatories():\n\n obs_db = {}\n\n obs_db['PWT-Oxford'] = { 'long':'-01:15:00', \\\n 'lat':'+51:45:00', \\\n 'altitude-metres':130.0, \\\n 'timezone':'Europe/London' }\n\n obs_db['LaPalma'] = { 'lat':'+28:45:00', \\\n 'long':'-17:53:00', \\\n 'altitude-metres':2326, \\\n 'timezone':'Atlantic/Canary' }\n \n obs_db['Paranal'] = { 'lat':'-24:37:00', \\\n 'long':'-70:24:00', \\\n 'altitude-metres':2635, \\\n 'timezone':'America/Santiago' }\n\n obs_db['LaSilla'] = { 'lat':'-29:15:00', \\\n 'long':'-70:44:00', 
\\\n 'altitude-metres':2380, \\\n 'timezone':'America/Santiago' }\n\n obs_db['MaunaKea'] = { 'lat':'+19:50:00', \\\n 'long':'-155:28:00', \\\n 'altitude-metres':4190, \\\n 'timezone':'Pacific/Honolulu' }\n \n obs_db['SidingSpring'] = { 'lat':'-31:16:00', \\\n 'long':'+149:04:00', \\\n 'altitude-metres':1149, \\\n 'timezone':'Australia/Sydney' }\n \n obs_db['KittPeak'] = { 'lat':'+31:58:00', \\\n 'long':'-111:36:00', \\\n 'altitude-metres':2096, \\\n 'timezone':'America/Phoenix' }\n\n obs_db['CalarAlto'] = { 'lat':'+37:13:25', \\\n 'long':'-2:32:47', \\\n 'altitude-metres':2168, \\\n 'timezone':'Europe/Madrid' }\n \n obs_db['Gemini-N'] = { 'lat':'+19:49:26', \\\n 'long':'-155:28:09', \\\n 'altitude-metres':4213, \\\n 'timezone':'Pacific/Honolulu' }\n\n obs_db['Gemini-S'] = { 'lat':'-30:14:27', \\\n 'long':'-70:44:12', \\\n 'altitude-metres':2722, \\\n 'timezone':'America/Santiago' }\n\n return obs_db" ]
[ "0.5169515", "0.5160301", "0.5146102", "0.50290227", "0.50263584", "0.50080407", "0.49891952", "0.49636072", "0.4950326", "0.49214664", "0.48818153", "0.48559263", "0.4845422", "0.48023024", "0.47504705", "0.47441253", "0.47315213", "0.4721948", "0.47171888", "0.4712595", "0.47101063", "0.47009027", "0.4696696", "0.4693909", "0.46882012", "0.46726692", "0.4672074", "0.46595103", "0.46565038", "0.46334717" ]
0.6450988
0
Constructor. Sets up the eyepoint, lookat and up arrays.
def __init__(self):
    self.eyepoint = np.array([*self.eyepoint], dtype=np.float32)
    self.lookat = np.array([*self.lookat], dtype=np.float32)
    self.up = np.array([*self.up], dtype=np.float32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.location = [(0, 0), (0, 1)]\n self.hit = (0, 0)", "def setUp(self):\n\n self.eps = 0.001 # Accept 0.1 % relative error\n\n self.RSISE = Point(-35.27456, 149.12065)\n self.Home = Point(-35.25629, 149.12494) # 28 Scrivener Street, ACT\n self.Syd = Point(-33.93479, 151.16794) # Sydney Airport\n self.Nadi = Point(-17.75330, 177.45148) # Nadi Airport\n self.Kobenhavn = Point(55.70248, 12.58364) # Kobenhavn, Denmark\n self.Muncar = Point(-8.43, 114.33) # Muncar, Indonesia", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, x, y, data):\n super().__init__(x=x, y=y, data=data, has_analytic_ft=False)\n self._ee = {}\n self._mtf = None\n self._nu_p = None\n self._dnx = None\n self._dny = None", "def __init__(self):\n\n self.points = None\n self.centroid_activation_frames = None\n self.noiseless_frames = None\n self.frames = None", "def __init__(self, points):\n self.endpoints = points", "def __init__(self, points):\n self.points = points\n self.init()", "def __init__(self):\n self.X = None\n self.Y = None\n self.features = None\n self.max = self.min = None\n self._look_up = None\n self.attr_weight = None", "def __init__(self, x, y, u):\n self.x = x\n self.y = y\n self.u = u", "def __init__(self, location):\r\n self.y = location[1]\r\n self.x = location[0]\r\n self.yvel = 0.0\r\n self.xvel = 0.0", "def __init__(self, num_points = 5000):\n self.num_points = num_points\n\n #all walks start at 0.0\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, look_from, look_to, look_up=vec3(0, 1, 0),\n interspatial_distance=1.0,\n spatial_rows=8, spatial_cols=8):\n self.set_look(look_from, look_to, look_up)\n self.spatial_rows = spatial_rows\n self.spatial_cols = spatial_cols\n self.interspatial_distance = interspatial_distance", "def __init__(self, x, y, z): \n\t\tself.x = x # x coordinate (EW distance from observatory center)\n\t\tself.y = y # y coordinate (NS distance from observatory center)\n\t\tself.z = z # z coordinate (altitude rel. 
to observatory center)", "def setUp(self):\n np.random.seed(1234)\n\n _TEST_FILE_NAME = 'AHN3.las'\n _TEST_DATA_SOURCE = 'testdata'\n\n _CYLINDER = InfiniteCylinder(4)\n _PC_260807 = load(os.path.join(_TEST_DATA_SOURCE, _TEST_FILE_NAME))\n _PC_1000 = copy_point_cloud(_PC_260807, array_mask=(\n np.random.choice(range(len(_PC_260807[keys.point]['x']['data'])), size=1000, replace=False)))\n _1000_NEIGHBORHOODS_IN_260807 = list(compute_neighbors.compute_neighborhoods(_PC_260807, _PC_1000, _CYLINDER))\n\n self.point_cloud = _PC_260807\n self.neigh = _1000_NEIGHBORHOODS_IN_260807", "def __init__(self):\n\n\t\tself.position = np.array([0, 0])", "def __init__(self, points):\n\t\tself.points = points", "def _setup_ndarrays(self) -> None:\n empty = self.ele_orig * 0\n # 2D arrays\n self.ele = np.copy(self.ele_orig) # Elevation including glaciers\n self.slp = np.copy(empty) # Slope with glacier geometry\n self.asp = np.copy(empty) # Classified aspect with glacier geometry\n self.h = np.copy(empty) # Local glacier height\n self.u = np.copy(empty) # Local glacier velocity\n self.hs = hillshade(\n self.ele_orig,\n self.PLOT_HILLSHADE_AZIMUTH,\n self.PLOT_HILLSHADE_ALTITUDE,\n ) # HS\n\n # Initialize array store\n self.store = ArrayStore()\n self.store.create(\"h\", self.MODEL_RECORD_SIZE)\n self.store.create(\"u\", self.MODEL_RECORD_SIZE)", "def __init__(self, input_object, **kwargs):\r\n super().__init__(input_object = input_object, **kwargs)\r\n self.timepoints = np.array(self.input_object['timepoints']).squeeze()\r\n self.raw_gaze_X = np.array(self.input_object['gaze_X']).squeeze()\r\n self.raw_gaze_Y = np.array(self.input_object['gaze_Y']).squeeze()\r\n self.raw_pupil = np.array(self.input_object['pupil']).squeeze()\r\n\r\n if not hasattr(self, 'sample_rate'): # this should have been set as a kwarg, but if it hasn't we just assume a standard 1000 Hz\r\n self.sample_rate = 1000.0\r\n\r\n if hasattr(self, 'eyelink_blink_data'):\r\n # internalize all blinks smaller than 4 seconds, since these are missing signals to be treated differently\r\n self.blink_starts_EL = (np.array(self.eyelink_blink_data['start_timestamp']) - self.timepoints[0]) / (1000.0 / self.sample_rate) # defined in samples\r\n self.blink_ends_EL = (np.array(self.eyelink_blink_data['end_timestamp']) - self.timepoints[0]) / (1000.0 / self.sample_rate) # defined in samples\r\n self.blink_dur_EL = self.blink_ends_EL - self.blink_starts_EL\r\n self.blink_starts_EL = self.blink_starts_EL[self.blink_dur_EL<4000]\r\n self.blink_ends_EL = self.blink_ends_EL[self.blink_dur_EL<4000]\r\n self.blink_dur_EL = self.blink_dur_EL[self.blink_dur_EL<4000]\r\n\r\n if hasattr(self, 'eyelink_sac_data'):\r\n self.sac_starts_EL = (np.array(self.eyelink_sac_data['start_timestamp']) - self.timepoints[0]) / (1000.0 / self.sample_rate) # defined in samples\r\n self.sac_ends_EL = (np.array(self.eyelink_sac_data['end_timestamp']) - self.timepoints[0]) / (1000.0 / self.sample_rate) # defined in samples\r\n self.sac_dur_EL = self.sac_ends_EL - self.sac_starts_EL", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n clean_level='clean')\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n return", "def setup_class(self):\n self.dset = read_ww3(os.path.join(FILES_DIR, \"ww3file.nc\"))\n # First two sites are exact matches, third site is in between\n self.lons = [92.00, 92.10, 92.05]\n self.lats = [19.80, 19.95, 19.88]\n self.lons_exact = self.lons[:2]\n 
self.lats_exact = self.lats[:2]\n self.lons_inexact = self.lons[-1:]\n self.lats_inexact = self.lats[-1:]", "def setup_class(self):\n self.g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3)\n self.g2 = models.Gaussian1D(10, mean=13, stddev=0.4)\n self.jf = JointFitter(\n [self.g1, self.g2], {self.g1: [\"amplitude\"], self.g2: [\"amplitude\"]}, [9.8]\n )\n self.x = np.arange(10, 20, 0.1)\n y1 = self.g1(self.x)\n y2 = self.g2(self.x)\n\n with NumpyRNGContext(_RANDOM_SEED):\n n = np.random.randn(100)\n\n self.ny1 = y1 + 2 * n\n self.ny2 = y2 + 2 * n\n self.jf(self.x, self.ny1, self.x, self.ny2)", "def __init__(self):\n\n self.X = None\n self.y = None", "def __init__(self):\n\n self.X = None\n self.y = None", "def __init__(self, x, y):\n self._x = x\n self._y = y", "def __init__(self):\n position = GenerateWay(X, depo)\n fitness = fitness_func(position)\n\n self.position = position\n self.fitness = fitness", "def __init__(self, x, y):\n self._x, self._y = x, y", "def __init__(self):\n this = _sunpos.new_cLocation()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, initX, initY):\n self.x = initX\n self.y = initY", "def __init__(self, initX, initY):\n self.x = initX\n self.y = initY" ]
[ "0.683656", "0.65527457", "0.64761674", "0.64761674", "0.6473272", "0.63640285", "0.6351586", "0.62831527", "0.625638", "0.6231856", "0.6194024", "0.61840737", "0.61764055", "0.61608475", "0.6154139", "0.6148525", "0.6146855", "0.61170983", "0.6087394", "0.60697556", "0.60492843", "0.6046341", "0.60455054", "0.60455054", "0.6030129", "0.60292566", "0.6016867", "0.6010303", "0.6009452", "0.6009452" ]
0.80508184
0
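A minimal, self-contained sketch of the constructor pattern in the record above; the class name and the default eyepoint/lookat/up values are assumptions for illustration, not taken from the dataset. Class-level sequences are unpacked into per-instance float32 NumPy arrays, the element type expected by the glUniform*fv upload calls used later in this section.

import numpy as np

class Camera:
    # Class-level defaults (assumed values); __init__ re-wraps them as float32 vectors.
    eyepoint = [0.0, 0.0, 5.0]
    lookat = [0.0, 0.0, 0.0]
    up = [0.0, 1.0, 0.0]

    def __init__(self):
        self.eyepoint = np.array([*self.eyepoint], dtype=np.float32)
        self.lookat = np.array([*self.lookat], dtype=np.float32)
        self.up = np.array([*self.up], dtype=np.float32)

cam = Camera()
print(cam.eyepoint, cam.eyepoint.dtype)  # [0. 0. 5.] float32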
Calculates the view matrix and passes it to a given shader program.
def setup_view(self, shader_program):
    n = self.normalize(self.eyepoint - self.lookat)
    u = self.normalize(np.cross(self.normalize(self.up), n))
    v = self.normalize(np.cross(n, u))

    view_mat = np.array([u[0], v[0], n[0], 0.0,
                         u[1], v[1], n[1], 0.0,
                         u[2], v[2], n[2], 0.0,
                         -np.dot(u, self.eyepoint),
                         -np.dot(v, self.eyepoint),
                         -np.dot(n, self.eyepoint), 1.0],
                        dtype=np.float32)

    view_location = glGetUniformLocation(shader_program, "view")
    glUseProgram(shader_program)
    glUniformMatrix4fv(view_location, 1, GL_FALSE, view_mat)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_shader(self):\r\n self.attrib_locs = {\r\n \"mc_vertex\": -1,\r\n \"vert_tex_coord\": -1,\r\n }\r\n self.uniform_locs = {\r\n \"model_matrix\": -1,\r\n \"view_matrix\": -1,\r\n \"proj_matrix\": -1,\r\n }\r\n vert_prog = self._compile_shader(ORTH_VERT_SOURCE, gl.GL_VERTEX_SHADER)\r\n frag_prog = self._compile_shader(\r\n ORTH_FRAG_SOURCE, gl.GL_FRAGMENT_SHADER)\r\n self.shader = gl.glCreateProgram()\r\n gl.glAttachShader(self.shader, vert_prog)\r\n gl.glAttachShader(self.shader, frag_prog)\r\n gl.glLinkProgram(self.shader)\r\n assert (gl.glGetProgramiv(self.shader, gl.GL_LINK_STATUS) ==\r\n gl.GL_TRUE), (\r\n \"Error: %s\" % (gl.glGetProgramInfoLog(self.shader)))\r\n\r\n self.attrib_locs = {\r\n name: gl.glGetAttribLocation(self.shader, name)\r\n for name in self.attrib_locs\r\n }\r\n self.uniform_locs = {\r\n name: gl.glGetUniformLocation(self.shader, name)\r\n for name in self.uniform_locs\r\n }\r\n\r\n # Load vertices for final ortho view\r\n self.vao = gl.glGenVertexArrays(1)\r\n gl.glBindVertexArray(self.vao)\r\n self.buffers['mc_vertex'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['mc_vertex'])\r\n\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(ORTH_VERTICES),\r\n ORTH_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['mc_vertex'], 4,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['mc_vertex'])\r\n\r\n self.buffers['vert_tex_coord'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['vert_tex_coord'])\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(TEXTURE_VERTICES),\r\n TEXTURE_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['vert_tex_coord'], 2,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['vert_tex_coord'])\r\n gl.glActiveTexture(gl.GL_TEXTURE0)", "def setup(self, shader_program):\n self.setup_view(shader_program)\n self.setup_projection(shader_program)", "def draw(self, shader=None, txtrs=None, ntl=None, shny=None, camera=None, mlist=[]):\r\n self.load_opengl() # really just to set the flag so _unload_opengl runs\r\n\r\n from pi3d.Camera import Camera\r\n\r\n camera = camera or self._camera or Camera.instance()\r\n shader = shader or self.shader\r\n shader.use()\r\n\r\n if self.MFlg == True or len(mlist):\r\n # Calculate rotation and translation matrix for this model using numpy.\r\n self.MRaw = dot(self.tr2,\r\n dot(self.scl,\r\n dot(self.roy,\r\n dot(self.rox,\r\n dot(self.roz, self.tr1)))))\r\n # child drawing addition #############\r\n newmlist = [m for m in mlist]\r\n newmlist.append(self.MRaw)\r\n if len(self.children) > 0:\r\n for c in self.children:\r\n c.draw(shader, txtrs, ntl, shny, camera, newmlist)\r\n for m in mlist[-1::-1]:\r\n self.MRaw = dot(self.MRaw, m)\r\n ######################################\r\n self.M[0:16] = self.MRaw.ravel()\r\n #self.M[0:16] = c_floats(self.MRaw.reshape(-1).tolist()) #pypy version\r\n self.M[16:32] = dot(self.MRaw, camera.mtrx).ravel()\r\n #self.M[16:32] = c_floats(dot(self.MRaw, camera.mtrx).reshape(-1).tolist()) #pypy\r\n self.MFlg = False\r\n\r\n elif camera.was_moved:\r\n # Only do this if it's not done because model moved.\r\n self.M[16:32] = dot(self.MRaw, camera.mtrx).ravel()\r\n\r\n if camera.was_moved:\r\n self.unif[18:21] = camera.eye[0:3]\r\n\r\n opengles.glUniformMatrix4fv(shader.unif_modelviewmatrix, 2,\r\n ctypes.c_int(0),\r\n ctypes.byref(self.M))\r\n\r\n opengles.glUniform3fv(shader.unif_unif, 20, 
ctypes.byref(self.unif))\r\n for b in self.buf:\r\n # Shape.draw has to be passed either parameter == None or values to pass\r\n # on.\r\n b.draw(self, shader, txtrs, ntl, shny)", "def __generate_model(self):\n glEnable(GL_CULL_FACE)\n glCullFace(GL_FRONT)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n self.__sh.change_shader(vertex=0, fragment=0)\n self.__prepare_shaders(self.__model_matrix, self.__light_matrix, False)\n self.__sh.bind_buffer()\n self.__sh.use_shaders()\n glDrawElements(GL_TRIANGLES, View.__triangles.size,\n GL_UNSIGNED_SHORT, View.__triangles)\n self.__sh.clear()", "def use(self):\r\n opengles.glUseProgram(self.program)", "def __prepare_shaders(self, rotation_matrix=None, light_matrix=None,\n depth=True):\n self.__sh.add_attribute(0, self.__mean_face, 'mean_position')\n self.__sh.bind_buffer()\n\n self.__sh.use_shaders()\n\n self.__sh.bind_uniform_matrix(light_matrix.dot(rotation_matrix),\n 'light_matrix')\n if not depth:\n self.__sh.bind_uniform_matrix(rotation_matrix, 'rotation_matrix')\n self.__sh.bind_uniform_vector(self.__face.light_cartesian,\n 'light_vector')\n coefficients_amount = len(self.__face.coefficients)\n indices = -ones(199, dtype='i')\n indices[:coefficients_amount] = array(range(coefficients_amount))\n self.__sh.bind_uniform_ints(indices, 'indices')\n\n coefficients = zeros(199, dtype='f')\n coefficients[:coefficients_amount] = self.__face.coefficients\n self.__sh.bind_uniform_floats(coefficients, 'coefficients')\n\n glActiveTexture(GL_TEXTURE0)\n self.__sh.bind_texture(0)\n if not depth:\n glActiveTexture(GL_TEXTURE1)\n self.__sh.bind_texture(1)", "def draw(self, projection, view, _model, **_kwargs):\n\n shid = self.skinning_shader.glid\n GL.glUseProgram(shid)\n\n # setup camera geometry parameters\n loc = GL.glGetUniformLocation(shid, 'projection')\n GL.glUniformMatrix4fv(loc, 1, True, projection)\n loc = GL.glGetUniformLocation(shid, 'view')\n GL.glUniformMatrix4fv(loc, 1, True, view)\n # bone world transform matrices need to be passed for skinning\n for bone_id, node in enumerate(self.bone_nodes):\n bone_matrix = node.world_transform @ self.bone_offsets[bone_id]\n\n bone_loc = GL.glGetUniformLocation(shid, 'boneMatrix[%d]' % bone_id)\n GL.glUniformMatrix4fv(bone_loc, 1, True, bone_matrix)\n\n # draw mesh vertex array\n self.vertex_array.draw(GL.GL_TRIANGLES)\n\n # leave with clean OpenGL state, to make it easier to detect problems\n GL.glUseProgram(0)", "def draw(self, proj_mat, view_mat, time=0):\n if self.mesh_shader:\n self.mesh_shader.draw(self, proj_mat, view_mat, time=time)", "def setup_projection(self, shader_program):\n projection_mat = np.array([(2.0*self.near)/(self.right-self.left), 0.0, 0.0, 0.0,\n 0.0, ((2.0*self.near)/(self.top-self.bottom)), 0.0, 0.0,\n ((self.right+self.left)/(self.right-self.left)),\n ((self.top+self.bottom)/(self.top-self.bottom)),\n ((-1.0*(self.far+self.near)) / (self.far-self.near)), -1.0,\n 0.0, 0.0, ((-2.0*self.far*self.near)/(self.far-self.near)),\n 0.0], dtype=np.float32)\n\n projection_location = glGetUniformLocation(shader_program, \"projection\")\n glUniformMatrix4fv(projection_location, 1, GL_FALSE, projection_mat)", "def draw(self, projection, view, _model, K_s=(0.0000000007, 0.0000000007, 0.0000000007), K_d=(0.00010, 0.00006, 0), light_position=(256, 0, 0), s=1.1, normal_mapping = 1.0, **_kwargs):\n\n shid = self.skinning_shader.glid\n GL.glUseProgram(shid)\n\n # setup camera geometry parameters\n loc = GL.glGetUniformLocation(shid, 'projection')\n GL.glUniformMatrix4fv(loc, 1, True, 
projection)\n loc = GL.glGetUniformLocation(shid, 'view')\n GL.glUniformMatrix4fv(loc, 1, True, view)\n\n #Display texture\n loc = GL.glGetUniformLocation(shid, 'diffuseMap')\n GL.glActiveTexture(GL.GL_TEXTURE0)\n GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture.glid)\n GL.glUniform1i(loc, 0)\n\n #Normals texture\n loc = GL.glGetUniformLocation(shid, 'normalMap')\n GL.glActiveTexture(GL.GL_TEXTURE1)\n GL.glBindTexture(GL.GL_TEXTURE_2D, self.normalMap.glid)\n GL.glUniform1i(loc, 0)\n\n #Specular texutures texture\n loc = GL.glGetUniformLocation(shid, 'specMap')\n GL.glActiveTexture(GL.GL_TEXTURE1)\n GL.glBindTexture(GL.GL_TEXTURE_2D, self.specMap.glid)\n GL.glUniform1i(loc, 0)\n\n #setup phong parameters\n names = ['light_position', 'K_d', 'K_s', 's', 'normal_mapping']\n loc = {n: GL.glGetUniformLocation(self.skinning_shader.glid, n) for n in names}\n GL.glUniform3fv(loc[\"light_position\"], 1, light_position)\n GL.glUniform3fv(loc[\"K_d\"], 1, K_d)\n GL.glUniform3fv(loc[\"K_s\"], 1, K_s)\n GL.glUniform1f(loc[\"s\"], s)\n GL.glUniform1f(loc[\"normal_mapping\"], normal_mapping)\n\n\n # bone world transform matrices need to be passed for skinning\n for bone_id, node in enumerate(self.bone_nodes):\n bone_matrix = node.world_transform @ self.bone_offsets[bone_id]\n\n bone_loc = GL.glGetUniformLocation(shid, 'boneMatrix[%d]' % bone_id)\n GL.glUniformMatrix4fv(bone_loc, 1, True, bone_matrix)\n\n # draw mesh vertex array\n self.vertex_array.draw(GL.GL_TRIANGLES)\n\n # leave with clean OpenGL state, to make it easier to detect problems\n GL.glUseProgram(0)", "def set_matrix(self):\n theta1 = -90\n theta2 = 105\n theta3 = 180\n\n if self.number > 8:\n theta2 = 75\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glPushMatrix()\n glRotatef(theta2, 0.0, 1.0, 0.0)\n glRotatef(theta1, 1.0, 0.0, 0.0)\n glRotatef(theta3, 0.0, 0.0, 1.0)\n matrix = glGetDoublev(GL_MODELVIEW_MATRIX)\n glPopMatrix()\n glPopMatrix()\n return matrix", "def render( self, shader, mode ):\n location = shader.getLocation( mode, self.name, uniform=False )\n if location is not None and location != -1:\n vbo = self.buffer.bind( mode )\n glVertexAttribPointer( \n location, self.size, GL_FLOAT, False, self.stride, \n vbo+self.offset\n )\n glEnableVertexAttribArray( location )\n return (vbo,location)\n return None", "def recompile(self):\n\n self.vaos = []\n try:\n self.program, uniforms = self.build_prog(self.gl)\n self.u_time, self.u_width, self.u_height = uniforms\n vao = GLUtil.screen_vao(self.gl, self.program)\n self.vaos.append(vao)\n\n self.compute, uniforms, buffers = self.build_cs(self.gl)\n self.u_cstime, self.u_cswidth, self.u_csheight = uniforms\n self.buf_in, self.buf_out = buffers\n\n self.set_gpu_wh(width, height)\n\n self.gx, self.gy = int(width / 8), int(height / 8)\n self.set_gpu_time()\n\n log(\"[Renderer] shader recompiled.\")\n\n except Exception as e:\n log(e)", "def modelview_matrix(self):\n camera = self.figure.scene.camera\n return camera.view_transform_matrix.to_array().astype(np.float32)", "def render( self, shader, mode, location=None ):\n if location is None:\n location = self.location( shader, mode )\n if location is not None and location != -1:\n value = self.currentValue( shader, mode )\n shape = value.shape \n shape_length = len(self.shape)\n if shape[-shape_length:] != self.shape:\n # uninitialized at the Python level, do not set...\n return None\n if shape[:-shape_length]:\n size = reduce( operator.mul, shape[:-shape_length] )\n else:\n size = 1\n if self.NEED_TRANSPOSE is not None:\n 
return self.baseFunction( location, size, self.NEED_TRANSPOSE, value )\n else:\n return self.baseFunction( location, size, value )\n return None", "def _build_shaders(self, program):\n\n # Check if we have at least something to attach\n if not self._verts:\n raise ValueError(\"No vertex shader has been given\")\n if not self._frags:\n raise ValueError(\"No fragment shader has been given\")\n\n log.debug(\"GPU: Attaching shaders to program\")\n\n # Attach shaders\n attached = gl.glGetAttachedShaders(program)\n shaders = self._verts + self._frags + self._geoms\n for shader in shaders: #self._verts:\n if shader.need_update:\n if shader.handle in attached:\n gl.glDetachShader(program, handle)\n shader.activate()\n if isinstance(shader, GeometryShader):\n if shader.vertices_out is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_VERTICES_OUT_EXT,\n shader.vertices_out)\n if shader.input_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_INPUT_TYPE_EXT,\n shader.input_type)\n if shader.output_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_OUTPUT_TYPE_EXT,\n shader.output_type)\n gl.glAttachShader(program, shader.handle)\n shader._program = self", "def compile(self, mode, shader):\n holder = self.holderDepend( mode.cache.holder(self,None) )\n # TODO: depend on shader.material as well...\n # TODO: the compiled shader needs to depend on *everything* \n # down the set of objects...\n program = glCreateProgram()\n holder.data = program\n subShaders = []\n for shader in self.shaders:\n # TODO: cache links...\n subShader = shader.compile()\n if subShader:\n glAttachShader(program, subShader )\n subShaders.append( subShader )\n elif shader.source:\n log.warn( 'Failure compiling: %s %s', shader.compileLog, shader.url or shader.source )\n if len(subShaders) == len(self.shaders):\n glLinkProgram(program)\n glUseProgram( program )\n # TODO: retrieve maximum texture count and restrict to that...\n i = 0\n for texture in self.textures:\n if texture.bind( self, mode, i ):\n i += 1\n \n glValidateProgram( program )\n validation = glGetProgramiv( program, GL_VALIDATE_STATUS )\n if validation == GL_FALSE:\n self.compileLog += \"\"\"Validation failure (%s): %s\"\"\"%(\n validation,\n glGetProgramInfoLog( program ),\n )\n program = False \n else:\n link_status = glGetProgramiv( program, GL_LINK_STATUS )\n if link_status == GL_FALSE:\n self.compileLog += \"\"\"Link failure (%s): %s\"\"\"%(\n link_status,\n glGetProgramInfoLog( program ),\n )\n program = False\n for subShader in subShaders:\n glDeleteShader( subShader )\n holder.data = program\n return program\n else:\n log.debug( 'Not done loading shader source yet' )\n holder.data = 0\n return None", "def updateShaderState(self):\n\n dopts = self.opts\n copts = self.canvas.opts\n lightPos = None\n flatColour = dopts.getConstantColour()\n useNegCmap = (not dopts.useLut) and dopts.useNegativeCmap\n\n if self.threedee:\n lightPos = np.array(copts.lightPos)\n lightPos *= (copts.zoom / 100.0)\n else:\n lightPos = None\n\n if dopts.useLut:\n delta = 1.0 / (dopts.lut.max() + 1)\n cmapXform = transform.scaleOffsetXform(delta, 0.5 * delta)\n else:\n cmapXform = self.cmapTexture.getCoordinateTransform()\n\n fslgl.glmesh_funcs.updateShaderState(\n self,\n useNegCmap=useNegCmap,\n cmapXform=cmapXform,\n flatColour=flatColour,\n lightPos=lightPos)", "def _prepare_gl(self):\n # init gl\n shader = Shader()\n shader.attachShader(GL_VERTEX_SHADER, VERTEX_SHADER)\n shader.attachShader(GL_FRAGMENT_SHADER, 
FRAGMENT_SHADER)\n shader.linkProgram()\n self.shader = shader\n\n self._gl_uniforms = {}\n # cache uniform locations (much faster)\n self._gl_uniforms['tex'] = self._uloc('tex')\n self._gl_uniforms['color'] = self._uloc('color')\n self._gl_uniforms['mat_projection'] = self._uloc('mat_projection')\n self._gl_uniforms['mat_modelview'] = self._uloc('mat_modelview')\n self._gl_uniforms['mat_real_projection'] = self._uloc('mat_real_projection')\n self.vao_id = glGenVertexArrays(1)\n self.vbo_id = glGenBuffers(2)", "def initializeGL(self):\n # background color\n gl.glClearColor(0.8, 0.8, 0.8, 0)\n # Make initial data array.\n # compile the vertex shader\n vs = compile_shader(VERTEX, gl.GL_VERTEX_SHADER)\n # compile the geometry shader\n gs = compile_shader(GEOMETRY, gl.GL_GEOMETRY_SHADER)\n # compile the fragment shader\n fs = compile_shader(FRAGMENT, gl.GL_FRAGMENT_SHADER)\n # Link the programs.\n self.render_program = link_shaders(vs, gs, fs)\n # Compile the compute shader\n cs = compile_shader(COMPUTE, gl.GL_COMPUTE_SHADER)\n # Create the compute shader buffers.\n self.makeBuffers()\n #self.vbo = glvbo.VBO(self.attributes)\n self.vbo = gl.glGenBuffers(1)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, self.attributes.nbytes,\n self.attributes, gl.GL_DYNAMIC_COPY)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)\n\n self.ssbo = gl.glGenBuffers(1)\n gl.glBindBufferBase(gl.GL_SHADER_STORAGE_BUFFER, 1, self.ssbo)\n gl.glBufferData(gl.GL_SHADER_STORAGE_BUFFER, self.velocities.nbytes,\n self.velocities, gl.GL_DYNAMIC_COPY)\n self.compute_program = link_shaders(cs)", "def _prepare_transforms(self, view):\n raise NotImplementedError()\n # Todo: this method can be removed if we somehow enable the shader\n # to specify exactly which transform functions it needs by name. 
For\n # example:\n #\n # // mapping function is automatically defined from the\n # // corresponding transform in the view's TransformSystem\n # gl_Position = visual_to_render(a_position);\n #", "def render( self, shader, mode, index ):\n location = shader.getLocation( mode, self.name, uniform=True )\n if location is not None and location != -1:\n value = self.currentValue( shader, mode )\n if value:\n self.baseFunction( location, index )\n glActiveTexture( GL_TEXTURE0 + index )\n glBindTexture( GL_TEXTURE_BUFFER, self.texture( mode ) )\n vbo = value.vbo(mode)\n vbo.bind()\n try:\n glTexBuffer( GL_TEXTURE_BUFFER, self.get_format(), int(vbo) )\n finally:\n vbo.unbind()\n return True \n return False", "def link_shader_program(vertex_shader, fragment_shader):\n program = gl.glCreateProgram()\n gl.glAttachShader(program, vertex_shader)\n gl.glAttachShader(program, fragment_shader)\n gl.glLinkProgram(program)\n # check linking error\n result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetProgramInfoLog(program))\n return program", "def extractUniforms(constants, refMatrix):\n uvOffsetScale = constants['$Globals']['webgl_fa7f624db8ab37d1']\n mdata = constants['$Globals']['webgl_3c7b7f37a9bd4c1d']\n matrix = Matrix([\n mdata[0:4],\n mdata[4:8],\n mdata[8:12],\n [0, 0, 0, 1],\n ])\n if refMatrix is None:\n # Rotate around Y because Google Maps uses X as up axis\n refMatrix = Matrix.Rotation(-pi/2, 4, 'Y') @ matrix.inverted()\n matrix = refMatrix @ matrix\n \n matrix[0][3] *= .0039\n matrix[1][3] *= .0039\n matrix[2][3] *= .0039\n\n return uvOffsetScale, matrix, refMatrix", "def render( self, shader, mode, index ):\n location = shader.getLocation( mode, self.name, uniform=True )\n if location is not None and location != -1:\n value = self.currentValue( shader, mode )\n if value:\n self.baseFunction( location, index )\n glActiveTexture( GL_TEXTURE0 + index )\n value.render( mode.visible, mode.lighting, mode )\n return True \n return False", "def link_shader_program(vertex_shader):\n program = gl.glCreateProgram()\n gl.glAttachShader(program, vertex_shader)\n gl.glLinkProgram(program)\n # check linking error\n result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetProgramInfoLog(program))\n return program", "def get_vf_matrix(self, geom_dict, view_matrix, obstr_matrix, list_pvrow):\n n_all_surfaces = view_matrix.shape[0]\n view_factors = np.zeros((n_all_surfaces, n_all_surfaces), dtype=float)\n\n # --- First deal with finite surfaces from the registry, and treat only\n # half of the views because symmetry will be used next\n n_finite_surfaces = n_all_surfaces - 1 # no sky\n view_matrix_upper_finite_surfaces = np.triu(\n view_matrix[:n_finite_surfaces, :n_finite_surfaces])\n indices_views_finite = np.where(view_matrix_upper_finite_surfaces)\n\n n_views = len(indices_views_finite[0])\n geometries = list(geom_dict.values())\n for i in range(n_views):\n idx = (indices_views_finite[0][i], indices_views_finite[1][i])\n view = self.mapper.reverse_view[view_matrix[idx]]\n line_i = geometries[idx[0]]\n line_j = geometries[idx[1]]\n obstr_index = obstr_matrix[idx]\n if obstr_index is not None:\n obstructing_pvrow = list_pvrow[obstr_matrix[idx]]\n else:\n obstructing_pvrow = None\n # The following line takes the most time to execute (looped)\n view_factors[idx] = self.mapper.function_mapping[view](\n line_i, line_j, obstructing_pvrow)\n\n # Use the reciprocity property of view factors to speed up the\n # vfactor 
calculation: A_1 * F_1-2 = A_2 * F_2-1 ==> symmetric matrx\n areas = np.array([surf.length for surf in geometries])\n matrix_areas = np.diag(areas)\n matrix_areas_inv = np.diag(1. / areas)\n\n upper_matrix_reciprocity = np.dot(matrix_areas,\n view_factors[:n_finite_surfaces,\n :n_finite_surfaces])\n\n total_matrix_reciprocity = (upper_matrix_reciprocity +\n upper_matrix_reciprocity.T)\n finite_vf_matrix = np.dot(matrix_areas_inv, total_matrix_reciprocity)\n view_factors[:n_finite_surfaces, :n_finite_surfaces] = finite_vf_matrix\n\n # --- Then do the calculations for the sky, which is the remaining\n # portion of the hemisphere\n view_factors[:-1, -1] = 1. - np.sum(view_factors[:-1, :-1], axis=1)\n return view_factors", "def render(self):\n glPushMatrix()\n glMultMatrixf(np.transpose(self.translation_matrix))\n glMultMatrixf(self.scaling_matrix)\n color = color.COLORS[self.color_index]\n glColor3f(color[0], color[1], color[2])\n\n if self.selected:\n # Emit light\n glMaterialfv(GL_FRONT, GL_EMISSION, [0.0, 0.0, 0.0])\n\n glPopMatrix()", "def computeMVP(self):\n projMat = self.converterYUR\n modelViewMat = self.transforMat.invertCompose(\n Globals.render.getTransform(self.cameraNode)).getMat()\n return UnalignedLMatrix4f(modelViewMat * projMat)", "def uniform_matrixf(self, name, mat):\n if self.getStatus():\n loc = glGetUniformLocation(self.program, name)\n glUniformMatrix4fv(loc, 1, False, (ctypes.c_float * 16)(*mat))" ]
[ "0.63088703", "0.59239346", "0.584722", "0.58375233", "0.57614106", "0.5694943", "0.5689576", "0.56452876", "0.5627314", "0.5600046", "0.55844826", "0.5583802", "0.54889846", "0.546197", "0.5442606", "0.54422736", "0.5416303", "0.5368626", "0.5350518", "0.5317088", "0.53103125", "0.5290774", "0.52780485", "0.52528393", "0.52455336", "0.5198585", "0.5183991", "0.5159115", "0.51236045", "0.50781953" ]
0.7835183
0
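The look-at construction in the record above can be checked without an OpenGL context; the sketch below reproduces the same basis and flat column-major layout as a free NumPy function (the function name and the test values are assumptions), with the glGetUniformLocation/glUniformMatrix4fv upload deliberately omitted.

import numpy as np

def look_at_view_matrix(eyepoint, lookat, up):
    eyepoint, lookat, up = (np.asarray(a, dtype=np.float32) for a in (eyepoint, lookat, up))
    # Camera basis: n points from the target back toward the eye,
    # u is the right vector, v the re-orthogonalised up vector.
    n = eyepoint - lookat
    n = n / np.linalg.norm(n)
    u = np.cross(up / np.linalg.norm(up), n)
    u = u / np.linalg.norm(u)
    v = np.cross(n, u)
    # Flat, column-major 4x4 view matrix in the same element order as the record.
    return np.array([u[0], v[0], n[0], 0.0,
                     u[1], v[1], n[1], 0.0,
                     u[2], v[2], n[2], 0.0,
                     -np.dot(u, eyepoint), -np.dot(v, eyepoint), -np.dot(n, eyepoint), 1.0],
                    dtype=np.float32)

print(look_at_view_matrix([0, 0, 5], [0, 0, 0], [0, 1, 0]).reshape(4, 4))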
Calculates the projection matrix and passes it to a given shader program.
def setup_projection(self, shader_program):
    projection_mat = np.array([(2.0*self.near)/(self.right-self.left), 0.0, 0.0, 0.0,
                               0.0, ((2.0*self.near)/(self.top-self.bottom)), 0.0, 0.0,
                               ((self.right+self.left)/(self.right-self.left)),
                               ((self.top+self.bottom)/(self.top-self.bottom)),
                               ((-1.0*(self.far+self.near)) / (self.far-self.near)), -1.0,
                               0.0, 0.0, ((-2.0*self.far*self.near)/(self.far-self.near)),
                               0.0], dtype=np.float32)

    projection_location = glGetUniformLocation(shader_program, "projection")
    glUniformMatrix4fv(projection_location, 1, GL_FALSE, projection_mat)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_view(self, shader_program):\n n = self.normalize(self.eyepoint - self.lookat)\n u = self.normalize(np.cross(self.normalize(self.up), n))\n v = self.normalize(np.cross(n, u))\n\n view_mat = np.array([u[0], v[0], n[0], 0.0,\n u[1], v[1], n[1], 0.0,\n u[2], v[2], n[2], 0.0,\n -np.dot(u, self.eyepoint),\n -np.dot(v, self.eyepoint),\n -np.dot(n, self.eyepoint), 1.0],\n dtype=np.float32)\n\n view_location = glGetUniformLocation(shader_program, \"view\")\n glUseProgram(shader_program)\n glUniformMatrix4fv(view_location, 1, GL_FALSE, view_mat)", "def setup(self, shader_program):\n self.setup_view(shader_program)\n self.setup_projection(shader_program)", "def _load_projection(self):\n input_dim = self.filter_dims\n self.projection = nn.Linear(input_dim, self.char_cnn_output_dim, bias=True)\n weight = self.npz_weights['W_proj']\n bias = self.npz_weights['b_proj']\n self.projection.weight.data.copy_(torch.div(torch.FloatTensor(np.transpose(weight)), 10.0))\n self.projection.bias.data.copy_(torch.div(torch.FloatTensor(np.transpose(bias)), 10.0))\n self.projection.weight.requires_grad = self._finetune_pretrained_weights\n self.projection.bias.requires_grad = self._finetune_pretrained_weights", "def compute_projection_matrix(width,\n height,\n f_x,\n f_y,\n c_x,\n c_y,\n near,\n far):\n # pylint: disable=line-too-long\n matrix = vtk.vtkMatrix4x4()\n matrix.Zero()\n matrix.SetElement(0, 0, 2*f_x/width)\n matrix.SetElement(0, 1, -2*0/width) # Not doing skew, so this will be 0.\n matrix.SetElement(0, 2, (width - 2*c_x)/width)\n matrix.SetElement(1, 1, 2*f_y/height)\n matrix.SetElement(1, 2, (-height + 2*c_y)/height)\n matrix.SetElement(2, 2, (-far-near)/(far-near))\n matrix.SetElement(2, 3, -2*far*near/(far-near))\n matrix.SetElement(3, 2, -1)\n return matrix", "def init_shader(self):\r\n self.attrib_locs = {\r\n \"mc_vertex\": -1,\r\n \"vert_tex_coord\": -1,\r\n }\r\n self.uniform_locs = {\r\n \"model_matrix\": -1,\r\n \"view_matrix\": -1,\r\n \"proj_matrix\": -1,\r\n }\r\n vert_prog = self._compile_shader(ORTH_VERT_SOURCE, gl.GL_VERTEX_SHADER)\r\n frag_prog = self._compile_shader(\r\n ORTH_FRAG_SOURCE, gl.GL_FRAGMENT_SHADER)\r\n self.shader = gl.glCreateProgram()\r\n gl.glAttachShader(self.shader, vert_prog)\r\n gl.glAttachShader(self.shader, frag_prog)\r\n gl.glLinkProgram(self.shader)\r\n assert (gl.glGetProgramiv(self.shader, gl.GL_LINK_STATUS) ==\r\n gl.GL_TRUE), (\r\n \"Error: %s\" % (gl.glGetProgramInfoLog(self.shader)))\r\n\r\n self.attrib_locs = {\r\n name: gl.glGetAttribLocation(self.shader, name)\r\n for name in self.attrib_locs\r\n }\r\n self.uniform_locs = {\r\n name: gl.glGetUniformLocation(self.shader, name)\r\n for name in self.uniform_locs\r\n }\r\n\r\n # Load vertices for final ortho view\r\n self.vao = gl.glGenVertexArrays(1)\r\n gl.glBindVertexArray(self.vao)\r\n self.buffers['mc_vertex'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['mc_vertex'])\r\n\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(ORTH_VERTICES),\r\n ORTH_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['mc_vertex'], 4,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['mc_vertex'])\r\n\r\n self.buffers['vert_tex_coord'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['vert_tex_coord'])\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(TEXTURE_VERTICES),\r\n TEXTURE_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['vert_tex_coord'], 2,\r\n gl.GL_FLOAT, False, 0, 
ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['vert_tex_coord'])\r\n gl.glActiveTexture(gl.GL_TEXTURE0)", "def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p", "def compile(self, mode, shader):\n holder = self.holderDepend( mode.cache.holder(self,None) )\n # TODO: depend on shader.material as well...\n # TODO: the compiled shader needs to depend on *everything* \n # down the set of objects...\n program = glCreateProgram()\n holder.data = program\n subShaders = []\n for shader in self.shaders:\n # TODO: cache links...\n subShader = shader.compile()\n if subShader:\n glAttachShader(program, subShader )\n subShaders.append( subShader )\n elif shader.source:\n log.warn( 'Failure compiling: %s %s', shader.compileLog, shader.url or shader.source )\n if len(subShaders) == len(self.shaders):\n glLinkProgram(program)\n glUseProgram( program )\n # TODO: retrieve maximum texture count and restrict to that...\n i = 0\n for texture in self.textures:\n if texture.bind( self, mode, i ):\n i += 1\n \n glValidateProgram( program )\n validation = glGetProgramiv( program, GL_VALIDATE_STATUS )\n if validation == GL_FALSE:\n self.compileLog += \"\"\"Validation failure (%s): %s\"\"\"%(\n validation,\n glGetProgramInfoLog( program ),\n )\n program = False \n else:\n link_status = glGetProgramiv( program, GL_LINK_STATUS )\n if link_status == GL_FALSE:\n self.compileLog += \"\"\"Link failure (%s): %s\"\"\"%(\n link_status,\n glGetProgramInfoLog( program ),\n )\n program = False\n for subShader in subShaders:\n glDeleteShader( subShader )\n holder.data = program\n return program\n else:\n log.debug( 'Not done loading shader source yet' )\n holder.data = 0\n return None", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n self.input_field.data[1],\n self.input_field.data[2], ghosts_w)", "def _build_shaders(self, program):\n\n # Check if we have at least something to attach\n if not self._verts:\n raise ValueError(\"No vertex shader has been given\")\n if not self._frags:\n raise ValueError(\"No fragment shader has been given\")\n\n log.debug(\"GPU: Attaching shaders to program\")\n\n # 
Attach shaders\n attached = gl.glGetAttachedShaders(program)\n shaders = self._verts + self._frags + self._geoms\n for shader in shaders: #self._verts:\n if shader.need_update:\n if shader.handle in attached:\n gl.glDetachShader(program, handle)\n shader.activate()\n if isinstance(shader, GeometryShader):\n if shader.vertices_out is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_VERTICES_OUT_EXT,\n shader.vertices_out)\n if shader.input_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_INPUT_TYPE_EXT,\n shader.input_type)\n if shader.output_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_OUTPUT_TYPE_EXT,\n shader.output_type)\n gl.glAttachShader(program, shader.handle)\n shader._program = self", "def _prepare_gl(self):\n # init gl\n shader = Shader()\n shader.attachShader(GL_VERTEX_SHADER, VERTEX_SHADER)\n shader.attachShader(GL_FRAGMENT_SHADER, FRAGMENT_SHADER)\n shader.linkProgram()\n self.shader = shader\n\n self._gl_uniforms = {}\n # cache uniform locations (much faster)\n self._gl_uniforms['tex'] = self._uloc('tex')\n self._gl_uniforms['color'] = self._uloc('color')\n self._gl_uniforms['mat_projection'] = self._uloc('mat_projection')\n self._gl_uniforms['mat_modelview'] = self._uloc('mat_modelview')\n self._gl_uniforms['mat_real_projection'] = self._uloc('mat_real_projection')\n self.vao_id = glGenVertexArrays(1)\n self.vbo_id = glGenBuffers(2)", "def compute_projection(M):\n P = torch.mm(M, torch.pinverse(M.T.matmul(M)).matmul(M.T))\n P = P.double()\n return P", "def parallel_projection(self, state):\n self.camera.parallel_projection = state\n self.Modified()", "def __prepare_shaders(self, rotation_matrix=None, light_matrix=None,\n depth=True):\n self.__sh.add_attribute(0, self.__mean_face, 'mean_position')\n self.__sh.bind_buffer()\n\n self.__sh.use_shaders()\n\n self.__sh.bind_uniform_matrix(light_matrix.dot(rotation_matrix),\n 'light_matrix')\n if not depth:\n self.__sh.bind_uniform_matrix(rotation_matrix, 'rotation_matrix')\n self.__sh.bind_uniform_vector(self.__face.light_cartesian,\n 'light_vector')\n coefficients_amount = len(self.__face.coefficients)\n indices = -ones(199, dtype='i')\n indices[:coefficients_amount] = array(range(coefficients_amount))\n self.__sh.bind_uniform_ints(indices, 'indices')\n\n coefficients = zeros(199, dtype='f')\n coefficients[:coefficients_amount] = self.__face.coefficients\n self.__sh.bind_uniform_floats(coefficients, 'coefficients')\n\n glActiveTexture(GL_TEXTURE0)\n self.__sh.bind_texture(0)\n if not depth:\n glActiveTexture(GL_TEXTURE1)\n self.__sh.bind_texture(1)", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()\n ymin, ymax = self.get_ylim3d()\n zmin, zmax = self.get_zlim3d()\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0\n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates\n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down\n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, 
zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def link_shader_program(vertex_shader, fragment_shader):\n program = gl.glCreateProgram()\n gl.glAttachShader(program, vertex_shader)\n gl.glAttachShader(program, fragment_shader)\n gl.glLinkProgram(program)\n # check linking error\n result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetProgramInfoLog(program))\n return program", "def draw(self, proj_mat, view_mat, time=0):\n if self.mesh_shader:\n self.mesh_shader.draw(self, proj_mat, view_mat, time=time)", "def compile(self):\n if not self.isCompiled():\n if self.file is not None:\n try:\n if self.tipo == VERTEX:\n self.shader = glCreateShader(GL_VERTEX_SHADER)\n else:\n self.shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(self.shader, self.file)\n glCompileShader(self.shader)\n self.compiled = True\n except:\n raise Exception(\"error al compilar el shader\")\n else:\n raise Exception(\"no se ha cargado un archivo\")\n else:\n print \"Error :: el shader ya ha sido compilado\"", "def _ProjectionMatrix(near, far, fov, aspectRatio):\r\n # Matrices are considered to be M[row][col]\r\n # Use DirectX convention, so need to do rowvec*Matrix to transform\r\n size = 1 / tan(radians(fov)/2.0)\r\n M = [[0] * 4 for i in range(4)]\r\n M[0][0] = size/aspectRatio\r\n M[1][1] = size #negative value reflects scene on the Y axis\r\n M[2][2] = (far + near) / (far - near)\r\n M[2][3] = 1\r\n M[3][2] = -(2 * far * near)/(far - near)\r\n return array(M, dtype=float)", "def _projection(self, name, inputs, out_shape):\n total_in_size = int(np.prod(inputs.get_shape().dims[1:]))\n total_out_size = int(np.prod(out_shape))\n weights = tf.get_variable(name,\n shape=(total_in_size, total_out_size),\n dtype=inputs.dtype,\n initializer=self._initializer)\n\n flat_in = tf.reshape(inputs, (tf.shape(inputs)[0], total_in_size))\n return tf.reshape(tf.matmul(flat_in, weights),\n (tf.shape(inputs)[0],) + out_shape)", "def projection_matrix(self) -> TransformationMatrixType:\n if self._projection_matrix is None:\n if self.projection_mode == Projection.TOP_DOWN:\n self._projection_matrix = self.orthographic_matrix\n else:\n self._projection_matrix = self.perspective_matrix\n\n return self._projection_matrix", "def render(self, proj):\n if self.text == '' or not self.mesh:\n return\n\n model = self.model.getTransformation()\n mvp = proj * self.transform.getTransformation() * model\n\n gl.glEnable(gl.GL_FRAMEBUFFER_SRGB)\n\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n self.shader.bind()\n if self.color:\n self.shader.setUniform('u_color', self.color)\n self.font.bindAtlas()\n self.shader.setUniform('T_MVP', mvp)\n self.mesh.draw()\n gl.glDisable(gl.GL_BLEND)\n self.shader.unbind()\n self.font.unbindAtlas()\n gl.glDisable(gl.GL_FRAMEBUFFER_SRGB)", "def __call__(self):\n #Check source type and sensor type, then call appripriate methods to \n #generate intermediate data, cascading all the way back to geometry \n #calculation if it wasn't already done.\n #Then return a projection matrix...\n\n # NOTE: returned projection_matrix is a numpy.ndarray\n if isinstance(self.sensors, sensors_module.SensorsEEG):\n projection_matrix = self.eeg_gain()\n elif isinstance(self.sensors, sensors_module.SensorsMEG):\n projection_matrix = self.meg_gain()\n\n return projection_matrix", "def calcul_point_plan_projection(cls,cx,cy,cz,spx,spy,axe_x,axe_y):\n projX=gs.Vector3(spx*axe_x.x,spx*axe_x.y,spx*axe_x.z)\n 
projY=gs.Vector3(spy*axe_y.x,spy*axe_y.y,spy*axe_y.z)\n point=gs.Vector3(projX+projY)+gs.Vector3(cx,cy,cz)\n return point", "def start_projection(self):\r\n self.paint_project_button(True)\r\n self.app.canvas = ProjectionCanvas(self.app)\r\n self.app.canvas.load()#512, 384)\r\n self.app.canvas.start()", "def render( self, shader, mode ):\n location = shader.getLocation( mode, self.name, uniform=False )\n if location is not None and location != -1:\n vbo = self.buffer.bind( mode )\n glVertexAttribPointer( \n location, self.size, GL_FLOAT, False, self.stride, \n vbo+self.offset\n )\n glEnableVertexAttribArray( location )\n return (vbo,location)\n return None", "def getPerspectiveProjectionMatrix(l, r, b, t, n, f):\n e11 = 2 * n / (r - l)\n e13 = (r + l) / (r - l)\n e22 = (2 * n) / (t - b)\n e23 = (t + b) / (t - b)\n e33 = -1 * (f + n) / (f - n)\n e34 = (-2 * f * n) / (f - n)\n\n return MatrixExtended([\n [e11, 0, e13, 0],\n [0, e22, e23, 0],\n [0, 0, e33, e34],\n [0, 0, -1, 0]])", "def init_shaders():\n global shaders\n\n vertex_shader = glCreateShader(GL_VERTEX_SHADER)\n glShaderSource(vertex_shader,open('shaders/vs-phong-interp.c','r').read())\n glCompileShader(vertex_shader)\n result = glGetShaderiv(vertex_shader, GL_COMPILE_STATUS)\n if result:\n print('Vertex shader compilation successful.')\n else:\n print('Vertex shader compilation FAILED:')\n print(glGetShaderInfoLog(vertex_shader))\n sys.exit(-1)\n\n fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(fragment_shader, open('shaders/fs-phong-interp.c','r').read())\n glCompileShader(fragment_shader)\n result = glGetShaderiv(fragment_shader, GL_COMPILE_STATUS)\n if result:\n print('Fragment shader compilation successful.')\n else:\n print('Fragment shader compilation FAILED:')\n print(glGetShaderInfoLog(fragment_shader))\n sys.exit(-1)\n\n shaders = glCreateProgram()\n glAttachShader(shaders,vertex_shader)\n glAttachShader(shaders,fragment_shader)\n glLinkProgram(shaders)", "def gluProject( baseFunction, objX, objY, objZ, model=None, proj=None, view=None ):\n if model is None:\n model = GL.glGetDoublev( GL.GL_MODELVIEW_MATRIX )\n if proj is None:\n proj = GL.glGetDoublev( GL.GL_PROJECTION_MATRIX )\n if view is None:\n view = GL.glGetIntegerv( GL.GL_VIEWPORT )\n winX = _simple.GLdouble( 0.0 )\n winY = _simple.GLdouble( 0.0 )\n winZ = _simple.GLdouble( 0.0 )\n result = baseFunction( \n objX,objY,objZ,\n model,proj,view,\n winX,winY,winZ,\n )\n # On Ubuntu 9.10 we see a None come out of baseFunction,\n # despite it having a return-type specified of GLint!\n if result is not None and result != _simple.GLU_TRUE:\n raise ValueError( \"\"\"Projection failed!\"\"\" )\n return winX.value, winY.value, winZ.value", "def draw(self, shader=None, txtrs=None, ntl=None, shny=None, camera=None, mlist=[]):\r\n self.load_opengl() # really just to set the flag so _unload_opengl runs\r\n\r\n from pi3d.Camera import Camera\r\n\r\n camera = camera or self._camera or Camera.instance()\r\n shader = shader or self.shader\r\n shader.use()\r\n\r\n if self.MFlg == True or len(mlist):\r\n # Calculate rotation and translation matrix for this model using numpy.\r\n self.MRaw = dot(self.tr2,\r\n dot(self.scl,\r\n dot(self.roy,\r\n dot(self.rox,\r\n dot(self.roz, self.tr1)))))\r\n # child drawing addition #############\r\n newmlist = [m for m in mlist]\r\n newmlist.append(self.MRaw)\r\n if len(self.children) > 0:\r\n for c in self.children:\r\n c.draw(shader, txtrs, ntl, shny, camera, newmlist)\r\n for m in mlist[-1::-1]:\r\n self.MRaw = 
dot(self.MRaw, m)\r\n ######################################\r\n self.M[0:16] = self.MRaw.ravel()\r\n #self.M[0:16] = c_floats(self.MRaw.reshape(-1).tolist()) #pypy version\r\n self.M[16:32] = dot(self.MRaw, camera.mtrx).ravel()\r\n #self.M[16:32] = c_floats(dot(self.MRaw, camera.mtrx).reshape(-1).tolist()) #pypy\r\n self.MFlg = False\r\n\r\n elif camera.was_moved:\r\n # Only do this if it's not done because model moved.\r\n self.M[16:32] = dot(self.MRaw, camera.mtrx).ravel()\r\n\r\n if camera.was_moved:\r\n self.unif[18:21] = camera.eye[0:3]\r\n\r\n opengles.glUniformMatrix4fv(shader.unif_modelviewmatrix, 2,\r\n ctypes.c_int(0),\r\n ctypes.byref(self.M))\r\n\r\n opengles.glUniform3fv(shader.unif_unif, 20, ctypes.byref(self.unif))\r\n for b in self.buf:\r\n # Shape.draw has to be passed either parameter == None or values to pass\r\n # on.\r\n b.draw(self, shader, txtrs, ntl, shny)" ]
[ "0.608692", "0.60102975", "0.57958746", "0.5765427", "0.5765062", "0.572129", "0.5591015", "0.54633194", "0.5393662", "0.53903425", "0.5371064", "0.5305471", "0.5304222", "0.52888876", "0.5282101", "0.5274056", "0.5265391", "0.52569264", "0.52486914", "0.5241514", "0.5236431", "0.52202815", "0.5207362", "0.5161988", "0.51583475", "0.5139823", "0.51388097", "0.5137581", "0.51282525", "0.50970066" ]
0.7891201
0
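The projection record above fills the standard OpenGL frustum matrix; a hedged standalone sketch of the same element layout is given below (the function name and the example frustum bounds are assumptions), again leaving out the uniform upload.

import numpy as np

def frustum_projection(left, right, bottom, top, near, far):
    # Flat, column-major layout matching the record (the classic glFrustum matrix).
    return np.array([2.0 * near / (right - left), 0.0, 0.0, 0.0,
                     0.0, 2.0 * near / (top - bottom), 0.0, 0.0,
                     (right + left) / (right - left),
                     (top + bottom) / (top - bottom),
                     -(far + near) / (far - near), -1.0,
                     0.0, 0.0, -2.0 * far * near / (far - near), 0.0],
                    dtype=np.float32)

print(frustum_projection(-1.0, 1.0, -1.0, 1.0, 0.1, 100.0).reshape(4, 4))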
Returns a given vector in normalized form.
def normalize(vector): return vector / np.linalg.norm(vector)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_normalized_vector(vector):\n # WARN: Zero length may cause problems!\n vector_lenght = get_vector_length(vector)\n if vector_lenght != 0:\n return np.divide(vector, get_vector_length(vector))\n else:\n return [0, 0]", "def normalizeVector(v):\n normalizer = 1.0 / sum(v)\n\n normalized = [i * normalizer for i in v]\n return normalized", "def normalize(v):\n return v / np.linalg.norm(v)", "def normalize(v):\n return np.array(v) / np.linalg.norm(v)", "def normalize_vector(vector):\n v = np.divide(vector, np.linalg.norm(vector))\n return np.round(v, decimals=4)", "def normalized(v):\n norm = np.linalg.norm(v)\n if norm:\n return np.array(v) / norm\n else:\n return v", "def normalize(vec):\n return vec / length(vec)", "def vector_normalize(x):\n mag = math.sqrt(vector_dot(x, x))\n return [float(i) / mag for i in x]", "def normalize(self, vec):\n length = math.sqrt( vec[0,0]*vec[0,0] + vec[0,1]*vec[0,1] + vec[0,2]*vec[0,2] )\n vnorm = vec / length\n return vnorm", "def normalized(vec):\n l = norm(vec)\n if l != 0.0:\n return vec / l\n else:\n raise ArithmeticError('Zero vector can\\'t be normalized!')", "def normalize(my_vector):\n my_vector = np.array(my_vector)\n size = len(my_vector)\n\n sum_ = sum(my_vector)\n if sum_ != 0.0:\n for i in range(size):\n my_vector[i] = my_vector[i] / sum_\n return my_vector", "def normalized(self):\n len = self.length\n return Vector(self.x / len, self.y / len)", "def normalize_vector (vector ):\r\n\r\n if (np.sum (vector ) == 0):\r\n #print (\"In normalize_vector: Vector is 0. Returning input vector.\")\r\n return vector\r\n\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n vector = np.array(vector)\n if np.linalg.norm(vector) <= 0.00010:\n normv = 1.0\n else:\n normv = np.linalg.norm(vector)\n return vector / normv", "def normalize(v):\n det = math.sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2])\n return [v[0] / det, v[1] / det, v[2] / det]", "def normalized(self):\n try:\n m = abs(self)\n return self / m\n except ZeroDivisionError as e:\n raise Exception(\"Attempted to normalize a zero vector, return a unit vector at zero degrees\") from e\n # return Vector(1, 0)" ]
[ "0.8575221", "0.83980185", "0.8287421", "0.8287125", "0.82777345", "0.82405454", "0.81163174", "0.81054735", "0.8068462", "0.80491674", "0.79364955", "0.78791", "0.7850027", "0.7793986", "0.7793986", "0.7793986", "0.77850926", "0.77850926", "0.77850926", "0.77850926", "0.77850926", "0.77850926", "0.77850926", "0.77850926", "0.77850926", "0.77850926", "0.77850926", "0.77833325", "0.77818364", "0.7781108" ]
0.8422281
1
Calculate the Planck function for a given frequency range and temperature, in units of W sr^-1 m^-2 Hz^-1
def planck_f(nu, T): return ((2*h*nu**3)/(c**2))*(1./(np.exp((h*nu)/(k*T))-1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def planckian(temp, wavelength):\n if wavelength==560: return 100.0\n if temp<60: temp=60 # For simplicity, in very low temperature\n num = wavelength**(-5)\n try:\n v=num / (math.exp(0.0143877687750393/(wavelength*(10**(-9))*temp)) - 1)\n except:\n print(temp)\n print(wavelength)\n raise ValueError\n v2=(560.0**(-5)) / (math.exp(0.0143877687750393/(560.0*(10**(-9))*temp)) - 1)\n return v*100.0/v2", "def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.psifunc.ev(R, Z, dx=1)/R\n \n return bt**2 + br**2 + bz**2\n \n\n def b_bmax2(R,Z,psi):\n b2 = b2_func(R,Z,psi)\n return b2 / np.max(b2)\n \n def b_bmax(R,Z,psi):\n return np.sqrt(b_bmax2(R,Z,psi))\n \n # Evaluate the flux-surface averaged h^2 and h, as required\n fsa_h2 = self.fs_average(b_bmax2)\n fsa_h = self.fs_average(b_bmax)\n \n # This is the function which gets flux-surface averaged in equation (7)\n def ftl_func(R,Z,psi):\n h = b_bmax(R,Z,psi)\n h2 = b_bmax2(R,Z,psi)\n \n return (1 - (np.sqrt(1 - h) * (1 + 0.5 * h)))/h2\n \n \n # Equation 6, 7 in Lin-Liu\n fs_ftu = 1 - fsa_h2 / fsa_h**2 * (1 - np.sqrt(1 - fsa_h) * (1 + 0.5 * fsa_h))\n fs_ftl = 1 - fsa_h2 * self.fs_average(ftl_func)\n # Equation 18, 19 \n om = 0.75\n self.fs_ft = om*fs_ftu + (1-om)*fs_ftl", "def Planck(T, wav):\n\twav_cm=wav*1.e-7 #convert wavelengths from nm to cm.\n\tc=2.99792e10 #speed of light, in cm/s\n\th=6.62607e-27#Planck constant, in erg*s\n\tkb=1.38065e-16#Boltzmann constant, in erg/K\n\t\n\timport numpy as np\n\tresult_cm=(2.*h*c**2./wav_cm**5.)*1./(np.exp(h*c/(wav_cm*kb*T))-1) #ergs/cm^3/s/steradian \n\t#Will return RunTime warnings for extremal values, which occur at these wavelengths. 
\n\tresult=result_cm*1.e-7 #convert to units of ergs/cm^2/nm/s/steradian \n\treturn result #result is in units of ", "def planck_B_nu(freq, T):\n import numpy as np\n from astropy import units as u\n from astropy import constants as c\n\n if isinstance(T, u.quantity.Quantity):\n use_units = True\n else:\n T = T * u.K\n use_units = False\n\n if not isinstance(freq, u.quantity.Quantity):\n freq *= u.Hz\n\n T = np.array(T.value, ndmin=1) * T.unit\n freq = np.array(freq.value, ndmin=1) * freq.unit\n\n f_ov_T = freq[np.newaxis, :] / T[:, np.newaxis]\n mx = np.floor(np.log(np.finfo(f_ov_T.ravel()[0].value).max))\n exp = np.minimum(f_ov_T * c.h / c.k_B, mx)\n exp = np.maximum(exp, -mx)\n\n output = 2 * c.h * freq**3 / c.c**2 / (np.exp(exp) - 1.0) / u.sr\n\n cgsunit = 'erg/(s*sr*cm**2*Hz)'\n if use_units:\n return output.to(cgsunit).squeeze()\n else:\n return output.to(cgsunit).value.squeeze()", "def distribution_planck_lambda(wavelength=1,temperature=1, units=SI,printA=False):\n\n var = sy.var('pi h c l k t')\n par = np.pi, units['h'], units['c'], wavelength, units['k'], temperature\n\n y = ( 8 * pi * h * c ) / l**5 / ( sy.exp(h*c/l/k/t) - 1 )\n\n return dic_result(var,par,y)", "def planckwavelen(wavel,Temp):\n wavel=wavel*1.e-6 #convert to meters\n c1=2.*h*c**2.\n c2=h*c/kb\n Blambda=1.e-6*c1/(wavel**5.*(np.exp(c2/(wavel*Temp)) -1))\n return Blambda", "def fnutofwave(warr, farr):\n c= 2.99792458e18 #spped of light in Angstroms/s\n return farr*c/warr**2", "def derive_Fritz11(wavelength):\n # Extinction law definition\n wave = np.array([1.282, 1.736, 2.166, 2.625, 2.758, 2.873, 3.039, 3.297, 3.74, 3.819, 3.907, 4.052,\n 4.376, 5.128, 5.908, 6.772, 7.459, 7.502, 8.76, 12.371, 19.062])\n A_AKs = np.array([7.91, 4.30, 2.49, 1.83, 1.51, 1.84, 2.07, 1.66, 1.19, 1.19, 1.09, 1.01, 1.09, 0.99,\n 1.04, 0.84, 0.81, 0.79, 2.04, 1.34, 1.34])\n\n\n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)\n A_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # We'll call 2.14 microns the K-band\n idx = np.where( abs(wavelength - 2.14) == min(abs(wavelength - 2.14)) )\n A_AKs_at_wave = A_at_wave / A_at_wave[idx] \n\n return A_AKs_at_wave", "def temperature() -> float:", "def planck(\n wavel_points: np.ndarray, temperature: float, scaling: float\n ) -> np.ndarray:\n\n planck_1 = (\n 2.0 * constants.PLANCK * constants.LIGHT**2 / (1e-6 * wavel_points) ** 5\n )\n\n planck_2 = (\n np.exp(\n constants.PLANCK\n * constants.LIGHT\n / (1e-6 * wavel_points * constants.BOLTZMANN * temperature)\n )\n - 1.0\n )\n\n return 1e-6 * math.pi * scaling * planck_1 / planck_2 # (W m-2 um-1)", "def ctof(temp):\n return temp * 9/5 + 32 # functions should be surrounded by 2 blank lines", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - 
dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def partition_function(array, temp):\r\n\r\n # Constants imported from scipy.constants\r\n h = scipy.constants.h # Planck's constant\r\n # speed of light must be in cm/s as wavenumber is in cm-1\r\n c = scipy.constants.c * 100\r\n k = scipy.constants.k # Boltzmann constant\r\n T = temp # extracted from log file using extract_temp()\r\n\r\n # check if inputs are numpy arrays and convert if not.\r\n if not isinstance(array, np.ndarray):\r\n np.asarray(array)\r\n\r\n # conversion to exponent\r\n u = (h * array * c) / (k * T)\r\n\r\n # calculates natural log of an individual frequency contribution to the partition function\r\n Q_ = np.log(np.exp(-(u / 2)) / (1 - np.exp(-u)))\r\n # sums all the contributions together, giving the final result.\r\n Q = np.sum(Q_)\r\n return Q", "def butterworth_filter(freq):\n\tf_raw = 1/(0.00000002*100*33)\n\tb = np.array([[-32092,15750],[-31238,14895]])*2.0**(-14)\n\tomega = 2*np.pi*freq/f_raw\n\te1, e2 = np.exp(-1j*omega), np.exp(-2j*omega)\n\ttmp = (1+2*e1+e2)**2/(1+b[0,0]*e1+b[0,1]*e2)/(1+b[1,0]*e1+b[1,1]*e2)\n\treturn tmp * (1+sum(b[0]))*(1+sum(b[1]))/16", "def calc_q_gain(Tfl, Tabs, q_rad_Whperm2, DT, Tin, Tout, aperture_area_m2, c1, c2, Mfl, delts, Cp_waterglycol, C_eff, Te):\n\n xgain = 1\n xgainmax = 100\n exit = False\n while exit == False:\n qgain_Whperm2 = q_rad_Whperm2 - c1 * (DT[1]) - c2 * abs(DT[1]) * DT[1] # heat production from solar collector, eq.(5)\n\n if Mfl > 0:\n Tout = ((Mfl * Cp_waterglycol * Tin) / aperture_area_m2 - (C_eff * Tin) / (2 * delts) + qgain_Whperm2 + (\n C_eff * Tfl[1]) / delts) / (Mfl * Cp_waterglycol / aperture_area_m2 + C_eff / (2 * delts)) # eq.(6)\n Tfl[2] = (Tin + Tout) / 2\n DT[2] = Tfl[2] - Te\n qdiff = Mfl / aperture_area_m2 * Cp_waterglycol * 2 * (DT[2] - DT[1])\n else:\n Tout = Tfl[1] + (qgain_Whperm2 * delts) / C_eff # eq.(8)\n Tfl[2] = Tout\n DT[2] = Tfl[2] - Te\n qdiff = 5 * (DT[2] - DT[1])\n\n if abs(qdiff < 0.1):\n DT[1] = DT[2]\n exit = True\n else:\n if xgain > 40:\n DT[1] = (DT[1] + DT[2]) / 2\n if xgain == xgainmax:\n exit = True\n else:\n DT[1] = DT[2]\n xgain += 1\n\n # FIXME: redundant...\n # qout = Mfl * Cp_waterglycol * (Tout - Tin) / aperture_area\n # qmtherm = (Tfl[2] - Tfl[1]) * C_eff / delts\n # qbal = qgain - qout - qmtherm\n # if abs(qbal) > 1:\n # qbal = qbal\n return qgain_Whperm2", "def __t_fine__(self, adc_temperature):\n var1 = (((adc_temperature >> 3) -\n (self._calibration_t[0] << 1)) * self._calibration_t[1]) >> 11\n var2 = (((\n ((adc_temperature >> 4) - self._calibration_t[0]) *\n ((adc_temperature >> 4) - self._calibration_t[0])) >> 12)\n * self._calibration_t[2]) >> 14\n return var1 + var2", "def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg", "def test_width_z_to_f_f(mf, ncf, t3f, qf):\n pre = EL / (SW * CW)\n gl = pre * (t3f - qf * SW**2)\n gr = -pre * qf * SW**2\n\n def msqrd_z_to_f_f(_):\n return (\n ncf\n * 2\n / 3\n * (6 * gl * gr * mf**2 + (gl**2 + gr**2) * (-(mf**2) + MZ**2))\n )\n\n mr2 = (mf / MZ) ** 2\n af = t3f - qf * SW**2\n bf = -qf * SW**2\n\n analytic = (\n ncf\n * EL**2\n * MZ\n / (24 * np.pi * CW**2 * SW**2)\n * np.sqrt(1 - 4 * mr2)\n * ((af**2 + bf**2) * (1 - mr2) + 6 * af * bf * mr2)\n )\n\n phase_space = Rambo(MZ, [mf, mf], msqrd=msqrd_z_to_f_f)\n width = phase_space.decay_width(10_000, seed=SEED)[0]\n\n assert width == pytest.approx(analytic)", "def tconst_filter(freq, tau):\n\treturn 1/(2*np.pi*1j*freq*tau+1)", "def gas_fvf2(unit='unit1', 
z=0.8, temp=186, pressure=2000):\n if unit == 'unit1':\n return(0.00503676 * z * temp / pressure) \n if unit == 'unit2':\n return(0.350958 * z * temp / pressure)", "def curly_F_tau(Teff, tau):\n\n return 2*np.pi*(trapezoidal(lambda t: integrated_planck(Teff*(0.5+ 3/4*t)**(1/4))*sc.expn(2, t-tau), tau, 20, 5000)-trapezoidal(lambda t: integrated_planck(Teff*(0.5+ 3/4*t)**(1/4))*sc.expn(2, tau-t), 0, tau, 5000))", "def Fritz11(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def mce_filter(freq, f_raw, params):\n\tz = np.exp(-2j*np.pi*freq/f_raw)\n\tb11, b12, b21, b22 = np.array(params[:4])*0.5**14\n\tH = (1+z)**4 / (1-b11*z+b12*z**2) / (1-b21*z+b22*z**2)\n\tH /= 2**4 / (1-b11+b12) / (1-b21+b22)\n\treturn H", "def Fitzpactrick09(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def kf_continuous(f, amp, tr_fac, del_fac, t):\n T = 1 / f\n T_sinu = 0.5 * T\n delay = del_fac * T\n tr = tr_fac * T\n beta = 1 - (2 * tr / T)\n\n t_T1 = 0\n t_T2 = (T * (1 - beta) / 4)\n t_T3 = (T * (1 + beta) / 4)\n t_T4 = (T * (3 - beta) / 4)\n t_T5 = (T * (3 + beta) / 4)\n t_T6 = T\n\n t = np.mod(t - delay, T)\n if t_T1 <= t < t_T2:\n f_value = amp / 2 + amp / 2 * np.cos(\n (2 * np.pi * t) / (T_sinu * (1 - beta)))\n elif t_T2 <= t < t_T3:\n f_value = 0\n elif t_T3 <= t < t_T4:\n f_value = -amp / 2 - amp / 2 * np.cos(\n (2 * np.pi * (t - (beta * T / 2))) / (T_sinu * (1 - beta)))\n elif t_T4 <= t < t_T5:\n f_value = 0\n elif t_T5 <= t <= t_T6:\n f_value = amp / 2 + amp / 2 * np.cos(\n (2 * np.pi * (t - beta * T)) / (T_sinu * (1 - beta)))\n return f_value\n # ------------------------------------------\n\n\n # revolving wing kinematics with sinusiodal ramp function", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def qtf(self, vw, th, gp, psi_l, lai, dt):\n\t\t#if the amount of water in tank is less than amount that will be 
absorbed by plant in timestep dt, then what's left will be absorbed \n\t qtt = th - self.qwf(vw, th, gp, psi_l, lai, dt)\n\t if self.tx*self.ZT*10**6 <= 0:\n\t return 0.\n\t elif self.tx*self.ZT*10**6 <= qtt*dt:\n\t return (self.tx*self.ZT*10**6/dt)\n\t else:\n\t return qtt", "def planck_w(lam, T):\n return ((2*h*c**2)/(lam**5))*(1./(np.exp((h*c)/(lam*k*T))-1))", "def PlankFunction(wavelen,T=5778.):\n\n c1=1.191042E8\n c2=1.4387752E4\n L=c1/(wavelen**5*(np.exp(c2/(wavelen*T))-1))\n return L", "def kelvin_to_fahrenheit(kelvin_temp):\n\n\treturn math.floor(9/5 * (kelvin_temp - 273) + 32)" ]
[ "0.6799455", "0.61881703", "0.618216", "0.6136792", "0.6080317", "0.6044308", "0.59986985", "0.5993096", "0.5970151", "0.5967164", "0.59125924", "0.5905185", "0.5874795", "0.58429915", "0.58087075", "0.5794045", "0.5765954", "0.5755308", "0.5753622", "0.5728672", "0.57057214", "0.5704785", "0.56993055", "0.56939274", "0.56936574", "0.5691284", "0.56791973", "0.56773", "0.5676892", "0.56718415" ]
0.63436884
1
Update the position of the car according to the path
def update_pos(self): self.imgx=self.pathX[min(self.x,len(self.pathX)-1)]\ [min(self.y,len(self.pathX[self.x])-1)] self.imgy=self.pathY[min(self.x,len(self.pathY)-1)]\ [min(self.y,len(self.pathY[self.x])-1)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self, path):\n self.current_location = (path[1][1], path[1][0])", "def updatePos(self):\n self.timeDriving +=1\n self.pos[0] += self.vx\n self.pos[1] += self.vy", "def update(self):\n # Move left/right=====\n self.rect.x += self.change_x\n self.rect.y += self.change_y\n visited[int(self.rect.x/32)][int(self.rect.y/32)].append(self.id)\n\n self.path.append((int(self.rect.x/32), int(self.rect.y/32)))\n\n # if(self.rect.x == goal_x) & (self.rect.y == goal_y):\n # pygame.quit()\n # sys.exit(0)\n\n self.change_x = 0\n self.change_y = 0", "def update_position(position):\n pass", "def update_carsPosition(self ):\n self.positions = list()\n for car in self.fleet:\n self.positions.append(self.fleet[car].position)", "def updatePosition(self):\n\n #For this update, a time-step of 1 is assumed ->Change Code if not true\n self.position = [self.position[0] + self.velocity[0], self.position[1]+self.velocity[1]]", "def update(self):\n if self.dir == \"r\":\n self.vx = 10\n self.vy = 0\n elif self.dir == \"l\":\n self.vx = -10\n self.vy = 0\n elif self.dir == \"u\":\n self.vx = 0\n self.vy = -10\n elif self.dir == \"d\":\n self.vx = 0\n self.vy = 10\n elif self.dir == \"None\":\n self.vx = 0\n self.vy = 0\n self.x += self.vx\n self.y += self.vy", "def move_car(self):\n import interface\n self.reset_position()\n print(\"move car during %f s...\" % self.portion_duration*self.nbr_portions)\n\n X, Y = [], []\n t_debut = time.time()\n while time.time() - t_debut < self.portion_duration*self.nbr_portions:\n current_time = time.time() - t_debut\n # On fait bouger les 4 roues.\n for numero_roue, speed in enumerate(self(current_time)):\n print(numero_roue)\n interface.move_wheel(numero_roue+1, speed)\n\n # Recuperation de la position reele\n (x, y), _ = interface.get_position()\n X.append(x)\n Y.append(y)\n\n interface.move_wheel(\"\", 0) # La voiture s'arette a la fin.\n print(\"\\tterminate\")\n return x, y", "def _move(self, pos):\n self.put_par(\"drive\", pos)", "def update_pos(self, move):\n change = Maze.moves[move]\n self.current_pos[0] += change[0]\n self.current_pos[1] += change[1]", "def move(self):\n self.pos += self.direc\n self.nearest_node = self.pixel_to_node()", "def update_position(self):\n self.back = self.pos % self.road_len\n self.front = (self.pos + self.length) % self.road_len", "def update_position(self):\n self.position[0] += self.velocity[0]\n self.position[1] += self.velocity[1]", "def update(self, pos):\n\t\tpass", "def update_path():\n #TODO update path information\n pass", "def set_new_location(self, xPos, yPos):", "def move_by(self, path, env=None):\n env = self._find_env(env)\n old_pos = self.position(env)\n new_pos = [p + c for p, c in zip(old_pos, path)]\n env.move_agent(self, new_pos)", "def _update_loc(self) -> None:\n self.state[:, :, Boids.Attr.LOC] += self.state[:, :, Boids.Attr.VEL]\n # wrap-around the simulated environment\n self.state[:, :, Boids.Attr.LOC] %= np.expand_dims(self.env_bounds, axis=1)", "def move(self, id, move):\n vehicle = self.vehicles[id]\n if vehicle.orientation == 'H' :\n self.vehicles[id].x += move\n elif vehicle.orientation == 'V':\n self.vehicles[id].y += move\n self.fill_field(list(self.vehicles.values()))\n self.show_field(list(self.vehicles.values()), True)", "def odom_update(self, data):\n self.curr_pos = (data.pose.pose.position.x, data.pose.pose.position.y)", "def update_position(self, dt):\n self._x += self._vx * dt\n self._y += self._vy * dt\n\n # TODO: Add timer countdown if infected and logic for dying/recovering.", "def 
move(self, p):\r\n self.position.setvalue(p)", "def update_obstacle_location(self):\n\n # find the previous location of the obstacle\n old_y = self.map_obstacle.y\n old_x = self.map_obstacle.x\n\n # remove it from the main graph\n self.main_graph[old_y][old_x].contents.remove(self.map_obstacle)\n\n # get the latest location\n self.map_obstacle.update_location()\n (new_y, new_x) = (self.map_obstacle.y, self.map_obstacle.x)\n\n # add it back into the main graph\n self.main_graph[new_y][new_x].contents.add(self.map_obstacle)\n\n # update the map obstacle (not necessary, but it doesn't hurt)\n self.map_obstacle.y = new_y\n self.map_obstacle.x = new_x", "def update_position(self, p):\n if self.track:\n our_p = self.c.p\n assert self.gnx == our_p.gnx\n else:\n our_p = self.get_position()\n\n if p.gnx == our_p.gnx:\n self.update_position_edit(p)\n if self.update:\n self.update_position_view(p)", "def update_position(self):\n self.current_position = utility_methods.cylindrical(self.current_position + self.rotation)\n\n self.rotation_list.append(self.current_position)", "def cambiovelocidad(self,x,y):\n self.change_x += x\n self.change_y += y", "def update_pos(self):\n s = self\n s.rpos = s.rects[0].inf\n s.pos = s.physics.scl_coord_res(s.rpos)", "def update(self, time_step):\n a = [0,0]\n F = self.force()\n for i in [0,1]: # We have to update x and y\n a[i] = self.force()[i] / self.mass\n self.velocity[i] = self.velocity[i] + a[i]*time_step\n self.position[i] = self.position[i] + self.velocity[i]*time_step # I'm lazy\n self.turtle.goto(self.position) # Comment out the goto if you need the simulation to run really fast; you won't get the animation", "def update_position(self, canvas):\n if self.x <= 0:\n if self.direction == \"SW\":\n self.direction = \"SE\"\n if self.direction == \"W\":\n self.direction = \"E\"\n if self.direction == \"NW\":\n self.direction = \"NE\"\n if self.x >= canvas.width:\n if self.direction == \"SE\":\n self.direction = \"SW\"\n if self.direction == \"E\":\n self.direction = \"W\"\n if self.direction == \"NE\":\n self.direction = \"NW\"\n if self.y <= 0:\n if self.direction == \"NW\":\n self.direction = \"SW\"\n if self.direction == \"N\":\n self.direction = \"S\"\n if self.direction == \"NE\":\n self.direction = \"SE\"\n if self.y >= canvas.height:\n if self.direction == \"SW\":\n self.direction = \"NW\"\n if self.direction == \"S\":\n self.direction = \"N\"\n if self.direction == \"SE\":\n self.direction = \"NE\"\n if self.direction == \"N\":\n self.y -= 1\n if self.direction == \"NE\":\n self.y -= 1\n self.x += 1\n if self.direction == \"E\":\n self.x += 1\n if self.direction == \"SE\":\n self.x += 1\n self.y += 1\n if self.direction == \"S\":\n self.y += 1\n if self.direction == \"SW\":\n self.x -= 1\n self.y += 1\n if self.direction == \"W\":\n self.x -= 1\n if self.direction == \"NW\":\n self.y -= 1\n self.x -= 1", "def setPath(self, request, context):\n \n cmds = self.vehicle.commands\n coordFrame, alt = None, None\n waypoints = []\n \n # The idea behind stripping off the first position is to determine what reference frame to\n # to use. Future proto changes will removed the coordinate frame boolean flag from the \n # request making the code unnecessary. 
For now, this is the way it is.\n firstPosition = nth(request, 0)\n lat = firstPosition.lat\n lon = firstPosition.lon\n \n useRelativeAltitude = firstPosition.useRelativeAltitude\n \n if useRelativeAltitude:\n alt = firstPosition.relativeAltitude\n coordFrame = mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT\n else:\n alt = firstPosition.gpsAltitude\n coordFrame = mavutil.mavlink.MAV_FRAME_GLOBAL\n\n print ('First position at ({0},{1}) -> {2}'.format(lat, lon, alt))\n waypoints.append([lat, lon, alt])\n nextIndex = self.vehicle.commands.next\n # Make sure the drone is not in AUTO mode. \n #self.vehicle.mode = VehicleMode(\"LOITER\")\n self.clear_mission(cmds, coordFrame)\n \n # Add first position\n cmds.add(Command( 0, 0, 0, coordFrame, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, lat, lon, alt))\n \n # Add the remaining positions\n for position in request:\n lat = position.lat\n lon = position.lon\n if useRelativeAltitude:\n alt = position.relativeAltitude\n else:\n alt = position.gpsAltitude\n print ('Point at ({0},{1}) -> {2}'.format(lat, lon, alt))\n cmds.add(Command( 0, 0, 0, coordFrame, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, lat, lon, alt))\n waypoints.append([lat, lon, alt])\n \n print (\"Uploading new commands to drone\")\n cmds.upload()\n \n # Reset mission set to first (0) waypoint\n #if self.vehicle.commands.next !=0:\n # print \"Continuing mission...\"\n #else:\n # print \"Starting mission\"\n # self.vehicle.commands.next = 0\n if len(self.vehicle.waypoints)==0:\n print \"Starting mission\"\n self.vehicle.commands.next = 0\n else:\n print \"Continuing mission...\"\n self.vehicle.commands.next = nextIndex\n \n self.vehicle.waypoints = waypoints \n self.vehicle.mode = VehicleMode(\"AUTO\")\n \n self.print_mission() \n \n return droneconnect_pb2.Null()" ]
[ "0.6872859", "0.6630316", "0.6528707", "0.6485087", "0.6460367", "0.63058037", "0.6304079", "0.626132", "0.6236285", "0.6215101", "0.62078655", "0.6150422", "0.6096822", "0.6092482", "0.6079556", "0.60738695", "0.6034675", "0.6032831", "0.60220134", "0.600705", "0.5996151", "0.5981107", "0.59738964", "0.59718555", "0.5965237", "0.59611845", "0.5934941", "0.59146875", "0.59007365", "0.58701813" ]
0.68087614
1
Creates a PlatformParameterModel instance.
def create( cls, param_name: str, rule_dicts: List[platform_parameter_domain.PlatformParameterRuleDict], rule_schema_version: int, default_value: platform_parameter_domain.PlatformDataTypes ) -> PlatformParameterModel: return cls( id=param_name, rules=rule_dicts, rule_schema_version=rule_schema_version, default_value=default_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createParameter(self):\n return _libsbml.Model_createParameter(self)", "def createParameter(self, name, value):\r\n parameter = Parameter(self)\r\n self.callRemote('createParameter', name, value).chainDeferred(parameter)\r\n return parameter", "def remote_createParameter(self, name, value):\r\n return Parameter(self, name, value)", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def new_platform(self, id):\n p = Platform(self, id, [])\n self.platforms[id] = p\n return p", "def create_entity(self):\n \n if self.ORION_CB.get_entity(self.params['name']) is None:\n \n print('[INFO]: Create new PID entity')\n \n entity_dict = {\"id\":self.params['name'], \"type\":'PID_controller'}\n for attr in ['Kp', 'Ti', 'Td', 'lim_low', 'lim_high', 'setpoint']:\n entity_dict.update({attr:{'value':self.params[attr],'type':'Number'}})\n\n entity_dict.update({'reverse_act':{'value':self.params['reverse_act'],'type':'Text'}})\n \n entity = filip.orion.Entity(entity_dict)#, attrs)\n\n self.ORION_CB.post_entity(entity)\n \n else:\n print('Entity name already assigned')", "def build(cls, \n param_dir):\n with open(cls._parameters_file(param_dir)) as f:\n parameters = json.load(f)\n\n # Encapsulate training parameters\n training_parameters = TrainingParameters(parameters[\"training_epochs\"])\n\n # Encapsulate model hyperparameters\n model_parameters = ModelParameters(\n parameters[\"learning_rate\"],\n parameters[\"momentum\"],\n parameters[\"model\"],\n parameters[\"input_keep_probability\"],\n parameters[\"output_keep_probability\"],\n parameters[\"sequence_length\"],\n parameters[\"input_dimension\"],\n parameters[\"batch_size\"], \n parameters[\"state_size\"], \n parameters[\"n_layers\"],\n parameters[\"n_classes\"])\n\n # Encapsulate directories name\n directories = Directories(parameters[\"log_dir\"],\n parameters[\"checkpoint_dir\"])\n\n model = cls(\n model_parameters,\n training_parameters,\n directories)\n\n return model", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def __init__(__self__, *,\n name: pulumi.Input[str],\n type: pulumi.Input[str],\n composite_model_properties: Optional[pulumi.Input[Sequence[pulumi.Input['AssetModelPropertyArgs']]]] = None,\n description: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"type\", type)\n if composite_model_properties is not None:\n pulumi.set(__self__, \"composite_model_properties\", composite_model_properties)\n if description is not None:\n pulumi.set(__self__, \"description\", description)", "def test_native(self):\n kwargs = dict(\n kind=POSITIONAL_ONLY,\n name='a',\n interface_name='b',\n default=None,\n type=int,\n )\n param = FParameter(**kwargs).native\n assert param.kind == kwargs['kind']\n assert param.name == kwargs['name']\n assert param.default == kwargs['default']\n assert param.annotation == kwargs['type']", "def from_json(cls, factory, json_data):\n data = deepcopy(json_data)\n\n parameters = []\n for parameter_data in data[\"parameters\"]:\n id = parameter_data[\"id\"]\n parameter_factory = factory.parameter_factory_by_id(id)\n parameter = parameter_factory.model_class.from_json(\n parameter_factory, parameter_data[\"model_data\"]\n )\n parameters.append(parameter)\n data[\"parameters\"] = parameters\n\n data[\"kpis\"] = [KPISpecification(**d) for d in data[\"kpis\"]]\n\n mco_model = factory.create_model(data)\n return mco_model", "def create_model(self, setting: SettingType) -> 
BaselineModel[SettingType]:\n # Create the model, passing the setting, hparams and config.\n return MyCustomModel(setting=setting, hparams=self.hparams, config=self.config)", "def createModel(self):\n model_psp = self.getModelPsp()\n\n if not model_psp:\n log_func.warning(u'Not define model in <%s : %s>' % (self.getName(), self.getType()))\n return None\n\n model_name = self.newPassport().setAsStr(model_psp).name\n\n scheme = self.getScheme()\n if scheme:\n return scheme.getModel(model_name)\n else:\n log_func.warning(u'Error create data scheme object')\n return None", "def createModelFromParams(self, modelParams):\n model = ModelFactory.create(modelParams)\n model.enableInference({\"predictedField\": self.fieldToPredict})\n return model", "def new(self, **kwargs):\n return self.__model__(**self._preprocess_params(kwargs))", "def new_parameter(request, **_kwargs):\n return create_view(request, _(\"Parameter\"), ParameterForm)", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def create_platform():\n if config.P_LIST == []:\n pitem = obstacle.Platform(\n randint(config.M.x_pos+2, common.COLS-5), randint(common.R1_R, common.MIDS_R-5))\n config.P_LIST.append(pitem)\n elif len(config.P_LIST) < int(common.COLS/20):\n if randint(0, 5) == 1:\n pos = config.P_LIST[-1].x_pos + randint(7, 15)\n if pos < (common.COLS - 3):\n pitem = obstacle.Platform(pos, randint(\n common.R1_R, common.MIDS_R-5))\n config.P_LIST.append(pitem)\n\n for i in config.P_LIST:\n xitem = randint(-3, 3)+i.x_pos\n i.move(xitem)", "def create_object_parameter(obj, tpclass, unique, tp = 'text', name = None, descr = None, values = []):\n t = type(obj)\n if t not in parameter_class_map:\n raise TypeError('obj must be one of model classes with parametes, not {0}'.format(t))\n pclass = parameter_class_map[t]['param']\n pvlclass = parameter_class_map[t]['vl']\n prmt = pclass(obj = obj,\n tpclass = tpclass,\n unique = 1 if unique else None,\n tp = tp,\n name = name,\n enum = (True if (len(values) > 0) else False),\n descr = descr)\n prmt.save(force_insert=True)\n for vl in values:\n pvl = pvlclass(parameter = prmt,\n value = vl['value'],\n caption = vl.get('caption'))\n try:\n pvl.save(force_insert=True)\n except IntegrityError:\n pass # just ignore same values\n return prmt", "def model(self, **config_kwargs):\n measurement = self.get_measurement(**config_kwargs)\n log.debug(\n 'model being created for measurement {0:s}'.format(measurement['name'])\n )\n\n patches = config_kwargs.get('patches', [])\n\n modelspec = {\n 'channels': self.spec['channels'],\n 'parameters': measurement['config']['parameters'],\n }\n for patch in patches:\n modelspec = jsonpatch.JsonPatch(patch).apply(modelspec)\n\n return Model(modelspec, poiname=measurement['config']['poi'], **config_kwargs)", "def create_model(ModelName=None, PrimaryContainer=None, Containers=None, ExecutionRoleArn=None, Tags=None, VpcConfig=None, EnableNetworkIsolation=None):\n pass", "def get_parameter_dict(self):\n prm = ModelParameters()\n prm.define(\"a\", self.a)\n return prm", "def create_model(self):\n try:\n self.model = PPO2.load(self.save_path)\n self.model.set_env(self.env)\n print(\"Loading of the latest model successful!\")\n except:\n print(\"Creating new model...\")\n self.model = PPO2(CnnPolicy, self.env, verbose=1)", "def platform(self) -> Platform:\n _args: list[Arg] = []\n _ctx = self._select(\"platform\", _args)\n return _ctx.execute_sync(Platform)", "async def async_create_platform_type(\n hass: HomeAssistant, 
config: ConfigType, p_type: str, p_config: dict\n) -> DeviceTrackerPlatform | None:\n platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)\n\n if platform is None:\n return None\n\n return DeviceTrackerPlatform(p_type, platform, p_config)", "def create_model(self):\n pass", "def create_model(self):\n pass", "def CreateModel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def initialize(self):\n for key in self.parameter_dict:\n self.models[key] = self._create_model(key)", "def make_architecture(self):\n self.arch = simple_mlp(num_inputs=self.p.model.num_inputs,\n num_outputs=self.p.model.num_outputs,\n params=self.p.model.arch)" ]
[ "0.6038443", "0.5898745", "0.57178617", "0.54691094", "0.5428996", "0.52719665", "0.52563184", "0.5229282", "0.5225646", "0.5200385", "0.5198576", "0.5181845", "0.5179588", "0.517855", "0.5129397", "0.51045793", "0.5091119", "0.50768375", "0.50669825", "0.5065463", "0.5036845", "0.50308007", "0.50225234", "0.50104576", "0.50061905", "0.500529", "0.500529", "0.49963576", "0.49866122", "0.49657586" ]
0.76449805
0
Get a map of the count of each word in a string
def get_word_count(my_str): my_list = my_str.split(" ") my_map = {} for word in my_list: # Strip the word from any character word = word.strip(".") word = word.strip(",") # Convert word to all lowercase word = word.lower() if word not in my_map: my_map[word] = 1 else: my_map[word] += 1 return my_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def word_count(input_str):\n counts = dict()\n words = input_str.split()\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts", "def word_count(poem):\n lines = [line for line in poem.split(\"\\n\") if line]\n word_map = {}\n for line in lines:\n for word in line.split(\" \"):\n if word:\n if word in word_map:\n word_map[word] += 1\n else:\n word_map[word] = 1\n return word_map", "def word_count(string):\n counts = dict()\n words = string.split()\n\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return len(counts)", "def get_words(s):\n d = {}\n s = s.lower()\n for word in s.split():\n d[word] = d.get(word,0) + 1\n return d", "def word_count(phrase):\n word_dict = {}\n\n for word in phrase.split():\n word_dict[word] = word_dict.get(word, 0) + 1\n\n return word_dict", "def mapWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n return token_map", "def get_letter_counts(str_):\n return dict(Counter(str_))", "def word_count(phrase):\n return collections.Counter(phrase.split())", "def word_count(text):\n\n # Tokenize text on whitespace / newline\n words = text.strip().split()\n\n # Create a dictionary from the set of tokens, initializing each count to 0\n counts = dict.fromkeys(words, 0)\n\n # Iterate over the text to count occurences of each token\n for word in words:\n counts[word] += 1\n\n # Return the counts\n return counts", "def count_tokens(self, words: Iterable[str]) -> Dict[str, int]:\r\n token_counts = Counter(words)\r\n return {\" \".join(token): count for token, count in token_counts.items()}", "def word_count(text):\n # Use a dictionary to store the words\n words = {}\n\n # Simple way to strip extra whitespace\n text = ' '.join(text.split())\n\n # Now iterate through, splitting on space\n for word in text.split(\" \"):\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n\n return words", "def _count_words_in_string(self, sentence):\n word_count = dict()\n for i in sentence:\n if word_count.get(i) is None:\n word_count[i] = 1\n else:\n word_count[i] = word_count.get(i)+1\n\n return word_count", "def _word_counter(input_string: str) -> Dict[str, int]:\n # @todo Create a data type that can counts keys as they are added\n _current_word = ''\n parsed_words = {}\n\n for character in input_string.lower():\n if character in MaximalTextAnalyzer._letters:\n _current_word += character\n elif len(_current_word):\n parsed_words = MaximalTextAnalyzer._insert_into_dict(\n words_dict=parsed_words, key=_current_word)\n\n _current_word = ''\n\n # What if it does not end with a separator?\n if _current_word:\n parsed_words = MaximalTextAnalyzer._insert_into_dict(\n words_dict=parsed_words, key=_current_word)\n\n return parsed_words", "def word_count(phrase):\n words = phrase.split()\n deDupedWords = set(words)\n wordCount = {}\n\n for element in deDupedWords:\n wordCount.update({element: words.count(element)})\n\n return wordCount", "def text2wordfreq(string, lowercase=False):\r\n\r\n\r\n from collections import Counter\r\n lst = Counter(tokenize(string, lowercase)).most_common()\r\n\r\n dictLst = dict(lst)\r\n\r\n return dictLst", "def prep_dict(word):\n counts = {}\n for l in word.lower():\n if l!=\" \":\n counts[l] = counts.get(l,0) + 1\n return counts", "def word_count(s):\n # Your code here\n\n stop_char = r\"\"\":;\",.-+=/|[]{|}()*^\\&\"\"\"\n\n # Make sure special characters arent in string\n s_clean = 
\"\".join([x for x in s if x not in stop_char])\n\n # Lower case and remove trailing space\n word_list = s_clean.lower().split()\n\n # use cache to hold memory\n word_count = {}\n\n for x in word_list:\n\n if x not in word_count:\n # if not there, start it at 0\n word_count[x] = 0\n\n # if seen again, increase count\n word_count[x] += 1\n\n return word_count", "def count_words(phrase):\n # split the input string at spaces\n phrase_split = phrase.split()\n\n # initiate empty dictionary\n word_count = {}\n\n # iterate over words in the phrase\n for word in phrase_split:\n if word in word_count:\n\n # if the word is already a key in the dictionary, increase the value by 1\n word_count[word] += 1\n\n else:\n # if the word is not a key in the dictionary, set its value to 1\n word_count[word] = 1\n\n return word_count", "def word_frequency(a_string):\n\n for char in \"\"\".$#,:\"'?!)(\"\"\":\n a_string = a_string.replace(char, \"\")\n for char in \"\"\"-\"\"\":\n a_string = a_string.replace(char, \" \")\n\n cleanstring = a_string.lower()\n a_list = cleanstring.split()\n a_dict = {}\n for item in a_list:\n if item in a_dict:\n a_dict[item]+= 1\n else:\n a_dict[item] = 1\n return a_dict", "def get_frequencies(tokens):\n cnt = {}\n\n for word in tokens:\n if word not in cnt:\n cnt[word] = 0\n\n cnt[word] += 1\n\n return cnt", "def bow(tokens):\n return dict(collections.Counter(re.findall(r'\\w+', \" \".join(tokens))))", "def word_count(self):\n from collections import Counter\n counts = Counter(self._replace_non_alnum().split())\n return counts", "def word_lengths(sentence):\n\n word_count_dict = {}\n sentence = sentence.split()\n\n for word in sentence:\n length = len(word)\n if length not in word_count_dict:\n word_count_dict[length] = {word}\n else:\n set = word_count_dict[length]\n set.add(word)\n\n return word_count_dict", "def count_words(self, contents):\n wordCounts = {}\n for i in self.ngramCounts:\n if i == 0: # want the default to be the size of the corpus\n total = 0\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for word in words:\n if word:\n total += 1\n wordCounts[i] = defaultdict(lambda: total)\n continue\n else:\n counts = defaultdict(lambda: 0)\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for k, word in enumerate(words): \n if k < (i-1) or not word:\n continue\n key = \"\"\n for j in range(k-i+1, k+1):\n key += words[j] + \" \"\n counts[key.strip()] += 1\n wordCounts[i] = counts\n return wordCounts", "def count_word_in_each_sentence(sentence):\n\tsentence = sentence.lower()\n\twords = sentence.split()\n\tcount_dict = dict()\n\tfor _ in words:\n\t\tif count_dict.get(_):\n\t\t\tcount_dict[_] += 1\n\t\telse:\n\t\t\tcount_dict[_] = 1\n\treturn count_dict", "def countwords(txt):\n\twords = {}\n\n\tpattern = re.compile(\"[a-zA-Z][a-zA-Z0-9]*\")\t\n\tfor word in pattern.findall(txt):\n\t\twords[word.lower()] = words.get(word,0)+1\t \n\t\n\t# i'd rather do this in the prior step\n\t# but i need to be able to eliminate dupes\n\t# which may or may not be more expensive than\n\t# going this route. 
need to benchmark it.\n\tfor key,word in words.items():\n\t\tapcount.setdefault(key,0)\n\t\tapcount[key]+=1\n\t\n\treturn words", "def countAll(word):\n counts = {}\n for char in word.lower():\n if char not in counts:\n counts[char] = 1\n else:\n counts[char] += 1\n return counts", "def make_word_num_map(words):\n\tword_num_map = dict()\n\tfor word in words:\n\t\tword_num_map[word] = word_num_map.get(word, 0) + 1\n\treturn word_num_map", "def word_count(phrase):\n Wordlist = phrase.replace(\"\\n\", ' ') # Creating a list without escape codes\n Wordlist = Wordlist.split(\" \") # Split the sentence in words\n dictionary = {} # Create an empty dictionary to store the results\n for i in Wordlist:\n if i != '': # unless is a ''\n dictionary[i] = Wordlist.count(i)\n return dictionary", "def count(words):\n\n values = []\n \n # dictionary whose keys are words and values number of occurrences\n D = {}\n\n for word in words:\n # if word is already in dict add 1 to the count\n try : D[word] +=1\n # otherwise add entrye to dict\n except : D[word] = 1\n\n values += [D[word]]\n\n return values" ]
[ "0.8051829", "0.7836102", "0.76682496", "0.76395476", "0.76118475", "0.7599304", "0.75386703", "0.7518596", "0.75086623", "0.74244153", "0.7382403", "0.7335036", "0.729379", "0.72932726", "0.72677284", "0.726587", "0.725742", "0.72382796", "0.7232885", "0.7225221", "0.7218572", "0.72163534", "0.7179361", "0.71793467", "0.7164414", "0.7154297", "0.71500236", "0.71354604", "0.71167386", "0.70915014" ]
0.79201156
1
Holds the necessary content for the nginx Dockerfile
def get_dockerfile_content(self): dockerfile_content: List[str] = [ 'FROM nginx:latest', '# Update and install required packages', 'RUN apt-get update', 'RUN apt-get install vim -y', '', 'COPY ./.docker/config/nginx.conf /etc/nginx/conf.d/nginx.conf', '', 'ENTRYPOINT ["nginx"]', 'CMD ["-g","daemon off;"]' ] return dockerfile_content
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_nginx_config(nginx_root: str, nginx_webroot: str, http_port: int, https_port: int,\n other_port: int, default_server: bool, key_path: Optional[str] = None,\n cert_path: Optional[str] = None, wtf_prefix: str = 'le') -> str:\n key_path = key_path if key_path \\\n else pkg_resources.resource_filename('certbot_integration_tests', 'assets/key.pem')\n cert_path = cert_path if cert_path \\\n else pkg_resources.resource_filename('certbot_integration_tests', 'assets/cert.pem')\n return '''\\\n# This error log will be written regardless of server scope error_log\n# definitions, so we have to set this here in the main scope.\n#\n# Even doing this, Nginx will still try to create the default error file, and\n# log a non-fatal error when it fails. After that things will work, however.\nerror_log {nginx_root}/error.log;\n\n# The pidfile will be written to /var/run unless this is set.\npid {nginx_root}/nginx.pid;\n\nuser {user};\nworker_processes 1;\n\nevents {{\n worker_connections 1024;\n}}\n\n# “This comment contains valid Unicode”.\n\nhttp {{\n # Set an array of temp, cache and log file options that will otherwise default to\n # restricted locations accessible only to root.\n client_body_temp_path {nginx_root}/client_body;\n fastcgi_temp_path {nginx_root}/fastcgi_temp;\n proxy_temp_path {nginx_root}/proxy_temp;\n #scgi_temp_path {nginx_root}/scgi_temp;\n #uwsgi_temp_path {nginx_root}/uwsgi_temp;\n access_log {nginx_root}/error.log;\n\n # This should be turned off in a Virtualbox VM, as it can cause some\n # interesting issues with data corruption in delivered files.\n sendfile off;\n\n tcp_nopush on;\n tcp_nodelay on;\n keepalive_timeout 65;\n types_hash_max_size 2048;\n\n #include /etc/nginx/mime.types;\n index index.html index.htm index.php;\n\n log_format main '$remote_addr - $remote_user [$time_local] $status '\n '\"$request\" $body_bytes_sent \"$http_referer\" '\n '\"$http_user_agent\" \"$http_x_forwarded_for\"';\n\n default_type application/octet-stream;\n\n server {{\n # IPv4.\n listen {http_port} {default_server};\n # IPv6.\n listen [::]:{http_port} {default_server};\n server_name nginx.{wtf_prefix}.wtf nginx2.{wtf_prefix}.wtf;\n\n root {nginx_webroot};\n\n location / {{\n # First attempt to serve request as file, then as directory, then fall\n # back to index.html.\n try_files $uri $uri/ /index.html;\n }}\n }}\n\n server {{\n listen {http_port};\n listen [::]:{http_port};\n server_name nginx3.{wtf_prefix}.wtf;\n\n root {nginx_webroot};\n\n location /.well-known/ {{\n return 404;\n }}\n\n return 301 https://$host$request_uri;\n }}\n\n server {{\n listen {other_port};\n listen [::]:{other_port};\n server_name nginx4.{wtf_prefix}.wtf nginx5.{wtf_prefix}.wtf;\n }}\n\n server {{\n listen {http_port};\n listen [::]:{http_port};\n listen {https_port} ssl;\n listen [::]:{https_port} ssl;\n if ($scheme != \"https\") {{\n return 301 https://$host$request_uri;\n }}\n server_name nginx6.{wtf_prefix}.wtf nginx7.{wtf_prefix}.wtf;\n\n ssl_certificate {cert_path};\n ssl_certificate_key {key_path};\n }}\n}}\n'''.format(nginx_root=nginx_root, nginx_webroot=nginx_webroot, user=getpass.getuser(),\n http_port=http_port, https_port=https_port, other_port=other_port,\n default_server='default_server' if default_server else '', wtf_prefix=wtf_prefix,\n key_path=key_path, cert_path=cert_path)", "def get_docker_compose_content(self):\n\n docker_compose_content: List[str] = [\n \" nginx:\",\n \" container_name: {}\".format(self.container_name),\n \" build:\",\n \" context: .\",\n \" dockerfile: 
.docker/{}\".format(self.dockerfile_name),\n \" volumes:\",\n \" - ./src:/var/www/src\",\n \" working_dir: /var/www/src\",\n \" ports:\",\n \" - '{}:{}'\".format(self.port, self.port),\n \" networks:\",\n \" - {}-network\".format(self.prefix)\n ]\n if self.depends_on_string != '':\n docker_compose_content.insert(2, self.depends_on_string)\n return docker_compose_content", "def setup():\n print(cyan('Configuring nginx on {}'.format(env.stage)))\n context = {\n 'ssl_letsencrypt': False,\n 'ssl_with_dhparam': False,\n 'ssl_cert': None,\n 'ssl_key': None,\n }\n\n if ctx('ssl.letsencrypt'):\n execute('letsencrypt.setup')\n elif ctx('ssl.key') and ctx('ssl.cert'):\n ssl = True\n dhparams = ctx('ssl.dhparam', default=False)\n key = ctx('ssl.key', default=False)\n cert = ctx('ssl.cert', default=False)\n\n if key and files.exists(key, use_sudo=True):\n context['ssl_key'] = ctx('ssl.key')\n if cert and files.exists(cert, use_sudo=True):\n context['ssl_cert'] = ctx('ssl.cert')\n if dhparams and files.exists(dhparams, use_sudo=True):\n context['ssl_with_dhparam'] = True\n if ssl:\n upload_template(\n 'nginx_ssl.template', ctx('nginx.config_path'), context=context)\n else:\n upload_template(\n 'nginx.template', ctx('nginx.config_path'), context=context)\n\n if files.exists(ctx('nginx.document_root'), use_sudo=True):\n sudo('chown -R {user}:{group} {path}'.format(\n path=ctx('nginx.document_root'), user=ctx('system.user'),\n group=ctx('system.group')))\n\n sudo('service nginx reload')", "def render_dockerfile(self):\n logger.info(\"Rendering Dockerfile...\")\n\n if self._params.get('redhat'):\n self._inject_redhat_defaults()\n\n self.image['pkg_manager'] = self._params.get('package_manager', 'yum')\n self.image.process_defaults()\n\n template_file = os.path.join(os.path.dirname(__file__),\n '..',\n 'templates',\n 'template.jinja')\n loader = FileSystemLoader(os.path.dirname(template_file))\n env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)\n env.globals['helper'] = TemplateHelper()\n env.globals['addhelp'] = self._params.get('addhelp')\n\n template = env.get_template(os.path.basename(template_file))\n\n dockerfile = os.path.join(self.target,\n 'image',\n 'Dockerfile')\n if not os.path.exists(os.path.dirname(dockerfile)):\n os.makedirs(os.path.dirname(dockerfile))\n\n with open(dockerfile, 'wb') as f:\n f.write(template.render(\n self.image).encode('utf-8'))\n logger.debug(\"Dockerfile rendered\")\n\n if self.image.get('help', {}).get('template', \"\"):\n help_template_path = self.image['help']['template']\n elif self._params.get('help_template'):\n help_template_path = self._params['help_template']\n else:\n help_template_path = os.path.join(os.path.dirname(__file__),\n '..',\n 'templates',\n 'help.jinja')\n\n help_dirname, help_basename = os.path.split(help_template_path)\n loader = FileSystemLoader(help_dirname)\n env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)\n env.globals['helper'] = TemplateHelper()\n help_template = env.get_template(help_basename)\n\n helpfile = os.path.join(self.target, 'image', 'help.md')\n with open(helpfile, 'wb') as f:\n f.write(help_template.render(\n self.image).encode('utf-8'))\n logger.debug(\"help.md rendered\")", "def nginx():\n\n get_details()\n\n context = {\n \"site_name\": env.site_name,\n \"paths\": env.paths,\n \"ip_address\": env.ip_address,\n \"site_is_secure\": env.site_is_secure,\n \"app_server\": env.app_server,\n }\n\n nginx_path = '/etc/nginx/sites-available'\n\n if exists(nginx_path):\n with cd(nginx_path):\n 
if exists(env.site_name):\n print \"nginx site configuration already exists!\"\n return\n else:\n upload_template(\"nginx_conf.txt\", \n env.site_name,\n context,\n use_jinja=True,\n template_dir=JINJA_TEMPLATE_PATH,\n use_sudo=True)\n print \"Created nginx site configuration file. Enabling site...\"\n sudo('ln -s /etc/nginx/sites-available/%s /etc/nginx/sites-enabled/%s' % (env.site_name, env.site_name))\n #print \"Site enabled. Reloading nginx...\"\n #sudo('/etc/init.d/nginx reload')\n return\n else:\n print \"It doesn't seem like you have nginx installed.\"\n return", "def render_templates(self):\n\n # dockerfile\n try:\n t = self.templates.get_template(\n 'docker/dockerfiles/{}.dockerfile.template'.format(self.repo)\n )\n except TemplateNotFound:\n t = self.templates.get_template(\n 'docker/dockerfiles/default.dockerfile.template'\n )\n\n self.files.append({\n 'name': 'Dockerfile',\n 'content': t.render(commit=self.commit),\n })\n\n # gunicorn\n t = self.templates.get_template(\n 'docker/gunicorn/gunicorn.conf.py'\n )\n self.files.append({\n 'name': 'gunicorn.conf.py',\n 'content': t.render(),\n })\n\n t = self.templates.get_template(\n 'docker/gunicorn/gunicorn.sh'\n )\n self.files.append({\n 'name': 'gunicorn.sh',\n 'content': t.render(),\n 'mode': 0555,\n })\n\n # nginx\n t = self.templates.get_template(\n 'docker/nginx/app.nginx.conf'\n )\n self.files.append({\n 'name': 'app.nginx.conf',\n 'content': t.render(),\n })\n\n t = self.templates.get_template(\n 'docker/nginx/nginx.sh'\n )\n self.files.append({\n 'name': 'nginx.sh',\n 'content': t.render(),\n 'mode': 0555,\n })\n\n # cron/, etc/ iif there exists a `self.repo` directory\n def _filter(p):\n return (\"cron/\" in p or \"etc/\" in p) and (self.repo in p) and \\\n (not os.path.basename(p).startswith('.'))\n\n for t in self.templates.list_templates(\n filter_func=_filter):\n\n self.files.append({\n 'name': os.path.basename(t),\n 'content': self.templates.get_template(t).render(),\n })", "def get_config_file_content(self):\n\n config_content: List[str] = [\n 'server {',\n\t ' listen {};'.format(self.port),\n '',\n ' ##',\n ' # PHP-FPM',\n ' ##',\n ' #location ~ \\.php$ {',\n \t ' #include /etc/nginx/fastcgi_params;',\n\t\t ' #root /var/www/src;',\n ' #fastcgi_split_path_info ^(.+?\\.php)(/.*)$;',\n ' #fastcgi_pass\tphpfpm:3002;',\n\t\t ' #fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;',\n ' #}',\n '',\n ' location / {',\n\t\t ' root /var/www/src;',\n ' index index.html;'\n\t\t ' #index index.php;',\n\t\t ' #rewrite ^ /index.php?$args last; break;',\n\t ' }',\n '}'\n ]\n return config_content", "def default_from_nginx(lines):\n\n for i in lines[\n lines.index(\"[nginx] [INFO] Test docker hub official image first:\\n\"):\n lines.index(\"[nginx] [INFO] Test clear docker image:\\n\")]:\n\n if i.startswith(\"Time taken for tests\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"default\").get(\"nginx\").update(\n {\"Time taken for tests\": num[0]}\n )\n\n if i.endswith(\"[ms] (mean)\\n\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"default\").get(\"nginx\").update(\n {\"Time per request\": num[0]}\n )\n\n if i.endswith(\"(mean, across all concurrent requests)\\n\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"default\").get(\"nginx\").update(\n {\"Time per request(all)\": num[0]}\n )\n\n if i.startswith(\"Requests per second\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"default\").get(\"nginx\").update(\n {\"Requests per second\": num[0]}\n )\n\n if i.startswith(\"Transfer 
rate\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"default\").get(\"nginx\").update(\n {\"Transfer rate\": num[0]}\n )", "def dockerfile(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"dockerfile\")", "def customization_data(client=None):\n\n # This import data contains:\n # Function inputs:\n # artifact_id\n # attachment_id\n # docker_artifact_type\n # docker_image\n # docker_input\n # docker_operation\n # incident_id\n # task_id\n # DataTables:\n # docker_integration_invocations\n # Message Destinations:\n # fn_docker\n # Functions:\n # docker_run_docker_container\n # Workflows:\n # docker_analyze_artifact_with_docker_container_amass\n # docker_analyze_artifact_with_docker_container_nsrl\n # docker_send_attachment_to_docker_container\n # Rules:\n # Docker: Amass: Search for Subdomains\n # Docker: NSRL: Validate MD5 from Whitelist\n # Docker: Volatility: Analyze Memory Sample\n\n\n yield ImportDefinition(u\"\"\"\neyJzZXJ2ZXJfdmVyc2lvbiI6IHsibWFqb3IiOiAzMSwgIm1pbm9yIjogMCwgImJ1aWxkX251bWJl\nciI6IDQyMzUsICJ2ZXJzaW9uIjogIjMxLjAuNDIzNSJ9LCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9u\nIjogMiwgImlkIjogODAsICJleHBvcnRfZGF0ZSI6IDE1NTI0OTk3NDk4MjksICJmaWVsZHMiOiBb\neyJpZCI6IDIyMywgIm5hbWUiOiAiaW5jX3RyYWluaW5nIiwgInRleHQiOiAiU2ltdWxhdGlvbiIs\nICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDAsICJ0b29sdGlwIjogIldoZXRoZXIgdGhlIGlu\nY2lkZW50IGlzIGEgc2ltdWxhdGlvbiBvciBhIHJlZ3VsYXIgaW5jaWRlbnQuICBUaGlzIGZpZWxk\nIGlzIHJlYWQtb25seS4iLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgImhpZGVfbm90aWZpY2F0\naW9uIjogZmFsc2UsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6\nIGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6\nICJjM2YwZTNlZC0yMWUxLTRkNTMtYWZmYi1mZTVjYTMzMDhjY2EiLCAib3BlcmF0aW9ucyI6IFtd\nLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IHRydWUs\nICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJp\nbmNpZGVudC9pbmNfdHJhaW5pbmciLCAidGVtcGxhdGVzIjogW10sICJkZXByZWNhdGVkIjogZmFs\nc2V9LCB7ImlkIjogMzAxLCAibmFtZSI6ICJ0YXNrX2lkIiwgInRleHQiOiAidGFza19pZCIsICJw\ncmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDExLCAidG9vbHRpcCI6ICIiLCAicGxhY2Vob2xkZXIi\nOiAiIiwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2Us\nICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxh\nbmtfb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICJiYTMxODI2MS1l\nZDZhLTRhMzgtYTE4Ny05ZTBiNjhkMTYwNGYiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9u\nX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJs\nZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi90\nYXNrX2lkIiwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfSwgeyJpZCI6IDM1\nMCwgIm5hbWUiOiAiZG9ja2VyX2FydGlmYWN0X3R5cGUiLCAidGV4dCI6ICJkb2NrZXJfYXJ0aWZh\nY3RfdHlwZSIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDExLCAidG9vbHRpcCI6ICJUaGUg\ndHlwZSBvZiBhcnRpZmFjdCB0aGF0IHRoaXMgaW50ZWdyYXRpb24gd2FzIHJhbiBhZ2FpbnN0LiBO\nb3QgdXNlZCBmb3IgYXR0YWNobWVudCB3b3JrZmxvd3MuIiwgInBsYWNlaG9sZGVyIjogIiIsICJp\nbnB1dF90eXBlIjogInRleHQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3NlbiI6\nIGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJibGFua19vcHRpb24i\nOiBmYWxzZSwgImludGVybmFsIjogZmFsc2UsICJ1dWlkIjogIjIwYjVmNjYxLWI1NjItNGE3OC04\nYTQwLTNkOWM5ZjI0Y2I2OCIsICJvcGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7\nfSwgInZhbHVlcyI6IFtdLCAicmVhZF9vbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwg\nInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL2RvY2tlcl9hcnRp\nZmFjdF90eXBlIiwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfSwgeyJpZCI6\nIDI5NywgIm5hbWUiOiAiYXJ
0aWZhY3RfaWQiLCAidGV4dCI6ICJhcnRpZmFjdF9pZCIsICJwcmVm\naXgiOiBudWxsLCAidHlwZV9pZCI6IDExLCAidG9vbHRpcCI6ICIiLCAicGxhY2Vob2xkZXIiOiAi\nIiwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJj\naG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtf\nb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICI2MmQ5MzEwNS03MDVk\nLTQ4NzYtOTgxMy1lNjBlZTQzZTE5ZWQiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Bl\ncm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6\nIHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9hcnRp\nZmFjdF9pZCIsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sIHsiaWQiOiAy\nOTksICJuYW1lIjogImRvY2tlcl9pbnB1dCIsICJ0ZXh0IjogImRvY2tlcl9pbnB1dCIsICJwcmVm\naXgiOiBudWxsLCAidHlwZV9pZCI6IDExLCAidG9vbHRpcCI6ICJBbiBpbnB1dCB0byBiZSBmZWQg\naW50byBhIGRvY2tlciBjb250YWluZXIuIEludGVuZGVkIGZvciB1c2Ugd2l0aCBhcnRpZmFjdCB2\nYWx1ZXMiLCAicGxhY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUiOiAidGV4dCIsICJoaWRlX25v\ndGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9z\nZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwg\nInV1aWQiOiAiNGZjMzA5ZjEtMzQwNi00NjRmLWJlNmQtZDM3OWRjMjNkNDExIiwgIm9wZXJhdGlv\nbnMiOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHki\nOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRf\na2V5IjogIl9fZnVuY3Rpb24vZG9ja2VyX2lucHV0IiwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVj\nYXRlZCI6IGZhbHNlfSwgeyJpZCI6IDM1MywgIm5hbWUiOiAiZG9ja2VyX29wZXJhdGlvbiIsICJ0\nZXh0IjogImRvY2tlcl9vcGVyYXRpb24iLCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAxMSwg\nInRvb2x0aXAiOiAiQSBwYXJhbSB2YWx1ZSB0byBiZSBmZWQgdG8gYSBjb250YWluZXIncyBydW4g\nY29tbWFuZCBzcGVjaWZ5aW5nIGEgcGFydGljdWxhciBlbnRyeXBvaW50IG9yIGZ1bmN0aW9uIGZv\nciB0aGF0IGltYWdlLiBVc2VkIGZvciBjb250YWluZXJzIHdoaWNoIGhhdmUgbXVsdGlwbGUgcG9z\nc2libGUgb3BlcmF0aW9ucyB5b3UgY2FuIHBlcmZvcm0gaW4gdGhlbSBzdWNoIGFzIFZvbGF0aWxp\ndHkiLCAicGxhY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUiOiAidGV4dCIsICJoaWRlX25vdGlm\naWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2\nZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1\naWQiOiAiZTg5M2UwOGQtOTQwOC00NDQ5LTg5YWItOTI4YzFjZTFlNGQ0IiwgIm9wZXJhdGlvbnMi\nOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBm\nYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5\nIjogIl9fZnVuY3Rpb24vZG9ja2VyX29wZXJhdGlvbiIsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJl\nY2F0ZWQiOiBmYWxzZX0sIHsiaWQiOiAzMDAsICJuYW1lIjogImF0dGFjaG1lbnRfaWQiLCAidGV4\ndCI6ICJhdHRhY2htZW50X2lkIiwgInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTEsICJ0b29s\ndGlwIjogIiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJudW1iZXIiLCAiaGlk\nZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5f\nYnlfc2VydmVyIjogZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwgImludGVybmFsIjogZmFs\nc2UsICJ1dWlkIjogIjE2Nzc3MTZhLWE5NWUtNGY1NS04ZTNlLTUzOTllNmQzYmQ5NiIsICJvcGVy\nYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtdLCAicmVhZF9v\nbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhw\nb3J0X2tleSI6ICJfX2Z1bmN0aW9uL2F0dGFjaG1lbnRfaWQiLCAidGVtcGxhdGVzIjogW10sICJk\nZXByZWNhdGVkIjogZmFsc2V9LCB7ImlkIjogMzM1LCAibmFtZSI6ICJkb2NrZXJfaW1hZ2UiLCAi\ndGV4dCI6ICJkb2NrZXJfaW1hZ2UiLCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAxMSwgInRv\nb2x0aXAiOiAiQW4gSW1hZ2UgdG8gYmUgdXNlZCB0byBjcmVhdGUgYSBjb250YWluZXIuIE11c3Qg\nYmUgYW4gYXBwcm92ZWQgaW1hZ2Ugd2hpY2ggaXMgc2V0IGluIHRoZSBhcHAuY29uZmlnLiIsICJw\nbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJzZWxlY3QiLCAicmVxdWlyZWQi
OiAiYWx3\nYXlzIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1\nbHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogdHJ1ZSwgImludGVy\nbmFsIjogZmFsc2UsICJ1dWlkIjogImU2ZDY2YmFjLTg0MWQtNDAzZi04MmZhLTg2MmRjM2NkMjIy\nZiIsICJvcGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFt7\nInZhbHVlIjogMTg1MCwgImxhYmVsIjogInZvbGF0aWxpdHkiLCAiZW5hYmxlZCI6IHRydWUsICJw\ncm9wZXJ0aWVzIjogbnVsbCwgInV1aWQiOiAiN2YzNmEyODUtYjJiMC00MDFiLWEwY2EtYTQ3OGFl\nOTBiZTZiIiwgImhpZGRlbiI6IGZhbHNlLCAiZGVmYXVsdCI6IHRydWV9LCB7InZhbHVlIjogMTg1\nMSwgImxhYmVsIjogIm5zcmwiLCAiZW5hYmxlZCI6IHRydWUsICJwcm9wZXJ0aWVzIjogbnVsbCwg\nInV1aWQiOiAiZjY0OTk5YmEtMjc4Ny00YjIxLThjNmMtMWUwZDQ5NzYwMDllIiwgImhpZGRlbiI6\nIGZhbHNlLCAiZGVmYXVsdCI6IGZhbHNlfSwgeyJ2YWx1ZSI6IDE5MDAsICJsYWJlbCI6ICJhbWFz\ncyIsICJlbmFibGVkIjogdHJ1ZSwgInByb3BlcnRpZXMiOiBudWxsLCAidXVpZCI6ICIzNTY2MTlm\nOC0zYWViLTQ3YTMtODRiYi1jMzM1YzFhMTNiYWYiLCAiaGlkZGVuIjogZmFsc2UsICJkZWZhdWx0\nIjogZmFsc2V9XSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNo\nX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9kb2NrZXJfaW1hZ2UiLCAi\ndGVtcGxhdGVzIjogW10sICJkZXByZWNhdGVkIjogZmFsc2V9LCB7ImlkIjogMjk4LCAibmFtZSI6\nICJpbmNpZGVudF9pZCIsICJ0ZXh0IjogImluY2lkZW50X2lkIiwgInByZWZpeCI6IG51bGwsICJ0\neXBlX2lkIjogMTEsICJ0b29sdGlwIjogIiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlw\nZSI6ICJudW1iZXIiLCAicmVxdWlyZWQiOiAiYWx3YXlzIiwgImhpZGVfbm90aWZpY2F0aW9uIjog\nZmFsc2UsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNl\nLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICI4MTFl\nOTlkNy1kMTk0LTRjZTgtODZjYy1hZmY1ZTAxYWI4NWMiLCAib3BlcmF0aW9ucyI6IFtdLCAib3Bl\ncmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hh\nbmdlYWJsZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5j\ndGlvbi9pbmNpZGVudF9pZCIsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX1d\nLCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJ1cGRhdGVfZGF0ZSI6IDE1NTI1MDA0MDQ3NTYsICJjcmVh\ndGVfZGF0ZSI6IDE1NTI1MDA0MDQ3NTYsICJ1dWlkIjogImJmZWVjMmQ0LTM3NzAtMTFlOC1hZDM5\nLTRhMDAwNDA0NGFhMCIsICJkZXNjcmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChp\nbnRlcm5hbCkiLCAiZXhwb3J0X2tleSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5h\nbCkiLCAibmFtZSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZW5hYmxl\nZCI6IGZhbHNlLCAic3lzdGVtIjogZmFsc2UsICJwYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVuIjog\nZmFsc2UsICJpZCI6IDB9XSwgInBoYXNlcyI6IFtdLCAiYXV0b21hdGljX3Rhc2tzIjogW10sICJv\ndmVycmlkZXMiOiBbXSwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW3sibmFtZSI6ICJEb2NrZXIg\nTWVzc2FnZSBEZXN0aW5hdGlvbiIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJmbl9kb2NrZXIiLCAi\nZGVzdGluYXRpb25fdHlwZSI6IDAsICJleHBlY3RfYWNrIjogdHJ1ZSwgInVzZXJzIjogWyJhbGZy\nZWRAd2F5bmVjb3JwLmNvbSIsICJpbnRlZ3JhdGlvbi1zZXJ2ZXIuYWxmcmVkQHdheW5lY29ycC5j\nb20iXSwgInV1aWQiOiAiMDM0NTVlODEtYWFiNC00YzVhLWI1ZDUtMmFhZGQ3Yjk1Zjc5IiwgImV4\ncG9ydF9rZXkiOiAiZm5fZG9ja2VyIn1dLCAiYWN0aW9ucyI6IFt7ImlkIjogODEsICJuYW1lIjog\nIkRvY2tlcjogQW1hc3M6IFNlYXJjaCBmb3IgU3ViZG9tYWlucyIsICJ0eXBlIjogMSwgIm9iamVj\ndF90eXBlIjogImFydGlmYWN0IiwgImNvbmRpdGlvbnMiOiBbeyJtZXRob2QiOiAiZXF1YWxzIiwg\nImZpZWxkX25hbWUiOiAiYXJ0aWZhY3QudHlwZSIsICJ2YWx1ZSI6ICJETlMgTmFtZSIsICJ0eXBl\nIjogbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVz\nc2FnZV9kZXN0aW5hdGlvbnMiOiBbXSwgIndvcmtmbG93cyI6IFsiZG9ja2VyX2FuYWx5emVfYXJ0\naWZhY3Rfd2l0aF9kb2NrZXJfY29udGFpbmVyX2FtYXNzIl0sICJ2aWV3X2l0ZW1zIjogW10sICJ0\naW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInV1aWQiOiAiNzM0MTVmMGUtNTIyNi00OGVjLTgzZjYt\nMWQwMjk3MzU3MGExIiwgImV4cG9ydF9rZXkiOiAiRG9ja2VyOiBBbWFzczogU2VhcmNoIGZvciBT\ndWJkb21haW5zIiwgImxvZ2ljX3R5cGUiOiA
iYWxsIn0sIHsiaWQiOiA2MSwgIm5hbWUiOiAiRG9j\na2VyOiBOU1JMOiBWYWxpZGF0ZSBNRDUgZnJvbSBXaGl0ZWxpc3QiLCAidHlwZSI6IDEsICJvYmpl\nY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJjb25kaXRpb25zIjogW3sibWV0aG9kIjogImVxdWFscyIs\nICJmaWVsZF9uYW1lIjogImFydGlmYWN0LnR5cGUiLCAidmFsdWUiOiAiTWFsd2FyZSBNRDUgSGFz\naCIsICJ0eXBlIjogbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6\nIFtdLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbXSwgIndvcmtmbG93cyI6IFsiZG9ja2VyX2Fu\nYWx5emVfYXJ0aWZhY3Rfd2l0aF9kb2NrZXJfY29udGFpbmVyX25zcmwiXSwgInZpZXdfaXRlbXMi\nOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVpZCI6ICJmNDk0NjhlNC1hZmQ2LTRl\nZGQtOWZkYy00NTgxZmRmOTZhYzUiLCAiZXhwb3J0X2tleSI6ICJEb2NrZXI6IE5TUkw6IFZhbGlk\nYXRlIE1ENSBmcm9tIFdoaXRlbGlzdCIsICJsb2dpY190eXBlIjogImFsbCJ9LCB7ImlkIjogMjgs\nICJuYW1lIjogIkRvY2tlcjogVm9sYXRpbGl0eTogQW5hbHl6ZSBNZW1vcnkgU2FtcGxlIiwgInR5\ncGUiOiAxLCAib2JqZWN0X3R5cGUiOiAiYXR0YWNobWVudCIsICJjb25kaXRpb25zIjogW3sibWV0\naG9kIjogImNvbnRhaW5zIiwgImZpZWxkX25hbWUiOiAiYXR0YWNobWVudC5uYW1lIiwgInZhbHVl\nIjogIi52bWVtIiwgInR5cGUiOiBudWxsLCAiZXZhbHVhdGlvbl9pZCI6IG51bGx9XSwgImF1dG9t\nYXRpb25zIjogW10sICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdLCAid29ya2Zsb3dzIjogWyJk\nb2NrZXJfc2VuZF9hdHRhY2htZW50X3RvX2RvY2tlcl9jb250YWluZXIiXSwgInZpZXdfaXRlbXMi\nOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVpZCI6ICI3ZDA2OTI2Zi0yOGEyLTQ4\nY2EtOGRlNS1iZjk2ZDk1MGJiZmQiLCAiZXhwb3J0X2tleSI6ICJEb2NrZXI6IFZvbGF0aWxpdHk6\nIEFuYWx5emUgTWVtb3J5IFNhbXBsZSIsICJsb2dpY190eXBlIjogImFsbCJ9XSwgImxheW91dHMi\nOiBbXSwgIm5vdGlmaWNhdGlvbnMiOiBudWxsLCAidGltZWZyYW1lcyI6IG51bGwsICJsb2NhbGUi\nOiBudWxsLCAiaW5kdXN0cmllcyI6IG51bGwsICJyZWd1bGF0b3JzIjogbnVsbCwgImdlb3MiOiBu\ndWxsLCAidGFza19vcmRlciI6IFtdLCAiYWN0aW9uX29yZGVyIjogW10sICJ0eXBlcyI6IFt7Imlk\nIjogbnVsbCwgInR5cGVfaWQiOiA4LCAidHlwZV9uYW1lIjogImRvY2tlcl9pbnRlZ3JhdGlvbl9p\nbnZvY2F0aW9ucyIsICJmaWVsZHMiOiB7ImRvY2tlcl9saW5rcyI6IHsiaWQiOiAzNDgsICJuYW1l\nIjogImRvY2tlcl9saW5rcyIsICJ0ZXh0IjogIkxpbmtzIiwgInByZWZpeCI6IG51bGwsICJ0eXBl\nX2lkIjogMTAwMSwgInRvb2x0aXAiOiAiUmVsZXZhbnQgbGlua3MgYmFjayB0byB0aGUgdGFzaywg\naWYgdGFzayBiYXNlZCIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0YXJl\nYSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogdHJ1ZSwgImRlZmF1bHRf\nY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogdHJ1ZSwgImludGVybmFs\nIjogZmFsc2UsICJ1dWlkIjogIjA2ZDg1ZWFjLTVhNGUtNDNhMy05ZjViLWU3NGZlYzFlNjgyMiIs\nICJvcGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtdLCAi\ncmVhZF9vbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IHRydWUs\nICJleHBvcnRfa2V5IjogImRvY2tlcl9pbnRlZ3JhdGlvbl9pbnZvY2F0aW9ucy9kb2NrZXJfbGlu\na3MiLCAib3JkZXIiOiA2LCAid2lkdGgiOiA2NSwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRl\nZCI6IGZhbHNlfSwgImRvY2tlcl9hdHRhY2htZW50X25hbWUiOiB7ImlkIjogMzUyLCAibmFtZSI6\nICJkb2NrZXJfYXR0YWNobWVudF9uYW1lIiwgInRleHQiOiAiQXR0YWNobWVudCBOYW1lIiwgInBy\nZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTAwMSwgInRvb2x0aXAiOiAiVGhlIG5hbWUgb2YgdGhl\nIGF0dGFjaG1lbnQgdGhhdCB3YXMgc2VudCB0byB0aGUgRG9ja2VyIGNvbnRhaW5lci4gV2lsbCBi\nZSBibGFuayBpZiByYW4gYXQgYW4gYXJ0aWZhY3QgbGV2ZWwuIiwgInBsYWNlaG9sZGVyIjogIiIs\nICJpbnB1dF90eXBlIjogInRleHQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3Nl\nbiI6IHRydWUsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlv\nbiI6IHRydWUsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICI4YmFiMGJkNC1lMWI0LTQxOGEt\nYWY5ZC03OTE2YTg1NGQ2OGIiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjog\ne30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUs\nICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiZG9ja2VyX2ludGVncmF0aW9uX2lu\ndm9jYXRpb25zL2RvY2tlcl9hdHRhY2htZW50X25hbWUiLCAib3JkZXIiOiAzLCAid2lkdGgiOiAx\nMD
csICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sICJkb2NrZXJfdGltZXN0\nYW1wIjogeyJpZCI6IDM0NCwgIm5hbWUiOiAiZG9ja2VyX3RpbWVzdGFtcCIsICJ0ZXh0IjogIklu\ndGVncmF0aW9uIFJ1biBUaW1lIiwgInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTAwMSwgInRv\nb2x0aXAiOiAiVGhlIHRpbWUgdGhhdCB0aGUgZnVuY3Rpb24gZmluaXNoZWQuIiwgInBsYWNlaG9s\nZGVyIjogIiIsICJpbnB1dF90eXBlIjogImRhdGV0aW1lcGlja2VyIiwgInJlcXVpcmVkIjogImFs\nd2F5cyIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogdHJ1ZSwgImRlZmF1\nbHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogdHJ1ZSwgImludGVy\nbmFsIjogZmFsc2UsICJ1dWlkIjogImVlOTQwNjEwLTY5N2EtNGMzOS05NWRjLTYyMWY2YTU1NjA3\nNyIsICJvcGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtd\nLCAicmVhZF9vbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZh\nbHNlLCAiZXhwb3J0X2tleSI6ICJkb2NrZXJfaW50ZWdyYXRpb25faW52b2NhdGlvbnMvZG9ja2Vy\nX3RpbWVzdGFtcCIsICJvcmRlciI6IDAsICJ3aWR0aCI6IDkwLCAidGVtcGxhdGVzIjogW10sICJk\nZXByZWNhdGVkIjogZmFsc2V9LCAiZG9ja2VyX2NvbnRhaW5lcl9pZCI6IHsiaWQiOiAzNDUsICJu\nYW1lIjogImRvY2tlcl9jb250YWluZXJfaWQiLCAidGV4dCI6ICJEb2NrZXIgQ29udGFpbmVyIElE\nIiwgInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTAwMSwgInRvb2x0aXAiOiAiVGhlIElEIG9m\nIHRoZSBjb250YWluZXIgdGhhdCB3YXMgdXNlZC4gIiwgInBsYWNlaG9sZGVyIjogIiIsICJpbnB1\ndF90eXBlIjogInRleHQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3NlbiI6IHRy\ndWUsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IHRy\ndWUsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICIxNjJhYWY2MC0wYTJkLTQxYjMtYjQ3My1j\nZTBkOTRkNDY2MDEiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2\nYWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNo\nX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiZG9ja2VyX2ludGVncmF0aW9uX2ludm9jYXRp\nb25zL2RvY2tlcl9jb250YWluZXJfaWQiLCAib3JkZXIiOiA0LCAid2lkdGgiOiAxOTYsICJ0ZW1w\nbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sICJkb2NrZXJfYXJ0aWZhY3RfdmFsdWUi\nOiB7ImlkIjogMzQ5LCAibmFtZSI6ICJkb2NrZXJfYXJ0aWZhY3RfdmFsdWUiLCAidGV4dCI6ICJB\ncnRpZmFjdCBWYWx1ZSIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDEwMDEsICJ0b29sdGlw\nIjogIlRoZSBhcnRpZmFjdCB0aGF0IHdhcyBzZW50IHRvIHRoZSBEb2NrZXIgY29udGFpbmVyLiBX\naWxsIGJlIGJsYW5rIGlmIHJhbiBhdCBhbiBhdHRhY2htZW50IGxldmVsLiIsICJwbGFjZWhvbGRl\nciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2Us\nICJjaG9zZW4iOiB0cnVlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJibGFu\na19vcHRpb24iOiB0cnVlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1aWQiOiAiMDY5ZDU4NTItZTA0\nMi00MjgxLWI0YzktZjc2OTY3NTNjZjNhIiwgIm9wZXJhdGlvbnMiOiBbXSwgIm9wZXJhdGlvbl9w\nZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBmYWxzZSwgImNoYW5nZWFibGUi\nOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5IjogImRvY2tlcl9pbnRlZ3Jh\ndGlvbl9pbnZvY2F0aW9ucy9kb2NrZXJfYXJ0aWZhY3RfdmFsdWUiLCAib3JkZXIiOiAyLCAid2lk\ndGgiOiAxMDMsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sICJkb2NrZXJf\nYXJ0aWZhY3RfdHlwZSI6IHsiaWQiOiAzNTEsICJuYW1lIjogImRvY2tlcl9hcnRpZmFjdF90eXBl\nIiwgInRleHQiOiAiQXJ0aWZhY3QgVHlwZSIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDEw\nMDEsICJ0b29sdGlwIjogIlRoZSB0eXBlIG9mIGFydGlmYWN0IHRoYXQgd2FzIHVzZWQgYXMgYW4g\naW5wdXQuIFdpbGwgYmUgYmxhbmsgaWYgcmFuIGF0IGFuIGF0dGFjaG1lbnQgbGV2ZWwuIiwgInBs\nYWNlaG9sZGVyIjogIiIsICJpbnB1dF90eXBlIjogInRleHQiLCAiaGlkZV9ub3RpZmljYXRpb24i\nOiBmYWxzZSwgImNob3NlbiI6IHRydWUsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxz\nZSwgImJsYW5rX29wdGlvbiI6IHRydWUsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICJlZjQy\nNTdjYy00YzhkLTQ1NGYtOWJkNy00ODVlNTA3MjMwMmUiLCAib3BlcmF0aW9ucyI6IFtdLCAib3Bl\ncmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hh\nbmdlYWJsZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSw
gImV4cG9ydF9rZXkiOiAiZG9ja2Vy\nX2ludGVncmF0aW9uX2ludm9jYXRpb25zL2RvY2tlcl9hcnRpZmFjdF90eXBlIiwgIm9yZGVyIjog\nMSwgIndpZHRoIjogNzcsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sICJk\nb2NrZXJfaW1hZ2UiOiB7ImlkIjogMzQ2LCAibmFtZSI6ICJkb2NrZXJfaW1hZ2UiLCAidGV4dCI6\nICJEb2NrZXIgSW1hZ2UgJiBPcGVyYXRpb24iLCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAx\nMDAxLCAidG9vbHRpcCI6ICJUaGUgbmFtZSBvZiB0aGUgaW1hZ2UgdGhhdCB3YXMgdXNlZC4gSW4g\nc29tZSBjYXNlcyBhIHNwZWNpZmllZCBvcGVyYXRpb24gd2lsbCBiZSBzZW50IHRvIHRoZSBjb250\nYWluZXIgaW4gY2FzZXMgd2hlcmUgdGhlcmUgYXJlIG11bHRpcGxlIHBvc3NpYmxlIGVudHJ5cG9p\nbnRzLiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgImhpZGVfbm90\naWZpY2F0aW9uIjogZmFsc2UsICJjaG9zZW4iOiB0cnVlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2Vy\ndmVyIjogZmFsc2UsICJibGFua19vcHRpb24iOiB0cnVlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1\naWQiOiAiMDUwNDZlMTgtYTQ5OS00MWNhLTg2NzAtNjM1OTNjMzIyN2I2IiwgIm9wZXJhdGlvbnMi\nOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBm\nYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5\nIjogImRvY2tlcl9pbnRlZ3JhdGlvbl9pbnZvY2F0aW9ucy9kb2NrZXJfaW1hZ2UiLCAib3JkZXIi\nOiA1LCAid2lkdGgiOiAxMzEsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX19\nLCAicHJvcGVydGllcyI6IHsiY2FuX2NyZWF0ZSI6IGZhbHNlLCAiY2FuX2Rlc3Ryb3kiOiBmYWxz\nZSwgImZvcl93aG8iOiBbXX0sICJwYXJlbnRfdHlwZXMiOiBbImluY2lkZW50Il0sICJkaXNwbGF5\nX25hbWUiOiAiRG9ja2VyIEludGVncmF0aW9uIEludm9jYXRpb25zIiwgImZvcl9ub3RpZmljYXRp\nb25zIjogZmFsc2UsICJmb3JfYWN0aW9ucyI6IGZhbHNlLCAiZm9yX2N1c3RvbV9maWVsZHMiOiBm\nYWxzZSwgImV4cG9ydF9rZXkiOiAiZG9ja2VyX2ludGVncmF0aW9uX2ludm9jYXRpb25zIiwgInV1\naWQiOiAiZjQxOGRhYWUtMTg5OC00ODFmLWI2YTItYmRlODgxY2RhZWIzIiwgImFjdGlvbnMiOiBb\nXSwgInNjcmlwdHMiOiBbXX1dLCAic2NyaXB0cyI6IFtdLCAiaW5jaWRlbnRfYXJ0aWZhY3RfdHlw\nZXMiOiBbXSwgIndvcmtmbG93cyI6IFt7IndvcmtmbG93X2lkIjogNTUsICJuYW1lIjogIkV4YW1w\nbGU6IERvY2tlcjpTZW5kIEFydGlmYWN0IFRvIERvY2tlciBDb250YWluZXIgKE5TUkwpIiwgInBy\nb2dyYW1tYXRpY19uYW1lIjogImRvY2tlcl9hbmFseXplX2FydGlmYWN0X3dpdGhfZG9ja2VyX2Nv\nbnRhaW5lcl9uc3JsIiwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgImRlc2NyaXB0aW9uIjog\nIkFuIGV4YW1wbGUgd29ya2Zsb3cgc2NvcGVkIGZvciBBcnRpZmFjdHMgd2hpY2ggd2lsbCwgd2hl\nbiBpbnZva2VkLCBzZW5kIHRoZSBhcnRpZmFjdCB0byBhIERvY2tlciBjb250YWluZXIsIHBlcmZv\ncm0gc29tZSBvcGVyYXRpb24gb24gdGhlIGlucHV0IGFuZCByZXR1cm5zIGluZm9ybWF0aW9uIHRv\nIFJlc2lsaWVudC4iLCAiY3JlYXRvcl9pZCI6ICJhbGZyZWRAd2F5bmVjb3JwLmNvbSIsICJsYXN0\nX21vZGlmaWVkX2J5IjogImFsZnJlZEB3YXluZWNvcnAuY29tIiwgImxhc3RfbW9kaWZpZWRfdGlt\nZSI6IDE1NTE5NTQxMzQwNjAsICJleHBvcnRfa2V5IjogImRvY2tlcl9hbmFseXplX2FydGlmYWN0\nX3dpdGhfZG9ja2VyX2NvbnRhaW5lcl9uc3JsIiwgInV1aWQiOiAiMDI2NGE3MTMtMGFiYi00M2Mx\nLTgzMmUtYjM0MmYzYTgxYzA2IiwgImNvbnRlbnQiOiB7IndvcmtmbG93X2lkIjogImRvY2tlcl9h\nbmFseXplX2FydGlmYWN0X3dpdGhfZG9ja2VyX2NvbnRhaW5lcl9uc3JsIiwgInhtbCI6ICI8P3ht\nbCB2ZXJzaW9uPVwiMS4wXCIgZW5jb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMgeG1sbnM9\nXCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIgeG1sbnM6YnBt\nbmRpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHhtbG5zOm9t\nZ2RjPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4bWxuczpvbWdk\naT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6cmVzaWxp\nZW50PVwiaHR0cDovL3Jlc2lsaWVudC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9XCJodHRwOi8v\nd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93d3cudzMub3Jn\nLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDovL3d3dy5j\nYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3MgaWQ9XCJkb2NrZXJfYW5hbHl6ZV9hcnRpZmFjdF93\naXRoX2RvY2tlcl9jb250YWluZXJfbnNybFwiIGlzRXhlY3V0YWJsZT1cInRydWVcIiBuYW1lPVwi\nRXhhbXBsZTogRG
9ja2VyOlNlbmQgQXJ0aWZhY3QgVG8gRG9ja2VyIENvbnRhaW5lciAoTlNSTClc\nIj48ZG9jdW1lbnRhdGlvbj5BbiBleGFtcGxlIHdvcmtmbG93IHNjb3BlZCBmb3IgQXJ0aWZhY3Rz\nIHdoaWNoIHdpbGwsIHdoZW4gaW52b2tlZCwgc2VuZCB0aGUgYXJ0aWZhY3QgdG8gYSBEb2NrZXIg\nY29udGFpbmVyLCBwZXJmb3JtIHNvbWUgb3BlcmF0aW9uIG9uIHRoZSBpbnB1dCBhbmQgcmV0dXJu\ncyBpbmZvcm1hdGlvbiB0byBSZXNpbGllbnQuPC9kb2N1bWVudGF0aW9uPjxzdGFydEV2ZW50IGlk\nPVwiU3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xaWJiOTNuPC9v\ndXRnb2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMDNocnlz\nNFwiIG5hbWU9XCJEb2NrZXI6IFJ1biBEb2NrZXIgQ29udGFpbmVyXCIgcmVzaWxpZW50OnR5cGU9\nXCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9\nXCI3YTIyMGJlMy0wNWY3LTRiMTctYTFhNy05N2I0MDc2ZTE0YmVcIj57XCJpbnB1dHNcIjp7XCJl\nNmQ2NmJhYy04NDFkLTQwM2YtODJmYS04NjJkYzNjZDIyMmZcIjp7XCJpbnB1dF90eXBlXCI6XCJz\ndGF0aWNcIixcInN0YXRpY19pbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJzZWxl\nY3RfdmFsdWVcIjpcImY2NDk5OWJhLTI3ODctNGIyMS04YzZjLTFlMGQ0OTc2MDA5ZVwifX19LFwi\ncG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwibm90ZV90ZXh0X3N0YXJ0ID0gdVxcXCJcXFwiXFxc\nIiZsdDtiJmd0O0RvY2tlciBJbnRlZ3JhdGlvbiZsdDsvYiZndDtcXG4gICAgICAgICAgICAgICZs\ndDticiZndDsmbHQ7YnImZ3Q7QSBjb250YWluZXIgd2FzIHJhbiB1c2luZyB0aGUgaW1hZ2UgJmx0\nO2ImZ3Q7ezB9Jmx0Oy9iJmd0O1xcXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5pbnB1dHNbXFxc\nImRvY2tlcl9pbWFnZVxcXCJdW1xcXCJuYW1lXFxcIl0pXFxuICAgICAgICAgICAgICBcXG4jIElm\nIHRoZSBBdHRhY2htZW50IGF0dHJpYnV0ZSBvZiB0aGUgY29udGVudCBwYXlsb2FkIGlzIHNldDsg\nd2UgYXJlIGRlYWxpbmcgd2l0aCBhbiBhdHRhY2htZW50XFxuaWYgcmVzdWx0cy5jb250ZW50W1xc\nXCJhdHRhY2htZW50X25hbWVcXFwiXSAhPSBOb25lOlxcbiAgbm90ZV90ZXh0X2F0dGFjaG1lbnQg\nPSB1XFxcIlxcXCJcXFwiJmx0O2JyJmd0OyBPbiBhbiBBdHRhY2htZW50IHdpdGggbmFtZSB7MH0g\nXFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmNvbnRlbnRbXFxcImF0dGFjaG1lbnRfbmFtZVxc\nXCJdKVxcbiAgbm90ZV90ZXh0X3N0YXJ0ICs9IG5vdGVfdGV4dF9hdHRhY2htZW50XFxuXFxuIyBP\ndGhlcndpc2Ugd2UgYXJlIGRlYWxpbmcgd2l0aCBhbiBhcnRpZmFjdFxcbmVsc2U6XFxuICBub3Rl\nX3RleHRfYXJ0aWZhY3QgPSB1XFxcIlxcXCJcXFwiJmx0O2JyJmd0OyBPbiBhbiBBcnRpZmFjdCBv\nZiBUeXBlOiAmbHQ7YiZndDt7MH0mbHQ7L2ImZ3Q7XFxuICAgICAgICAgICAgICAgICAgICAgICAg\nICAmbHQ7YnImZ3Q7IEFydGlmYWN0IFZhbHVlOiAmbHQ7YiZndDt7MX0mbHQ7L2ImZ3Q7XFxcIlxc\nXCJcXFwiLmZvcm1hdChyZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2FydGlmYWN0X3R5cGVcXFwi\nXSwgcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tlcl9pbnB1dFxcXCJdKVxcbiAgbm90ZV90ZXh0X3N0\nYXJ0ICs9IG5vdGVfdGV4dF9hcnRpZmFjdFxcbiAgICAgICAgICAgICAgXFxubm90ZV90ZXh0X2Vu\nZCA9IFxcXCJcXFwiXFxcIiZsdDticiZndDtDb250YWluZXIgSUQgOiAmbHQ7YiZndDt7MH0mbHQ7\nL2ImZ3Q7XFxuICAgICAgICAgICAgICAmbHQ7YnImZ3Q7Q29udGFpbmVyIGV4aXQgY29kZSA6ICZs\ndDtiJmd0O3sxfSZsdDsvYiZndDtcXG4gICAgICAgICAgICAgICZsdDticiZndDsmbHQ7YnImZ3Q7\nIENvbnRhaW5lciBMb2dzIGhhdmUgYmVlbiBzYXZlZCBhcyBhbiBhdHRhY2htZW50LlxcbiAgICAg\nICAgICAgICAgQ29udGFpbmVyIFN0YXRzLCBMb2dzLCBGdW5jdGlvbiBJbnB1dHMgb3IgUnVuIFRp\nbWUgTWV0cmljcyBhcmUgYWxzbyBhdmFpbGFibGUgYXMgcGFydCBvZiB0aGUgcmVzdWx0IHBheWxv\nYWRcXFwiXFxcIlxcXCIuZm9ybWF0KFxcbiAgICAgICAgICAgICAgICByZXN1bHRzLmNvbnRlbnRb\nXFxcImNvbnRhaW5lcl9pZFxcXCJdLCByZXN1bHRzLmNvbnRlbnRbXFxcImNvbnRhaW5lcl9leGl0\nX3N0YXR1c1xcXCJdKVxcblxcbm5vdGVfdGV4dCA9IG5vdGVfdGV4dF9zdGFydCtub3RlX3RleHRf\nZW5kXFxuaW5jaWRlbnQuYWRkTm90ZShoZWxwZXIuY3JlYXRlUmljaFRleHQobm90ZV90ZXh0KSlc\nXG5cXG50cnk6XFxuICAgIGRlcyA9IGFydGlmYWN0LmRlc2NyaXB0aW9uLmNvbnRlbnRcXG5leGNl\ncHQgRXhjZXB0aW9uOlxcbiAgZGVzID0gTm9uZVxcbiAgXFxuaWYgZGVzIGlzIE5vbmU6XFxuICBc\nXG4gIGFydGlmYWN0LmRlc2NyaXB0aW9uID0gdVxcXCJcXFwiXFxcIkRvY2tlciBJbnRlZ3JhdGlv\nbjpcXFxcbiBBcnRpZmFjdCB3YXMgc2Nhbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9ICBcXFxcbiBS\nZXN1bHRzOlxcXFxuezF9XFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLml
ucHV0c1tcXFwiZG9j\na2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSxyZXN1bHRzLmNvbnRlbnRbXFxcImxvZ3NcXFwi\nXSlcXG4gICMgVW5jb21tZW50IHRoaXMgbGluZSB0byBOT1QgaGF2ZSB0aGUgcmVzdWx0cyBhcHBl\nbmRlZCB0byB0aGUgZGVzY3JpcHQgb2YgdGhlIGFydGlmYWN0XFxuICAjYXJ0aWZhY3QuZGVzY3Jp\ncHRpb24gPSB1XFxcIlxcXCJcXFwiRG9ja2VyIEludGVncmF0aW9uOiBBcnRpZmFjdCB3YXMgc2Nh\nbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9XFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmlucHV0\nc1tcXFwiZG9ja2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSlcXG5lbHNlOlxcbiAgXFxuICBh\ncnRpZmFjdC5kZXNjcmlwdGlvbiA9IGRlcyArIHVcXFwiXFxcIlxcXCJEb2NrZXIgSW50ZWdyYXRp\nb246IEFydGlmYWN0IHdhcyBzY2FubmVkIGJ5IGRvY2tlciBpbWFnZSB7MH0gXFxcXG4gUmVzdWx0\nczpcXFxcbnsxfVxcXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5pbnB1dHNbXFxcImRvY2tlcl9p\nbWFnZVxcXCJdW1xcXCJuYW1lXFxcIl0scmVzdWx0cy5jb250ZW50W1xcXCJsb2dzXFxcIl0pXFxu\nICBcXG4gICMgVW5jb21tZW50IHRoaXMgbGluZSB0byBOT1QgaGF2ZSB0aGUgcmVzdWx0cyBhcHBl\nbmRlZCB0byB0aGUgZGVzY3JpcHQgb2YgdGhlIGFydGlmYWN0XFxuICAjYXJ0aWZhY3QuZGVzY3Jp\ncHRpb24gPSBkZXMgKyB1XFxcIlxcXCJcXFwiRG9ja2VyIEludGVncmF0aW9uOiBBcnRpZmFjdCB3\nYXMgc2Nhbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9XFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRz\nLmlucHV0c1tcXFwiZG9ja2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSlcXG4gIFxcbiAgXFxu\ncm93ID0gaW5jaWRlbnQuYWRkUm93KFxcXCJkb2NrZXJfaW50ZWdyYXRpb25faW52b2NhdGlvbnNc\nXFwiKVxcbnJvd1tcXFwiZG9ja2VyX3RpbWVzdGFtcFxcXCJdID0gcmVzdWx0c1tcXFwibWV0cmlj\nc1xcXCJdW1xcXCJ0aW1lc3RhbXBfZXBvY2hcXFwiXSBvciAwXFxucm93W1xcXCJkb2NrZXJfY29u\ndGFpbmVyX2lkXFxcIl0gPSByZXN1bHRzLmNvbnRlbnRbXFxcImNvbnRhaW5lcl9pZFxcXCJdXFxu\ncm93W1xcXCJkb2NrZXJfaW1hZ2VcXFwiXSA9IHJlc3VsdHMuaW5wdXRzW1xcXCJkb2NrZXJfaW1h\nZ2VcXFwiXVtcXFwibmFtZVxcXCJdXFxuXFxucm93W1xcXCJkb2NrZXJfYXJ0aWZhY3RfdHlwZVxc\nXCJdID0gcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tlcl9hcnRpZmFjdF90eXBlXFxcIl1cXG5yb3db\nXFxcImRvY2tlcl9hcnRpZmFjdF92YWx1ZVxcXCJdID0gcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tl\ncl9pbnB1dFxcXCJdXFxuXCIsXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImlucHV0cy5kb2Nr\nZXJfaW5wdXQgPSBhcnRpZmFjdC52YWx1ZVxcbmlucHV0cy5pbmNpZGVudF9pZCA9IGluY2lkZW50\nLmlkIFxcbmlucHV0cy5kb2NrZXJfYXJ0aWZhY3RfdHlwZSA9IGFydGlmYWN0LnR5cGVcIn08L3Jl\nc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1ZW5jZUZs\nb3dfMWliYjkzbjwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xbTIyZHAwPC9vdXRn\nb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMWliYjkz\nblwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNlcnZpY2VU\nYXNrXzAzaHJ5czRcIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMDZxdXA1YlwiPjxpbmNvbWlu\nZz5TZXF1ZW5jZUZsb3dfMW0yMmRwMDwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93\nIGlkPVwiU2VxdWVuY2VGbG93XzFtMjJkcDBcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18wM2hy\neXM0XCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMDZxdXA1YlwiLz48dGV4dEFubm90YXRpb24gaWQ9\nXCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJl\nPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNl\ndWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRB\nbm5vdGF0aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJC\nUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRc\nIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTdGFy\ndEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdkYzpCb3Vu\nZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhcIi8+PGJwbW5k\naTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5MFwiIHg9XCIx\nNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJw\nbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIgaWQ9\nXCJUZXh0QW5ub3RhdGlvbl8xa3
h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzMFwi\nIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJw\nbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBpZD1cIkFz\nc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE2OVwiIHhzaTp0eXBl\nPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTUzXCIgeHNp\nOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5k\naTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18wM2hyeXM0XCIgaWQ9XCJTZXJ2\naWNlVGFza18wM2hyeXM0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwi\nMTAwXCIgeD1cIjM4N1wiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBN\nTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMWliYjkzblwiIGlkPVwiU2VxdWVuY2VG\nbG93XzFpYmI5M25fZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBlPVwib21n\nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMzg3XCIgeHNpOnR5cGU9\nXCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5k\ncyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyOTIuNVwiIHk9XCIxODRcIi8+PC9icG1u\nZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVt\nZW50PVwiRW5kRXZlbnRfMDZxdXA1YlwiIGlkPVwiRW5kRXZlbnRfMDZxdXA1Yl9kaVwiPjxvbWdk\nYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjY0MlwiIHk9XCIxODhcIi8+\nPGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwi\nIHg9XCI2NjBcIiB5PVwiMjI3XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hh\ncGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xbTIyZHAwXCIg\naWQ9XCJTZXF1ZW5jZUZsb3dfMW0yMmRwMF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiNDg3XCIg\neHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI2\nNDJcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJl\nbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjU2NC41XCIgeT1c\nIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PC9icG1uZGk6QlBN\nTlBsYW5lPjwvYnBtbmRpOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+IiwgInZlcnNpb24iOiAx\nMX0sICJhY3Rpb25zIjogW119LCB7IndvcmtmbG93X2lkIjogNTYsICJuYW1lIjogIkV4YW1wbGU6\nIERvY2tlcjpTZW5kIEFydGlmYWN0IFRvIERvY2tlciBDb250YWluZXIgKEFtYXNzKSIsICJwcm9n\ncmFtbWF0aWNfbmFtZSI6ICJkb2NrZXJfYW5hbHl6ZV9hcnRpZmFjdF93aXRoX2RvY2tlcl9jb250\nYWluZXJfYW1hc3MiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiZGVzY3JpcHRpb24iOiAi\nQW4gZXhhbXBsZSB3b3JrZmxvdyBzY29wZWQgZm9yIEFydGlmYWN0cyB3aGljaCB3aWxsLCB3aGVu\nIGludm9rZWQsIHNlbmQgdGhlIGFydGlmYWN0IHRvIGEgRG9ja2VyIGNvbnRhaW5lciwgcGVyZm9y\nbSBzb21lIG9wZXJhdGlvbiBvbiB0aGUgaW5wdXQgYW5kIHJldHVybnMgaW5mb3JtYXRpb24gdG8g\nUmVzaWxpZW50LiIsICJjcmVhdG9yX2lkIjogImFsZnJlZEB3YXluZWNvcnAuY29tIiwgImxhc3Rf\nbW9kaWZpZWRfYnkiOiAiYWxmcmVkQHdheW5lY29ycC5jb20iLCAibGFzdF9tb2RpZmllZF90aW1l\nIjogMTU1MTk1NDEzMDExMiwgImV4cG9ydF9rZXkiOiAiZG9ja2VyX2FuYWx5emVfYXJ0aWZhY3Rf\nd2l0aF9kb2NrZXJfY29udGFpbmVyX2FtYXNzIiwgInV1aWQiOiAiNDVmZjY4NzgtM2I4YS00ZWQx\nLWI5ZDAtYzc5YmE0MjQ3MzA1IiwgImNvbnRlbnQiOiB7IndvcmtmbG93X2lkIjogImRvY2tlcl9h\nbmFseXplX2FydGlmYWN0X3dpdGhfZG9ja2VyX2NvbnRhaW5lcl9hbWFzcyIsICJ4bWwiOiAiPD94\nbWwgdmVyc2lvbj1cIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHhtbG5z\nPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJw\nbW5kaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxuczpv\nbWdkYz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6b21n\nZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJlc2ls\naWVudD1cImh0dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0cDov\nL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3Lnc
zLm9y\nZy8yMDAxL1hNTFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93d3cu\nY2FtdW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwiZG9ja2VyX2FuYWx5emVfYXJ0aWZhY3Rf\nd2l0aF9kb2NrZXJfY29udGFpbmVyX2FtYXNzXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9\nXCJFeGFtcGxlOiBEb2NrZXI6U2VuZCBBcnRpZmFjdCBUbyBEb2NrZXIgQ29udGFpbmVyIChBbWFz\ncylcIj48ZG9jdW1lbnRhdGlvbj5BbiBleGFtcGxlIHdvcmtmbG93IHNjb3BlZCBmb3IgQXJ0aWZh\nY3RzIHdoaWNoIHdpbGwsIHdoZW4gaW52b2tlZCwgc2VuZCB0aGUgYXJ0aWZhY3QgdG8gYSBEb2Nr\nZXIgY29udGFpbmVyLCBwZXJmb3JtIHNvbWUgb3BlcmF0aW9uIG9uIHRoZSBpbnB1dCBhbmQgcmV0\ndXJucyBpbmZvcm1hdGlvbiB0byBSZXNpbGllbnQuPC9kb2N1bWVudGF0aW9uPjxzdGFydEV2ZW50\nIGlkPVwiU3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xaWJiOTNu\nPC9vdXRnb2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMDNo\ncnlzNFwiIG5hbWU9XCJEb2NrZXI6IFJ1biBEb2NrZXIgQ29udGFpbmVyXCIgcmVzaWxpZW50OnR5\ncGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1\naWQ9XCI3YTIyMGJlMy0wNWY3LTRiMTctYTFhNy05N2I0MDc2ZTE0YmVcIj57XCJpbnB1dHNcIjp7\nXCJlNmQ2NmJhYy04NDFkLTQwM2YtODJmYS04NjJkYzNjZDIyMmZcIjp7XCJpbnB1dF90eXBlXCI6\nXCJzdGF0aWNcIixcInN0YXRpY19pbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJz\nZWxlY3RfdmFsdWVcIjpcIjM1NjYxOWY4LTNhZWItNDdhMy04NGJiLWMzMzVjMWExM2JhZlwifX19\nLFwicG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwibm90ZV90ZXh0X3N0YXJ0ID0gdVxcXCJcXFwi\nXFxcIiZsdDtiJmd0O0RvY2tlciBJbnRlZ3JhdGlvbiZsdDsvYiZndDtcXG4gICAgICAgICAgICAg\nICZsdDticiZndDsmbHQ7YnImZ3Q7QSBjb250YWluZXIgd2FzIHJhbiB1c2luZyB0aGUgaW1hZ2Ug\nJmx0O2ImZ3Q7ezB9Jmx0Oy9iJmd0O1xcXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5pbnB1dHNb\nXFxcImRvY2tlcl9pbWFnZVxcXCJdW1xcXCJuYW1lXFxcIl0pXFxuICAgICAgICAgICAgICBcXG4j\nIElmIHRoZSBBdHRhY2htZW50IGF0dHJpYnV0ZSBvZiB0aGUgY29udGVudCBwYXlsb2FkIGlzIHNl\ndDsgd2UgYXJlIGRlYWxpbmcgd2l0aCBhbiBhdHRhY2htZW50XFxuaWYgcmVzdWx0cy5jb250ZW50\nW1xcXCJhdHRhY2htZW50X25hbWVcXFwiXSAhPSBOb25lOlxcbiAgbm90ZV90ZXh0X2F0dGFjaG1l\nbnQgPSB1XFxcIlxcXCJcXFwiJmx0O2JyJmd0OyBPbiBhbiBBdHRhY2htZW50IHdpdGggbmFtZSB7\nMH0gXFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmNvbnRlbnRbXFxcImF0dGFjaG1lbnRfbmFt\nZVxcXCJdKVxcbiAgbm90ZV90ZXh0X3N0YXJ0ICs9IG5vdGVfdGV4dF9hdHRhY2htZW50XFxuXFxu\nIyBPdGhlcndpc2Ugd2UgYXJlIGRlYWxpbmcgd2l0aCBhbiBhcnRpZmFjdFxcbmVsc2U6XFxuICBu\nb3RlX3RleHRfYXJ0aWZhY3QgPSB1XFxcIlxcXCJcXFwiJmx0O2JyJmd0OyBPbiBhbiBBcnRpZmFj\ndCBvZiBUeXBlOiAmbHQ7YiZndDt7MH0mbHQ7L2ImZ3Q7XFxuICAgICAgICAgICAgICAgICAgICAg\nICAgICAmbHQ7YnImZ3Q7IEFydGlmYWN0IFZhbHVlOiAmbHQ7YiZndDt7MX0mbHQ7L2ImZ3Q7XFxc\nIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2FydGlmYWN0X3R5cGVc\nXFwiXSwgcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tlcl9pbnB1dFxcXCJdKVxcbiAgbm90ZV90ZXh0\nX3N0YXJ0ICs9IG5vdGVfdGV4dF9hcnRpZmFjdFxcbiAgICAgICAgICAgICAgXFxubm90ZV90ZXh0\nX2VuZCA9IFxcXCJcXFwiXFxcIiZsdDticiZndDtDb250YWluZXIgSUQgOiAmbHQ7YiZndDt7MH0m\nbHQ7L2ImZ3Q7XFxuICAgICAgICAgICAgICAmbHQ7YnImZ3Q7Q29udGFpbmVyIGV4aXQgY29kZSA6\nICZsdDtiJmd0O3sxfSZsdDsvYiZndDtcXG4gICAgICAgICAgICAgICZsdDticiZndDsmbHQ7YnIm\nZ3Q7IENvbnRhaW5lciBMb2dzIGhhdmUgYmVlbiBzYXZlZCBhcyBhbiBhdHRhY2htZW50LlxcbiAg\nICAgICAgICAgICAgQ29udGFpbmVyIFN0YXRzLCBMb2dzLCBGdW5jdGlvbiBJbnB1dHMgb3IgUnVu\nIFRpbWUgTWV0cmljcyBhcmUgYWxzbyBhdmFpbGFibGUgYXMgcGFydCBvZiB0aGUgcmVzdWx0IHBh\neWxvYWRcXFwiXFxcIlxcXCIuZm9ybWF0KFxcbiAgICAgICAgICAgICAgICByZXN1bHRzLmNvbnRl\nbnRbXFxcImNvbnRhaW5lcl9pZFxcXCJdLCByZXN1bHRzLmNvbnRlbnRbXFxcImNvbnRhaW5lcl9l\neGl0X3N0YXR1c1xcXCJdKVxcblxcbm5vdGVfdGV4dCA9IG5vdGVfdGV4dF9zdGFydCtub3RlX3Rl\neHRfZW5kXFxuaW5jaWRlbnQuYWRkTm90ZShoZWxwZXIuY3JlYXRlUmljaFRleHQobm90ZV90ZXh0\nKSlcXG5cXG50cnk6XFxuICAgIGRlcyA9IGFydGlmYWN0LmRlc2NyaXB0aW9uLmNvbnRlbnRcXG5l\neGNlcHQgRXhjZXB0aW9uOlxcbiAgZGVzID0gTm
9uZVxcbiAgXFxuaWYgZGVzIGlzIE5vbmU6XFxu\nICAjIFVuY29tbWVudCB0aGlzIGxpbmUgdG8gaGF2ZSB0aGUgQW1hc3Mgc3ViZG9tYWluIHJlc3Vs\ndHMgYXBwZW5kZWQgdG8gdGhlIGRlc2NyaXB0IG9mIHRoZSBhcnRpZmFjdFxcbiAgI2FydGlmYWN0\nLmRlc2NyaXB0aW9uID0gdVxcXCJcXFwiXFxcIkRvY2tlciBJbnRlZ3JhdGlvbjpcXFxcbiBBcnRp\nZmFjdCB3YXMgc2Nhbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9ICBcXFxcbiBSZXN1bHRzOlxcXFxu\nezF9XFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2ltYWdlXFxc\nIl1bXFxcIm5hbWVcXFwiXSxyZXN1bHRzLmNvbnRlbnRbXFxcImxvZ3NcXFwiXSlcXG4gIFxcbiAg\nYXJ0aWZhY3QuZGVzY3JpcHRpb24gPSB1XFxcIlxcXCJcXFwiRG9ja2VyIEludGVncmF0aW9uOiBB\ncnRpZmFjdCB3YXMgc2Nhbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9XFxcIlxcXCJcXFwiLmZvcm1h\ndChyZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSlcXG5l\nbHNlOlxcbiAgIyBVbmNvbW1lbnQgdGhpcyBsaW5lIHRvIGhhdmUgdGhlIEFtYXNzIHN1YmRvbWFp\nbiByZXN1bHRzIGFwcGVuZGVkIHRvIHRoZSBkZXNjcmlwdCBvZiB0aGUgYXJ0aWZhY3QgXFxuICAj\nYXJ0aWZhY3QuZGVzY3JpcHRpb24gPSBkZXMgKyB1XFxcIlxcXCJcXFwiRG9ja2VyIEludGVncmF0\naW9uOiBBcnRpZmFjdCB3YXMgc2Nhbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9IFxcXFxuIFJlc3Vs\ndHM6XFxcXG57MX1cXFwiXFxcIlxcXCIuZm9ybWF0KHJlc3VsdHMuaW5wdXRzW1xcXCJkb2NrZXJf\naW1hZ2VcXFwiXVtcXFwibmFtZVxcXCJdLHJlc3VsdHMuY29udGVudFtcXFwibG9nc1xcXCJdKVxc\nbiAgXFxuICBhcnRpZmFjdC5kZXNjcmlwdGlvbiA9IGRlcyArIHVcXFwiXFxcIlxcXCJEb2NrZXIg\nSW50ZWdyYXRpb246IEFydGlmYWN0IHdhcyBzY2FubmVkIGJ5IGRvY2tlciBpbWFnZSB7MH1cXFwi\nXFxcIlxcXCIuZm9ybWF0KHJlc3VsdHMuaW5wdXRzW1xcXCJkb2NrZXJfaW1hZ2VcXFwiXVtcXFwi\nbmFtZVxcXCJdKVxcbiAgXFxuICBcXG5yb3cgPSBpbmNpZGVudC5hZGRSb3coXFxcImRvY2tlcl9p\nbnRlZ3JhdGlvbl9pbnZvY2F0aW9uc1xcXCIpXFxucm93W1xcXCJkb2NrZXJfdGltZXN0YW1wXFxc\nIl0gPSByZXN1bHRzW1xcXCJtZXRyaWNzXFxcIl1bXFxcInRpbWVzdGFtcF9lcG9jaFxcXCJdIG9y\nIDBcXG5yb3dbXFxcImRvY2tlcl9jb250YWluZXJfaWRcXFwiXSA9IHJlc3VsdHMuY29udGVudFtc\nXFwiY29udGFpbmVyX2lkXFxcIl1cXG5yb3dbXFxcImRvY2tlcl9pbWFnZVxcXCJdID0gcmVzdWx0\ncy5pbnB1dHNbXFxcImRvY2tlcl9pbWFnZVxcXCJdW1xcXCJuYW1lXFxcIl1cXG5cXG5yb3dbXFxc\nImRvY2tlcl9hcnRpZmFjdF90eXBlXFxcIl0gPSByZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2Fy\ndGlmYWN0X3R5cGVcXFwiXVxcbnJvd1tcXFwiZG9ja2VyX2FydGlmYWN0X3ZhbHVlXFxcIl0gPSBy\nZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2lucHV0XFxcIl1cXG5cIixcInByZV9wcm9jZXNzaW5n\nX3NjcmlwdFwiOlwiaW5wdXRzLmRvY2tlcl9pbnB1dCA9IGFydGlmYWN0LnZhbHVlXFxuaW5wdXRz\nLmluY2lkZW50X2lkID0gaW5jaWRlbnQuaWQgXFxuaW5wdXRzLmRvY2tlcl9hcnRpZmFjdF90eXBl\nID0gYXJ0aWZhY3QudHlwZVwifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVu\ndHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18xaWJiOTNuPC9pbmNvbWluZz48b3V0Z29pbmc+U2Vx\ndWVuY2VGbG93XzFtMjJkcDA8L291dGdvaW5nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBp\nZD1cIlNlcXVlbmNlRmxvd18xaWJiOTNuXCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3ht\nXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRhc2tfMDNocnlzNFwiLz48ZW5kRXZlbnQgaWQ9XCJFbmRF\ndmVudF8wNnF1cDViXCI+PGluY29taW5nPlNlcXVlbmNlRmxvd18xbTIyZHAwPC9pbmNvbWluZz48\nL2VuZEV2ZW50PjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMW0yMmRwMFwiIHNvdXJj\nZVJlZj1cIlNlcnZpY2VUYXNrXzAzaHJ5czRcIiB0YXJnZXRSZWY9XCJFbmRFdmVudF8wNnF1cDVi\nXCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIj48dGV4dD5T\ndGFydCB5b3VyIHdvcmtmbG93IGhlcmU8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRp\nb24gaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVh\nc3htXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiLz48L3Byb2Nlc3M+PGJw\nbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFcIj48YnBtbmRpOkJQTU5QbGFuZSBi\ncG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBsYW5lXzFcIj48YnBtbmRpOkJQTU5T\naGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIGlkPVwiU3RhcnRFdmVudF8x\nNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwi\nMTYyX
CIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1c\nIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE1N1wiIHk9XCIyMjNcIi8+PC9icG1uZGk6QlBNTkxhYmVs\nPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlRleHRB\nbm5vdGF0aW9uXzFreHhpeXRcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRfZGlcIj48b21n\nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwiOTlcIiB5PVwiMjU0XCIv\nPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiQXNzb2Np\nYXRpb25fMXNldWo0OFwiIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OF9kaVwiPjxvbWdkaTp3YXlw\nb2ludCB4PVwiMTY5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMjBcIi8+PG9tZ2Rp\nOndheXBvaW50IHg9XCIxNTNcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjI1NFwiLz48\nL2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlNlcnZpY2VU\nYXNrXzAzaHJ5czRcIiBpZD1cIlNlcnZpY2VUYXNrXzAzaHJ5czRfZGlcIj48b21nZGM6Qm91bmRz\nIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMzg3XCIgeT1cIjE2NlwiLz48L2JwbW5k\naTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18x\naWJiOTNuXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMWliYjkzbl9kaVwiPjxvbWdkaTp3YXlwb2ludCB4\nPVwiMTk4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBv\naW50IHg9XCIzODdcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRp\nOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjI5\nMi41XCIgeT1cIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PGJw\nbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJFbmRFdmVudF8wNnF1cDViXCIgaWQ9XCJFbmRF\ndmVudF8wNnF1cDViX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZc\nIiB4PVwiNjQyXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhl\naWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjY2MFwiIHk9XCIyMjdcIi8+PC9icG1uZGk6QlBN\nTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwi\nU2VxdWVuY2VGbG93XzFtMjJkcDBcIiBpZD1cIlNlcXVlbmNlRmxvd18xbTIyZHAwX2RpXCI+PG9t\nZ2RpOndheXBvaW50IHg9XCI0ODdcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwi\nLz48b21nZGk6d2F5cG9pbnQgeD1cIjY0MlwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwi\nMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0\naD1cIjBcIiB4PVwiNTY0LjVcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5k\naTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxhbmU+PC9icG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZp\nbml0aW9ucz4iLCAidmVyc2lvbiI6IDEyfSwgImFjdGlvbnMiOiBbXX0sIHsid29ya2Zsb3dfaWQi\nOiA1MywgIm5hbWUiOiAiRXhhbXBsZTogRG9ja2VyOlNlbmQgQXR0YWNobWVudCBUbyBEb2NrZXIg\nQ29udGFpbmVyIChWb2xhdGlsaXR5KSIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJkb2NrZXJfc2Vu\nZF9hdHRhY2htZW50X3RvX2RvY2tlcl9jb250YWluZXIiLCAib2JqZWN0X3R5cGUiOiAiYXR0YWNo\nbWVudCIsICJkZXNjcmlwdGlvbiI6ICJBbiBleGFtcGxlIHdvcmtmbG93IHNjb3BlZCBmb3IgQXR0\nYWNobWVudHMgd2hpY2ggd2lsbCwgd2hlbiBpbnZva2VkLCBzZW5kIHRoZSBhdHRhY2htZW50IHRv\nIGEgRG9ja2VyIGNvbnRhaW5lciwgcGVyZm9ybSBzb21lIG9wZXJhdGlvbiBvbiB0aGUgaW5wdXQg\nYW5kIHJldHVybnMgaW5mb3JtYXRpb24gdG8gUmVzaWxpZW50LiIsICJjcmVhdG9yX2lkIjogImFs\nZnJlZEB3YXluZWNvcnAuY29tIiwgImxhc3RfbW9kaWZpZWRfYnkiOiAiYWxmcmVkQHdheW5lY29y\ncC5jb20iLCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU1MjQ5Mjg3OTc4MywgImV4cG9ydF9rZXki\nOiAiZG9ja2VyX3NlbmRfYXR0YWNobWVudF90b19kb2NrZXJfY29udGFpbmVyIiwgInV1aWQiOiAi\nNWM5MjBhM2YtMzIxOC00MzFiLTk2NzItMDRiNTliNmUzYzdiIiwgImNvbnRlbnQiOiB7Indvcmtm\nbG93X2lkIjogImRvY2tlcl9zZW5kX2F0dGFjaG1lbnRfdG9fZG9ja2VyX2NvbnRhaW5lciIsICJ4\nbWwiOiAiPD94bWwgdmVyc2lvbj1cIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRp\nb25zIHhtbG5zPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwi\nIHhtbG5zOmJwbW5kaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElc\nIiB4bWxuczpvbWdkYz1cImh0dHA6Ly93d3cub21nLm9yZy9zcG
VjL0RELzIwMTAwNTI0L0RDXCIg\neG1sbnM6b21nZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHht\nbG5zOnJlc2lsaWVudD1cImh0dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNk\nPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8v\nd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0\ndHA6Ly93d3cuY2FtdW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwiZG9ja2VyX3NlbmRfYXR0\nYWNobWVudF90b19kb2NrZXJfY29udGFpbmVyXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9\nXCJFeGFtcGxlOiBEb2NrZXI6U2VuZCBBdHRhY2htZW50IFRvIERvY2tlciBDb250YWluZXIgKFZv\nbGF0aWxpdHkpXCI+PGRvY3VtZW50YXRpb24+QW4gZXhhbXBsZSB3b3JrZmxvdyBzY29wZWQgZm9y\nIEF0dGFjaG1lbnRzIHdoaWNoIHdpbGwsIHdoZW4gaW52b2tlZCwgc2VuZCB0aGUgYXR0YWNobWVu\ndCB0byBhIERvY2tlciBjb250YWluZXIsIHBlcmZvcm0gc29tZSBvcGVyYXRpb24gb24gdGhlIGlu\ncHV0IGFuZCByZXR1cm5zIGluZm9ybWF0aW9uIHRvIFJlc2lsaWVudC48L2RvY3VtZW50YXRpb24+\nPHN0YXJ0RXZlbnQgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIj48b3V0Z29pbmc+U2VxdWVuY2VG\nbG93XzBtbjBzMTU8L291dGdvaW5nPjwvc3RhcnRFdmVudD48c2VydmljZVRhc2sgaWQ9XCJTZXJ2\naWNlVGFza18xM3l6ZHd5XCIgbmFtZT1cIkRvY2tlcjogUnVuIERvY2tlciBDb250YWluZXJcIiBy\nZXNpbGllbnQ6dHlwZT1cImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6\nZnVuY3Rpb24gdXVpZD1cIjdhMjIwYmUzLTA1ZjctNGIxNy1hMWE3LTk3YjQwNzZlMTRiZVwiPntc\nImlucHV0c1wiOntcImU2ZDY2YmFjLTg0MWQtNDAzZi04MmZhLTg2MmRjM2NkMjIyZlwiOntcImlu\ncHV0X3R5cGVcIjpcInN0YXRpY1wiLFwic3RhdGljX2lucHV0XCI6e1wibXVsdGlzZWxlY3RfdmFs\ndWVcIjpbXSxcInNlbGVjdF92YWx1ZVwiOlwiN2YzNmEyODUtYjJiMC00MDFiLWEwY2EtYTQ3OGFl\nOTBiZTZiXCJ9fSxcImU4OTNlMDhkLTk0MDgtNDQ0OS04OWFiLTkyOGMxY2UxZTRkNFwiOntcImlu\ncHV0X3R5cGVcIjpcInN0YXRpY1wiLFwic3RhdGljX2lucHV0XCI6e1wibXVsdGlzZWxlY3RfdmFs\ndWVcIjpbXSxcInRleHRfdmFsdWVcIjpcInBzbGlzdFwifX19LFwicG9zdF9wcm9jZXNzaW5nX3Nj\ncmlwdFwiOlwibm90ZV90ZXh0X3N0YXJ0ID0gdVxcXCJcXFwiXFxcIiZsdDtiJmd0O0RvY2tlciBJ\nbnRlZ3JhdGlvbiZsdDsvYiZndDtcXG4gICAgICAgICAgICAgICZsdDticiZndDsmbHQ7YnImZ3Q7\nQSBjb250YWluZXIgd2FzIHJhbiB1c2luZyB0aGUgaW1hZ2UgJmx0O2ImZ3Q7ezB9Jmx0Oy9iJmd0\nO1xcXCJcXFwiXFxcIi5mb3JtYXQodVxcXCI6XFxcIi5qb2luKFtyZXN1bHRzLmlucHV0c1tcXFwi\nZG9ja2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSwgcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tl\ncl9vcGVyYXRpb25cXFwiXV0pKVxcbiAgICAgICAgICAgICAgXFxuIyBJZiB0aGUgQXR0YWNobWVu\ndCBhdHRyaWJ1dGUgb2YgdGhlIGNvbnRlbnQgcGF5bG9hZCBpcyBzZXQ7IHdlIGFyZSBkZWFsaW5n\nIHdpdGggYW4gYXR0YWNobWVudFxcbmlmIHJlc3VsdHMuY29udGVudFtcXFwiYXR0YWNobWVudF9u\nYW1lXFxcIl0gIT0gTm9uZTpcXG4gIG5vdGVfdGV4dF9hdHRhY2htZW50ID0gdVxcXCJcXFwiXFxc\nIiZsdDticiZndDsgT24gYW4gQXR0YWNobWVudCB3aXRoIG5hbWUgJmx0O2ImZ3Q7ezB9Jmx0Oy9i\nJmd0O1xcXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5jb250ZW50W1xcXCJhdHRhY2htZW50X25h\nbWVcXFwiXSlcXG4gIG5vdGVfdGV4dF9zdGFydCArPSBub3RlX3RleHRfYXR0YWNobWVudFxcblxc\nbiMgT3RoZXJ3aXNlIHdlIGFyZSBkZWFsaW5nIHdpdGggYW4gYXJ0aWZhY3RcXG5lbHNlOlxcbiAg\nbm90ZV90ZXh0X2FydGlmYWN0ID0gdVxcXCJcXFwiXFxcIiZsdDticiZndDsgT24gYW4gQXJ0aWZh\nY3Qgb2YgVHlwZTogJmx0O2ImZ3Q7ezB9Jmx0Oy9iJmd0O1xcbiAgICAgICAgICAgICAgICAgICAg\nICAgICAgJmx0O2JyJmd0OyBBcnRpZmFjdCBWYWx1ZTogJmx0O2ImZ3Q7ezF9Jmx0Oy9iJmd0O1xc\nXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5pbnB1dHNbXFxcImRvY2tlcl9hcnRpZmFjdF90eXBl\nXFxcIl0sIHJlc3VsdHMuaW5wdXRzW1xcXCJkb2NrZXJfaW5wdXRcXFwiXSlcXG4gIG5vdGVfdGV4\ndF9zdGFydCArPSBub3RlX3RleHRfYXJ0aWZhY3RcXG4gICAgICAgICAgICAgIFxcbm5vdGVfdGV4\ndF9lbmQgPSBcXFwiXFxcIlxcXCImbHQ7YnImZ3Q7Q29udGFpbmVyIElEIDogJmx0O2ImZ3Q7ezB9\nJmx0Oy9iJmd0O1xcbiAgICAgICAgICAgICAgJmx0O2JyJmd0O0NvbnRhaW5lciBleGl0IGNvZGUg\nOiAmbHQ7YiZndDt7MX0mbHQ7L2ImZ3Q7XFxuICAgICAgICAgICAgICAmbHQ7YnImZ3Q7Jmx0O2Jy\nJmd0OyBDb250YWluZXIgTG9ncyBoYXZlIGJlZW4gc2F2ZWQgYXMgYW4gYXR0YWNobWVudC5cXG4g\nICAgICAgICAgICAgI
ENvbnRhaW5lciBTdGF0cywgTG9ncywgRnVuY3Rpb24gSW5wdXRzIG9yIFJ1\nbiBUaW1lIE1ldHJpY3MgYXJlIGFsc28gYXZhaWxhYmxlIGFzIHBhcnQgb2YgdGhlIHJlc3VsdCBw\nYXlsb2FkXFxcIlxcXCJcXFwiLmZvcm1hdChcXG4gICAgICAgICAgICAgICAgcmVzdWx0cy5jb250\nZW50W1xcXCJjb250YWluZXJfaWRcXFwiXSwgcmVzdWx0cy5jb250ZW50W1xcXCJjb250YWluZXJf\nZXhpdF9zdGF0dXNcXFwiXSlcXG5cXG5ub3RlX3RleHQgPSBub3RlX3RleHRfc3RhcnQrbm90ZV90\nZXh0X2VuZFxcblxcbiMgSWYgd2UgYXJlIGRlYWxpbmcgd2l0aCBhIHRhc2sgbGV2ZWwgYXR0YWNo\nbWVudCwgdGhlbiBhZGQgYSBub3RlIHRvIHRoZSB0YXNrIG5vdCB0aGUgaW5jaWRlbnRcXG5pZiB0\nYXNrOlxcbiAgdGFzay5hZGROb3RlKGhlbHBlci5jcmVhdGVSaWNoVGV4dChub3RlX3RleHQpKVxc\nbmVsc2U6XFxuICBpbmNpZGVudC5hZGROb3RlKGhlbHBlci5jcmVhdGVSaWNoVGV4dChub3RlX3Rl\neHQpKVxcblxcbiMgQWRkIGFuIGVudHJ5IHRvIHRoZSBkb2NrZXJfaW50ZWdyYXRpb25faW52b2Nh\ndGlvbnMgRGF0YXRhYmxlXFxucm93ID0gaW5jaWRlbnQuYWRkUm93KFxcXCJkb2NrZXJfaW50ZWdy\nYXRpb25faW52b2NhdGlvbnNcXFwiKVxcblxcbmlmIFxcXCJ0YXNrXFxcIiBpbiByZXN1bHRzLmNv\nbnRlbnRbXFxcInJlc19saW5rc1xcXCJdW1xcXCJyZXNfb2JqZWN0XFxcIl06XFxuICByb3dbXFxc\nImRvY2tlcl9saW5rc1xcXCJdID0gdVxcXCJcXFwiXFxcIiZsdDthIGhyZWY9XFxcInt9XFxcIiZn\ndDt7fSZsdDsvYSZndDtcXFwiXFxcIlxcXCIuZm9ybWF0KHJlc3VsdHMuY29udGVudFtcXFwicmVz\nX2xpbmtzXFxcIl1bXFxcInJlc19vYmplY3RcXFwiXSwgXFxcIlRhc2sgTGlua1xcXCIpXFxuXFxu\ncm93W1xcXCJkb2NrZXJfdGltZXN0YW1wXFxcIl0gPSByZXN1bHRzW1xcXCJtZXRyaWNzXFxcIl1b\nXFxcInRpbWVzdGFtcF9lcG9jaFxcXCJdIG9yIDBcXG5yb3dbXFxcImRvY2tlcl9jb250YWluZXJf\naWRcXFwiXSA9IHJlc3VsdHMuY29udGVudFtcXFwiY29udGFpbmVyX2lkXFxcIl1cXG5yb3dbXFxc\nImRvY2tlcl9pbWFnZVxcXCJdID0gdVxcXCI6XFxcIi5qb2luKFtyZXN1bHRzLmlucHV0c1tcXFwi\nZG9ja2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSwgcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tl\ncl9vcGVyYXRpb25cXFwiXV0pXFxucm93W1xcXCJkb2NrZXJfYXR0YWNobWVudF9uYW1lXFxcIl0g\nPSByZXN1bHRzLmNvbnRlbnRbXFxcImF0dGFjaG1lbnRfbmFtZVxcXCJdXFxuXCIsXCJwcmVfcHJv\nY2Vzc2luZ19zY3JpcHRcIjpcImlucHV0cy5pbmNpZGVudF9pZCA9IGluY2lkZW50LmlkIFxcblxc\nbiMgSWYgdGhpcyB3b3JrZmxvdyBoYXMgdGhlIHRhc2tfaWQgYXZhaWxhYmxlLCBnYXRoZXIgaXQg\naW5jYXNlIHdlIG5lZWQgaXQuXFxuaWYgdGFzazpcXG4gIGlucHV0cy50YXNrX2lkID0gdGFzay5p\nZFxcbiMgSWYgdGhpcyB3b3JrZmxvdyBoYXMgdGhlIGF0dGFjaG1lbnRfaWQgYXZhaWxhYmxlLCBn\nYXRoZXIgaXQgaW5jYXNlIHdlIG5lZWQgaXQuXFxuaWYgYXR0YWNobWVudDpcXG4gIGlucHV0cy5h\ndHRhY2htZW50X2lkID0gYXR0YWNobWVudC5pZFxcblxcbiMgSWYgdGhpcyB3b3JrZmxvdyBoYXMg\ndGhlIGFydGlmYWN0X2lkIGF2YWlsYWJsZSwgZ2F0aGVyIGl0IGluY2FzZSB3ZSBuZWVkIGl0Llxc\nbnRyeTogXFxuICBpZiBhcnRpZmFjdDpcXG4gICAgaW5wdXRzLmFydGlmYWN0X2lkID0gYXJ0aWZh\nY3QuaWRcXG5leGNlcHQ6XFxuICBwYXNzXCIsXCJyZXN1bHRfbmFtZVwiOlwiXCJ9PC9yZXNpbGll\nbnQ6ZnVuY3Rpb24+PC9leHRlbnNpb25FbGVtZW50cz48aW5jb21pbmc+U2VxdWVuY2VGbG93XzBt\nbjBzMTU8L2luY29taW5nPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMWZoa3ZiMDwvb3V0Z29pbmc+\nPC9zZXJ2aWNlVGFzaz48ZW5kRXZlbnQgaWQ9XCJFbmRFdmVudF8weWNoeGhwXCI+PGluY29taW5n\nPlNlcXVlbmNlRmxvd18xZmhrdmIwPC9pbmNvbWluZz48L2VuZEV2ZW50PjxzZXF1ZW5jZUZsb3cg\naWQ9XCJTZXF1ZW5jZUZsb3dfMWZoa3ZiMFwiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzEzeXpk\nd3lcIiB0YXJnZXRSZWY9XCJFbmRFdmVudF8weWNoeGhwXCIvPjxzZXF1ZW5jZUZsb3cgaWQ9XCJT\nZXF1ZW5jZUZsb3dfMG1uMHMxNVwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRh\ncmdldFJlZj1cIlNlcnZpY2VUYXNrXzEzeXpkd3lcIi8+PHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4\ndEFubm90YXRpb25fMWt4eGl5dFwiPjx0ZXh0PlN0YXJ0IHlvdXIgd29ya2Zsb3cgaGVyZTwvdGV4\ndD48L3RleHRBbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhc\nIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3Rh\ndGlvbl8xa3h4aXl0XCIvPjwvcHJvY2Vzcz48YnBtbmRpOkJQTU5EaWFncmFtIGlkPVwiQlBNTkRp\nYWdyYW1fMVwiPjxicG1uZGk6QlBNTlBsYW5lIGJwbW5FbGVtZW50PVwidW5kZWZpbmVkXCIgaWQ9\nXCJCUE1OUGxhbmVfMVwiPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PV
wiU3RhcnRFdmVu\ndF8xNTVhc3htXCIgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1fZGlcIj48b21nZGM6Qm91bmRzIGhl\naWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCIxNjJcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBN\nTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMFwiIHdpZHRoPVwiOTBcIiB4PVwiMTU3XCIg\neT1cIjIyM1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6\nQlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiIGlkPVwiVGV4\ndEFubm90YXRpb25fMWt4eGl5dF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzBcIiB3aWR0\naD1cIjEwMFwiIHg9XCI5OVwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6\nQlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgaWQ9XCJBc3NvY2lh\ndGlvbl8xc2V1ajQ4X2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIxNjlcIiB4c2k6dHlwZT1cIm9t\nZ2RjOlBvaW50XCIgeT1cIjIyMFwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjE1M1wiIHhzaTp0eXBl\nPVwib21nZGM6UG9pbnRcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBN\nTlNoYXBlIGJwbW5FbGVtZW50PVwiU2VydmljZVRhc2tfMTN5emR3eVwiIGlkPVwiU2VydmljZVRh\nc2tfMTN5emR3eV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwi\nIHg9XCIzNzdcIiB5PVwiMTY2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFw\nZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzB5Y2h4aHBcIiBpZD1cIkVuZEV2ZW50XzB5Y2h4aHBf\nZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI2MjdcIiB5\nPVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3\naWR0aD1cIjBcIiB4PVwiNjQ1XCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1u\nZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3df\nMWZoa3ZiMFwiIGlkPVwiU2VxdWVuY2VGbG93XzFmaGt2YjBfZGlcIj48b21nZGk6d2F5cG9pbnQg\neD1cIjQ3N1wiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlw\nb2ludCB4PVwiNjI3XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5k\naTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwi\nNTA3XCIgeT1cIjE4NC41XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48\nYnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzBtbjBzMTVcIiBpZD1c\nIlNlcXVlbmNlRmxvd18wbW4wczE1X2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIxOThcIiB4c2k6\ndHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjM3N1wi\nIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxv\nbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjkwXCIgeD1cIjI0Mi41XCIgeT1cIjE4\nNC41XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1O\nUGxhbmU+PC9icG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZpbml0aW9ucz4iLCAidmVyc2lvbiI6IDUz\nfSwgImFjdGlvbnMiOiBbXX1dLCAicm9sZXMiOiBbXSwgIndvcmtzcGFjZXMiOiBbXSwgImZ1bmN0\naW9ucyI6IFt7ImlkIjogMzQsICJuYW1lIjogImRvY2tlcl9ydW5fZG9ja2VyX2NvbnRhaW5lciIs\nICJkaXNwbGF5X25hbWUiOiAiRG9ja2VyOiBSdW4gRG9ja2VyIENvbnRhaW5lciIsICJkZXNjcmlw\ndGlvbiI6IHsiZm9ybWF0IjogInRleHQiLCAiY29udGVudCI6ICJBIGZ1bmN0aW9uIGludGVuZGVk\nIHRvIGJlIHVzZWQgdG8gY3JlYXRlIGEgRG9ja2VyIENvbnRhaW5lciBmcm9tIGFuIGltYWdlLCBm\nZWVkIGFuIGlucHV0IHRvIHRoZSBjb250YWluZXIgYW5kIHRoZW4gcmV0dXJuIHRoZSByZXN1bHRz\nLiJ9LCAiZGVzdGluYXRpb25faGFuZGxlIjogImZuX2RvY2tlciIsICJleHBvcnRfa2V5IjogImRv\nY2tlcl9ydW5fZG9ja2VyX2NvbnRhaW5lciIsICJ1dWlkIjogIjdhMjIwYmUzLTA1ZjctNGIxNy1h\nMWE3LTk3YjQwNzZlMTRiZSIsICJ2ZXJzaW9uIjogMTEsICJjcmVhdG9yIjogeyJpZCI6IDM5LCAi\ndHlwZSI6ICJ1c2VyIiwgIm5hbWUiOiAiYWxmcmVkQHdheW5lY29ycC5jb20iLCAiZGlzcGxheV9u\nYW1lIjogIkFsZnJlZCBQZW5ueXdvcnRoIn0sICJsYXN0X21vZGlmaWVkX2J5IjogeyJpZCI6IDM5\nLCAidHlwZSI6ICJ1c2VyIiwgIm5hbWUiOiAiYWxmcmVkQHdheW5lY29ycC5jb20iLCAiZGlzcGxh\neV9uYW1lIjogIkFsZnJlZCBQZW5ueXdvcnRoIn0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTUx\nOTUzNDYxMDc4LCAidmlld19pdGVtcyI6IFt7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hvd19pZiI6\nIG51bGwsICJlbGVtZW50IjogImZpZ
WxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9u\nIiwgImNvbnRlbnQiOiAiNjJkOTMxMDUtNzA1ZC00ODc2LTk4MTMtZTYwZWU0M2UxOWVkIiwgInNo\nb3dfbGlua19oZWFkZXIiOiBmYWxzZX0sIHsic3RlcF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjog\nbnVsbCwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24i\nLCAiY29udGVudCI6ICIxNjc3NzE2YS1hOTVlLTRmNTUtOGUzZS01Mzk5ZTZkM2JkOTYiLCAic2hv\nd19saW5rX2hlYWRlciI6IGZhbHNlfSwgeyJzdGVwX2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBu\ndWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIs\nICJjb250ZW50IjogIjgxMWU5OWQ3LWQxOTQtNGNlOC04NmNjLWFmZjVlMDFhYjg1YyIsICJzaG93\nX2xpbmtfaGVhZGVyIjogZmFsc2V9LCB7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hvd19pZiI6IG51\nbGwsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwg\nImNvbnRlbnQiOiAiYmEzMTgyNjEtZWQ2YS00YTM4LWExODctOWUwYjY4ZDE2MDRmIiwgInNob3df\nbGlua19oZWFkZXIiOiBmYWxzZX0sIHsic3RlcF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjogbnVs\nbCwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAi\nY29udGVudCI6ICJlNmQ2NmJhYy04NDFkLTQwM2YtODJmYS04NjJkYzNjZDIyMmYiLCAic2hvd19s\naW5rX2hlYWRlciI6IGZhbHNlfSwgeyJzdGVwX2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBudWxs\nLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJj\nb250ZW50IjogIjRmYzMwOWYxLTM0MDYtNDY0Zi1iZTZkLWQzNzlkYzIzZDQxMSIsICJzaG93X2xp\nbmtfaGVhZGVyIjogZmFsc2V9LCB7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hvd19pZiI6IG51bGws\nICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgImNv\nbnRlbnQiOiAiMjBiNWY2NjEtYjU2Mi00YTc4LThhNDAtM2Q5YzlmMjRjYjY4IiwgInNob3dfbGlu\na19oZWFkZXIiOiBmYWxzZX0sIHsic3RlcF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjogbnVsbCwg\nImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAiY29u\ndGVudCI6ICJlODkzZTA4ZC05NDA4LTQ0NDktODlhYi05MjhjMWNlMWU0ZDQiLCAic2hvd19saW5r\nX2hlYWRlciI6IGZhbHNlfV0sICJ3b3JrZmxvd3MiOiBbeyJ3b3JrZmxvd19pZCI6IDU2LCAibmFt\nZSI6ICJFeGFtcGxlOiBEb2NrZXI6U2VuZCBBcnRpZmFjdCBUbyBEb2NrZXIgQ29udGFpbmVyIChB\nbWFzcykiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZG9ja2VyX2FuYWx5emVfYXJ0aWZhY3Rfd2l0\naF9kb2NrZXJfY29udGFpbmVyX2FtYXNzIiwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgImRl\nc2NyaXB0aW9uIjogbnVsbCwgInV1aWQiOiBudWxsLCAiYWN0aW9ucyI6IFtdfSwgeyJ3b3JrZmxv\nd19pZCI6IDU1LCAibmFtZSI6ICJFeGFtcGxlOiBEb2NrZXI6U2VuZCBBcnRpZmFjdCBUbyBEb2Nr\nZXIgQ29udGFpbmVyIChOU1JMKSIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJkb2NrZXJfYW5hbHl6\nZV9hcnRpZmFjdF93aXRoX2RvY2tlcl9jb250YWluZXJfbnNybCIsICJvYmplY3RfdHlwZSI6ICJh\ncnRpZmFjdCIsICJkZXNjcmlwdGlvbiI6IG51bGwsICJ1dWlkIjogbnVsbCwgImFjdGlvbnMiOiBb\nXX0sIHsid29ya2Zsb3dfaWQiOiA1MywgIm5hbWUiOiAiRXhhbXBsZTogRG9ja2VyOlNlbmQgQXR0\nYWNobWVudCBUbyBEb2NrZXIgQ29udGFpbmVyIChWb2xhdGlsaXR5KSIsICJwcm9ncmFtbWF0aWNf\nbmFtZSI6ICJkb2NrZXJfc2VuZF9hdHRhY2htZW50X3RvX2RvY2tlcl9jb250YWluZXIiLCAib2Jq\nZWN0X3R5cGUiOiAiYXR0YWNobWVudCIsICJkZXNjcmlwdGlvbiI6IG51bGwsICJ1dWlkIjogbnVs\nbCwgImFjdGlvbnMiOiBbXX1dfV19\n\"\"\"\n )", "def StaDefNginx(lines):\n\n if_n = True\n for i in lines:\n if i.startswith(\"nginx\"):\n if \"latest\" in i:\n start = lines.index(i)\n\n while if_n:\n for i in lines[start:]:\n if i == '\\n':\n if_n = False\n end = lines[start:].index(i)\n\n for i in lines[start:end + start]:\n\n if i.startswith(\"nginx\"):\n if \"latest\" in i:\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_def\").get(\"nginx\").update(\n {\"Total\": num[-1] + \"MB\"}\n )\n\n if i.startswith(\"default base layer Size:\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_def\").get(\"nginx\").update(\n {\"Base_Layer\": num[0]}\n )\n\n if i.startswith(\"default microservice added layer Size:\"):\n num = 
re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_def\").get(\"nginx\").update(\n {\"MicroService_layer\": num[0]}\n )", "def upload_nginx_conf():\n\n require('environment', provided_by=env.environments)\n template = os.path.join(env.templates_dir, 'nginx.conf')\n destination = os.path.join(env.services, 'nginx', '%(environment)s.conf' % env)\n _upload_template(template, destination, context=env, user=env.deploy_user)\n restart_nginx()", "def dockerfile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dockerfile\")", "def _dockerfile(self):\n return self.config.get('docker', {}).get('dockerfile', 'Dockerfile')", "def clr_from_nginx(lines):\n\n for i in lines[\n lines.index(\"[nginx] [INFO] Test clear docker image:\\n\"):\n lines.index(\"Clr-Nginx-Server\\n\")]:\n\n if i.startswith(\"Time taken for tests\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"clear\").get(\"nginx\").update(\n {\"Time taken for tests\": num[0]}\n )\n\n if i.endswith(\"[ms] (mean)\\n\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"clear\").get(\"nginx\").update(\n {\"Time per request\": num[0]}\n )\n\n if i.endswith(\"(mean, across all concurrent requests)\\n\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"clear\").get(\"nginx\").update(\n {\"Time per request(all)\": num[0]}\n )\n\n if i.startswith(\"Requests per second\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"clear\").get(\"nginx\").update(\n {\"Requests per second\": num[0]}\n )\n\n if i.startswith(\"Transfer rate\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"clear\").get(\"nginx\").update(\n {\"Transfer rate\": num[0]}\n )", "def build_base():\n with lcd(env.local_path):\n put('./requirements.txt', '/srv/build/requirements.txt')\n\n with cd('/srv/build'):\n run('docker build -t {base_image_name} .'.format(\n base_image_name=env.base_image_name,\n ))", "def content(self, content):\n if self.cache_content:\n self.cached_content = b2u(content)\n\n try:\n with self._open_dockerfile('wb') as dockerfile:\n dockerfile.write(u2b(content))\n except (IOError, OSError) as ex:\n logger.error(\"Couldn't write content to dockerfile: %r\", ex)\n raise", "def install_nginx_config():\n\n run('rm -f /etc/nginx/conf.d/*')\n run('cp -f /home/indabom/web/site/scripts/nginx/indabom-config /etc/nginx/conf.d/indabom.conf')\n run('service nginx configtest')\n run('service nginx reload')", "def __set_container_info(self):\n self.container = \"{}_{}_1\".format(self.build, self.service.lower())\n self.mysql_container = \"{}_{}-mysql_1\".format(self.build, self.service.lower())", "def switch_nginx(*args):\n if len(args) == 1:\n version_name = args[0]\n if not NAME_REGEX.match(version_name):\n print 'Invalid version name'\n return\n else:\n print 'Incorrect arguments'\n return\n\n try:\n template = DOCKER_TEMPLATES_ENV.get_template(NGINX_TEMPLATE_FILE_NAME)\n except TemplateNotFound:\n print 'Template %s not found' % TEMPLATE_FILE_PATH\n return\n try:\n with open('/tmp/anbardari_read', 'w') as conf_file:\n conf_file.write(\n template.render(\n socket_path=os.path.join(SHARED_DIR_OUTSIDE_CONTAINER, version_name, GUNICORN_SOCKET_FILE_NAME),\n nginx_error_log_path=os.path.join(NGINX_LOG_DIR, 'error.log'),\n nginx_access_log_path=os.path.join(NGINX_LOG_DIR, 'access.log'),\n nginx_port=NGINX_PORT,\n nginx_server_name=NGINX_SERVER_NAME,\n development=DEBUG,\n version_name=version_name,\n base_dir=BASE_DIR,\n gunicorn_port=GUNICORN_PORT,\n )\n )\n except OSError as error:\n print str(error)\n return\n result = 
subprocess.call('sudo mv /tmp/anbardari_read /etc/nginx/sites-enabled/anbardari', shell=True)\n if result:\n return\n result = subprocess.check_output('sudo service nginx configtest', shell=True)\n if 'done' not in result:\n print 'Nginx configtest failed'\n return\n result = subprocess.call('sudo service nginx reload', shell=True)\n if result:\n print 'Reloading nginx failed.'\n return\n print 'Successfully configured nginx'", "def generate_nginx_config(ctx, path=None):\n if not path:\n path = PATH\n\n local_env = LocalEnvironment(path)\n\n nginx_opts = {}\n nginx_opts['papertrail_host'] = PAPERTRAIL_HOST\n nginx_opts['papertrail_port'] = PAPERTRAIL_PORT\n nginx_opts['rewrite_rules'] = REWRITE_RULES\n nginx_opts['sendfile'] = 'on'\n\n nginx_filename = os.path.join(path, 'nginx.conf')\n nginx_site_filename = os.path.join(path, 'default')\n\n local_env.render_template(template='nginx.j2', opts=nginx_opts, filename='nginx.conf')\n local_env.render_template(template='nginx_site.j2', opts=nginx_opts, filename='default')", "def StaClrNginx(lines):\n\n if_n = True\n for i in lines:\n if i.startswith(\"nginx\"):\n if \"latest\" in i:\n start = lines.index(i)\n\n while if_n:\n for i in lines[start:]:\n if i == '\\n':\n if_n = False\n end = lines[start:].index(i)\n\n for i in lines[start:end + start]:\n\n if i.startswith(\"clearlinux/nginx\"):\n if \"latest\" in i:\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_Clr\").get(\"nginx\").update(\n {\"Total\": num[-1] + \"MB\"}\n )\n\n if i.startswith(\"clearlinux base layer Size:\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_Clr\").get(\"nginx\").update(\n {\"Base_Layer\": num[0]}\n )\n\n if i.startswith(\"clearlinux microservice added layer Size:\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_Clr\").get(\"nginx\").update(\n {\"MicroService_layer\": num[0]}\n )\n\n for i in lines[start:]:\n if i.startswith(\"clearlinux/nginx version:\\n\"):\n end = lines[start:].index(i) + 1\n num = re.findall(\"\\d+\\.?\\d*\", lines[start:][end])\n data.get(\"status_Clr\").get(\"nginx\").update(\n {\"VERSION_ID\": num[0]}\n )", "def _make_user_data(registry, tag, region):\n\n base_format = \"\"\"MIME-Version: 1.0\nContent-Type: multipart/mixed; boundary=\\\"==MYBOUNDARY==\\\"\n\n--==MYBOUNDARY==\nContent-Type: text/cloud-config; charset=\\\"us-ascii\\\"\n\nruncmd:\n\n- sudo yum update -y\n- sudo amazon-linux-extras install docker -y\n- sudo service docker start\n- sudo usermod -a -G docker ec2-user\n- curl \"https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip\" -o \"awscliv2.zip\"\n- unzip awscliv2.zip\n- sudo ./aws/install\n- ln -s /usr/bin/aws aws\n- aws ecr get-login-password --region {region}|docker login --username AWS --password-stdin {registry}\n- docker pull {tag}\n- docker run -p {bentoservice_port}:{bentoservice_port} {tag}\n\n--==MYBOUNDARY==--\n\"\"\".format( # noqa: E501\n registry=registry, tag=tag, region=region, bentoservice_port=BENTOSERVICE_PORT\n )\n encoded = base64.b64encode(base_format.encode(\"ascii\")).decode(\"ascii\")\n return encoded", "def build_container(client):\n client.images.build(path=os.path.join(os.path.abspath(\"\"), \"docker\"), tag=\"scrape_light\")", "def _create_dockerfile(self, commands):\n import Utils\n\n user_id = Utils.get_sudo_user_id()\n dockerfile = '''FROM ubuntu:14.04\\nRUN apt-get update\\n\\n# Add user ubuntu.\\nRUN useradd -u {0} -ms /bin/bash ubuntu\\n\n # Set up base environment.\\nRUN apt-get install -yy \\ \\n software-properties-common \\ \\n\n 
python-software-properties \\ \\n wget \\ \\n curl \\ \\n git \\ \\n ipython \\ \\n sudo \\ \\n\n screen \\ \\n iptables \\nRUN echo \"ubuntu ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers\n \\nWORKDIR /home/ubuntu\\n\\nUSER ubuntu\\nENV HOME /home/ubuntu'''.format(user_id)\n\n flag = False\n\n for entry in commands:\n if isinstance(entry, list):\n dockerfile += '''\\n\\nRUN '''\n first = True\n flag = False\n for sub_entry in entry:\n if first is True:\n dockerfile += self._preprocess(sub_entry)\n first = False\n else:\n dockerfile += ''' && \\ \\n ''' + self._preprocess(sub_entry)\n else:\n if flag is False:\n dockerfile += '''\\n\\nRUN '''\n flag = True\n dockerfile += self._preprocess(entry)\n else:\n dockerfile += ''' && \\ \\n ''' + self._preprocess(entry)\n\n dockerfile += '''\\n\\n\\n'''\n\n dockerfile_file = DockerProvider.__get_new_dockerfile_name()\n with open(dockerfile_file, 'w') as Dockerfile:\n Dockerfile.write(dockerfile)\n named_dockerfile = tempfile.NamedTemporaryFile()\n named_dockerfile.write(dockerfile)\n named_dockerfile.seek(0)\n\n return named_dockerfile, dockerfile_file", "def generate_config(container_data, file_path):\n pass", "def get_content(self):\n # =====================================================================\n class NullUndefined(jinja2.Undefined):\n \"\"\"\n Class required to handle jinja2-variables inside the meta.yaml\n \"\"\"\n # -----------------------------------------------------------------\n def __unicode__(self):\n return six.text_type(self._undefined_name)\n\n # -----------------------------------------------------------------\n def __getattr__(self, attribute_name):\n return six.text_type(f'{self}.{attribute_name}')\n\n # -----------------------------------------------------------------\n def __getitem__(self, attribute_name):\n return f'{self}[\"{attribute_name}\"]'\n\n\n # =====================================================================\n class StrDict(dict):\n \"\"\"\n Class required to handle jinja2-variables inside the meta.yaml\n \"\"\"\n # -----------------------------------------------------------------\n def __getitem__(self, key, default=''):\n return self[key] if key in self else default\n\n return YAML(typ='base').load(\n (jinja2.Environment(undefined=NullUndefined)\n .from_string(self.path.open().read())\n .render(**dict(os=os,\n environ=StrDict(),\n load_setup_py_data=StrDict))))", "def create_nginx_config(server, port):\n\n if config_count() >= config_limit:\n abort(400)\n\n ip = request.remote_addr\n nginx_config = nginx_sites_enabled + '/' + server\n temp = sys.stdout\n sys.stdout = open(nginx_config, 'w')\n print template.render(server_name=server, proxy_addr=ip, port=port)\n sys.stdout.close()\n sys.stdout = temp\n\n call([\"/usr/sbin/service\", \"nginx\", \"restart\"])\n\n processes = [proc.name() for proc in process_iter()]\n\n if 'nginx' in processes:\n return jsonify(server_name=server, proxy_address=ip, port=port, \\\n config_count=config_count(), status=200)\n else:\n os.remove(nginx_config)\n call([\"/usr/sbin/service\", \"nginx\", \"restart\"])\n abort(500)", "def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))", "def nginx(self):\n self.summarize_operation(\"Installing Nginx Web Server\")\n self.install_package(\"nginx\")" ]
[ "0.64565045", "0.6387392", "0.6265688", "0.6264885", "0.6121652", "0.6046972", "0.5944196", "0.5906597", "0.59064484", "0.58702147", "0.5849548", "0.5765242", "0.56611687", "0.5653609", "0.5642426", "0.56414825", "0.56217706", "0.5584245", "0.5574183", "0.55173683", "0.54997474", "0.5474511", "0.54463136", "0.53804946", "0.5365458", "0.5361542", "0.5330437", "0.53165853", "0.53132576", "0.53026867" ]
0.7623629
0
Holds the necessary content for the config file for nginx with phpfpm support commented out
def get_config_file_content(self): config_content: List[str] = [ 'server {', ' listen {};'.format(self.port), '', ' ##', ' # PHP-FPM', ' ##', ' #location ~ \.php$ {', ' #include /etc/nginx/fastcgi_params;', ' #root /var/www/src;', ' #fastcgi_split_path_info ^(.+?\.php)(/.*)$;', ' #fastcgi_pass phpfpm:3002;', ' #fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;', ' #}', '', ' location / {', ' root /var/www/src;', ' index index.html;' ' #index index.php;', ' #rewrite ^ /index.php?$args last; break;', ' }', '}' ] return config_content
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configuration():", "def construct_nginx_config(nginx_root: str, nginx_webroot: str, http_port: int, https_port: int,\n other_port: int, default_server: bool, key_path: Optional[str] = None,\n cert_path: Optional[str] = None, wtf_prefix: str = 'le') -> str:\n key_path = key_path if key_path \\\n else pkg_resources.resource_filename('certbot_integration_tests', 'assets/key.pem')\n cert_path = cert_path if cert_path \\\n else pkg_resources.resource_filename('certbot_integration_tests', 'assets/cert.pem')\n return '''\\\n# This error log will be written regardless of server scope error_log\n# definitions, so we have to set this here in the main scope.\n#\n# Even doing this, Nginx will still try to create the default error file, and\n# log a non-fatal error when it fails. After that things will work, however.\nerror_log {nginx_root}/error.log;\n\n# The pidfile will be written to /var/run unless this is set.\npid {nginx_root}/nginx.pid;\n\nuser {user};\nworker_processes 1;\n\nevents {{\n worker_connections 1024;\n}}\n\n# “This comment contains valid Unicode”.\n\nhttp {{\n # Set an array of temp, cache and log file options that will otherwise default to\n # restricted locations accessible only to root.\n client_body_temp_path {nginx_root}/client_body;\n fastcgi_temp_path {nginx_root}/fastcgi_temp;\n proxy_temp_path {nginx_root}/proxy_temp;\n #scgi_temp_path {nginx_root}/scgi_temp;\n #uwsgi_temp_path {nginx_root}/uwsgi_temp;\n access_log {nginx_root}/error.log;\n\n # This should be turned off in a Virtualbox VM, as it can cause some\n # interesting issues with data corruption in delivered files.\n sendfile off;\n\n tcp_nopush on;\n tcp_nodelay on;\n keepalive_timeout 65;\n types_hash_max_size 2048;\n\n #include /etc/nginx/mime.types;\n index index.html index.htm index.php;\n\n log_format main '$remote_addr - $remote_user [$time_local] $status '\n '\"$request\" $body_bytes_sent \"$http_referer\" '\n '\"$http_user_agent\" \"$http_x_forwarded_for\"';\n\n default_type application/octet-stream;\n\n server {{\n # IPv4.\n listen {http_port} {default_server};\n # IPv6.\n listen [::]:{http_port} {default_server};\n server_name nginx.{wtf_prefix}.wtf nginx2.{wtf_prefix}.wtf;\n\n root {nginx_webroot};\n\n location / {{\n # First attempt to serve request as file, then as directory, then fall\n # back to index.html.\n try_files $uri $uri/ /index.html;\n }}\n }}\n\n server {{\n listen {http_port};\n listen [::]:{http_port};\n server_name nginx3.{wtf_prefix}.wtf;\n\n root {nginx_webroot};\n\n location /.well-known/ {{\n return 404;\n }}\n\n return 301 https://$host$request_uri;\n }}\n\n server {{\n listen {other_port};\n listen [::]:{other_port};\n server_name nginx4.{wtf_prefix}.wtf nginx5.{wtf_prefix}.wtf;\n }}\n\n server {{\n listen {http_port};\n listen [::]:{http_port};\n listen {https_port} ssl;\n listen [::]:{https_port} ssl;\n if ($scheme != \"https\") {{\n return 301 https://$host$request_uri;\n }}\n server_name nginx6.{wtf_prefix}.wtf nginx7.{wtf_prefix}.wtf;\n\n ssl_certificate {cert_path};\n ssl_certificate_key {key_path};\n }}\n}}\n'''.format(nginx_root=nginx_root, nginx_webroot=nginx_webroot, user=getpass.getuser(),\n http_port=http_port, https_port=https_port, other_port=other_port,\n default_server='default_server' if default_server else '', wtf_prefix=wtf_prefix,\n key_path=key_path, cert_path=cert_path)", "def install_nginx_config():\n\n run('rm -f /etc/nginx/conf.d/*')\n run('cp -f /home/indabom/web/site/scripts/nginx/indabom-config /etc/nginx/conf.d/indabom.conf')\n run('service nginx 
configtest')\n run('service nginx reload')", "def config():", "def config():", "def nginx():\n\n get_details()\n\n context = {\n \"site_name\": env.site_name,\n \"paths\": env.paths,\n \"ip_address\": env.ip_address,\n \"site_is_secure\": env.site_is_secure,\n \"app_server\": env.app_server,\n }\n\n nginx_path = '/etc/nginx/sites-available'\n\n if exists(nginx_path):\n with cd(nginx_path):\n if exists(env.site_name):\n print \"nginx site configuration already exists!\"\n return\n else:\n upload_template(\"nginx_conf.txt\", \n env.site_name,\n context,\n use_jinja=True,\n template_dir=JINJA_TEMPLATE_PATH,\n use_sudo=True)\n print \"Created nginx site configuration file. Enabling site...\"\n sudo('ln -s /etc/nginx/sites-available/%s /etc/nginx/sites-enabled/%s' % (env.site_name, env.site_name))\n #print \"Site enabled. Reloading nginx...\"\n #sudo('/etc/init.d/nginx reload')\n return\n else:\n print \"It doesn't seem like you have nginx installed.\"\n return", "def config_content(self, command, vars):\n modules = [line.strip()\n for line in self.dist.get_metadata_lines('top_level.txt')\n if line.strip() and not line.strip().startswith('#')]\n if not modules:\n print >> sys.stderr, 'No modules are listed in top_level.txt'\n print >> sys.stderr, \\\n 'Try running python setup.py egg_info to regenerate that file'\n for module in modules:\n if pkg_resources.resource_exists(module, self.config_file):\n return self.template_renderer(\n pkg_resources.resource_string(module, self.config_file),\n vars, filename=self.config_file)\n # Legacy support for the old location in egg-info\n return super(PyramidInstaller, self).config_content(command, vars)", "def create_nginx_config(server, port):\n\n if config_count() >= config_limit:\n abort(400)\n\n ip = request.remote_addr\n nginx_config = nginx_sites_enabled + '/' + server\n temp = sys.stdout\n sys.stdout = open(nginx_config, 'w')\n print template.render(server_name=server, proxy_addr=ip, port=port)\n sys.stdout.close()\n sys.stdout = temp\n\n call([\"/usr/sbin/service\", \"nginx\", \"restart\"])\n\n processes = [proc.name() for proc in process_iter()]\n\n if 'nginx' in processes:\n return jsonify(server_name=server, proxy_address=ip, port=port, \\\n config_count=config_count(), status=200)\n else:\n os.remove(nginx_config)\n call([\"/usr/sbin/service\", \"nginx\", \"restart\"])\n abort(500)", "def parse_config(self):\n # TODO: parse config file\n pass", "def wsgi_conf():\n\n get_details()\n\n site_dir = posixpath.join(env.paths[\"sites\"], env.site_name)\n if not exists(site_dir):\n run(\"mkdir -p %s\" % site_dir)\n\n filename = \"%s_wsgi.py\" % env.project_name\n\n context = {\n \"site_name\": env.site_name,\n \"project_name\": env.project_name,\n \"python_version\": env.python_version,\n \"paths\": env.paths,\n }\n\n # Set up the wsgi dir.\n if env.app_server=='apache':\n wsgi_dir = posixpath.join(site_dir, \"apache\")\n else:\n wsgi_dir = posixpath.join(site_dir, \"src/src-%s\" % env.project_name)\n\n with cd(wsgi_dir):\n if not exists(filename):\n print \"Template path: %s\" % JINJA_TEMPLATE_PATH\n upload_template(\"wsgi_conf_%s.txt\" % env.app_server,\n filename,\n context,\n use_jinja=True,\n template_dir=JINJA_TEMPLATE_PATH)\n else:\n\t\t\t#TODO: If it exists, append to it\n print \"This file already exists.\"\n return\n run(\"chmod 654 %s\" % filename)", "def configServer():\n try:\n config = open(r\"./server.conf\",\"r+\")\n except IOError,e:\n print e\n return 0\n configLines = []\n try:\n while True:\n configLines.append(config.next())\n 
except StopIteration:\n pass\n finally:\n config.close()\n configInfo = {}\n for line in configLines:\n if line[0] == \"#\" or line[0] == \"\\n\":\n continue\n configLineArgumentList = line[:-1].split(\"=\")\n key = configLineArgumentList[0]\n value = configLineArgumentList[1]\n configInfo.update({key:value})\n logging.info(\"Configuration done sucssesfully\")\n return configInfo", "def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}", "def _config_nginx():\n if exists('/etc/nginx/sites-available/', use_sudo=False, verbose=False):\n print \"/etc/nginx/sites-available/ exists.\"\n put(os.path.join(env.local_directory, 'installer/simple_nginx_config'),\n '/etc/nginx/sites-available/default',\n use_sudo=True)\n else:\n print \"Could't configure nginx because /etc/nginx/sites-available/ does not exist.\"", "def pre_process_information(self):\n self.logger.debug(\n colorama.Fore.BLUE\n + \"jsnapy.cfg file location used : %s\" % get_config_location(),\n extra=self.log_detail,\n )\n self.logger.debug(\n colorama.Fore.BLUE\n + \"Configuration file location used : %s\"\n % get_path(\"DEFAULT\", \"config_file_path\"),\n extra=self.log_detail,\n )", "def config():\n return {\n \"CLEAN_OUTBOX\": \"TRUE\",\n \"COMPONENT_NAME\": \"testing-unpacker\",\n \"DEST_SITE\": \"WIPAC\",\n \"FILE_CATALOG_REST_TOKEN\": \"fake-file-catalog-token\",\n \"FILE_CATALOG_REST_URL\": \"http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/\",\n \"HEARTBEAT_PATCH_RETRIES\": \"3\",\n \"HEARTBEAT_PATCH_TIMEOUT_SECONDS\": \"30\",\n \"HEARTBEAT_SLEEP_DURATION_SECONDS\": \"60\",\n \"INPUT_STATUS\": \"unpacking\",\n \"LTA_REST_TOKEN\": \"fake-lta-rest-token\",\n \"LTA_REST_URL\": \"http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/\",\n \"OUTPUT_STATUS\": \"completed\",\n \"PATH_MAP_JSON\": \"/tmp/lta/testing/path_map.json\",\n \"RUN_ONCE_AND_DIE\": \"False\",\n \"SOURCE_SITE\": \"NERSC\",\n \"UNPACKER_OUTBOX_PATH\": \"/tmp/lta/testing/unpacker/outbox\",\n \"UNPACKER_WORKBOX_PATH\": \"/tmp/lta/testing/unpacker/workbox\",\n \"WORK_RETRIES\": \"3\",\n \"WORK_SLEEP_DURATION_SECONDS\": \"60\",\n \"WORK_TIMEOUT_SECONDS\": \"30\",\n }", "def init_cfg_spec(self):\n\n super(PidfileApp, self).init_cfg_spec()\n\n if 'general' not in self.cfg_spec:\n self.cfg_spec['general'] = {}\n\n if not self._default_pidfilename:\n self._default_pidfilename = self.appname + '.pid'\n\n pidfile_spec = \"string(default = '%s')\" % (\n to_unicode_or_bust(self._default_pidfilename))\n\n if 'pidfile' not in self.cfg_spec['general']:\n self.cfg_spec['general']['pidfile'] = pidfile_spec\n self.cfg_spec['general'].comments['pidfile'].append('')\n self.cfg_spec['general'].comments['pidfile'].append(\n 'The filename of the pidfile (absolute or relative to base_dir).')", "def generate_nginx_config(ctx, path=None):\n if not path:\n path = PATH\n\n local_env = LocalEnvironment(path)\n\n nginx_opts = {}\n nginx_opts['papertrail_host'] = PAPERTRAIL_HOST\n nginx_opts['papertrail_port'] = PAPERTRAIL_PORT\n nginx_opts['rewrite_rules'] = REWRITE_RULES\n nginx_opts['sendfile'] = 'on'\n\n nginx_filename = os.path.join(path, 'nginx.conf')\n nginx_site_filename = os.path.join(path, 
'default')\n\n local_env.render_template(template='nginx.j2', opts=nginx_opts, filename='nginx.conf')\n local_env.render_template(template='nginx_site.j2', opts=nginx_opts, filename='default')", "def config(self) -> Dict[str, Any]:", "def get_config_contents() -> str:\n config_file = os.environ.get(\"PYP_CONFIG_PATH\")\n if config_file is None:\n return \"\"\n try:\n with open(config_file, \"r\") as f:\n return f.read()\n except FileNotFoundError as e:\n raise PypError(f\"Config file not found at PYP_CONFIG_PATH={config_file}\") from e", "def config():\n config_django()\n config_svisor()", "def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }", "def __analyze_config(self):\n result = {}\n with open(self.file) as f:\n data = f.readlines()\n temp_key = ''\n for line in data:\n if line[0] == '\t' or line[0] == ';':\n result[temp_key].append(line.strip())\n else:\n temp_key = line.strip()\n result[temp_key] = []\n return result", "def __init__(self, custom_file=None):\n self.home = os.path.abspath(os.path.expanduser('~'))\n # Static Defaults\n defaults = \\\n {\n 'cfg_sn_username' : '',\n 'cfg_sn_password' : '',\n 'cfg_nt_ext' : 'txt',\n 'cfg_nt_path' : os.path.join(self.home, 'Simplenote'),\n 'cfg_nt_trashpath' : '.trash',\n 'cfg_nt_filenamelen' : '60',\n 'cfg_log_level' : 'info'\n }\n\n cp = configparser.SafeConfigParser(defaults)\n if custom_file is not None:\n self.configs_read = cp.read([custom_file])\n else:\n self.configs_read = cp.read([os.path.join(self.home, '.snsync')])\n\n cfg_sec = 'snsync'\n\n if not cp.has_section(cfg_sec):\n cp.add_section(cfg_sec)\n\n self.configs = collections.OrderedDict()\n\n #\n # Environment Varialbles over-ride config file settings.\n # Config files are cfg_abc\n # Envs are sn_abc\n #\n\n if os.environ.get('sn_username') is None:\n val_sn_username = cp.get(cfg_sec, 'cfg_sn_username', raw=True)\n else:\n val_sn_username = os.environ.get('sn_username')\n self.configs['sn_username'] = [val_sn_username, 'Simplenote Username']\n\n if os.environ.get('sn_password') is None:\n val_sn_passowrd = cp.get(cfg_sec, 'cfg_sn_password', raw=True)\n else:\n val_sn_passowrd = os.environ.get('sn_password')\n self.configs['sn_password'] = [val_sn_passowrd, 'Simplenote Password']\n\n if os.environ.get('sn_nt_ext') is None:\n val_sn_nt_ext = cp.get(cfg_sec, 'cfg_nt_ext')\n else:\n val_sn_nt_ext = os.environ.get('sn_nt_ext')\n self.configs['cfg_nt_ext'] = [val_sn_nt_ext, 'Note file extension']\n\n if os.environ.get('sn_nt_path') is None:\n val_sn_nt_path = cp.get(cfg_sec, 'cfg_nt_path')\n else:\n val_sn_nt_path = os.environ.get('sn_nt_path')\n self.configs['cfg_nt_path'] = [val_sn_nt_path, 'Note storage path']\n\n if os.environ.get('sn_nt_trashpath') is None:\n val_sn_nt_trashpath = cp.get(cfg_sec, 'cfg_nt_trashpath')\n else:\n val_sn_nt_trashpath = os.environ.get('sn_nt_trashpath')\n self.configs['cfg_nt_trashpath'] = [val_sn_nt_trashpath, 'Note Trash Bin Folder for deleted notes']\n\n if os.environ.get('sn_nt_filenamelen') is None:\n val_sn_nt_filenamelen = cp.get(cfg_sec, 'cfg_nt_filenamelen')\n else:\n val_sn_nt_filenamelen = os.environ.get('sn_nt_filenamelen')\n self.configs['cfg_nt_filenamelen'] = [val_sn_nt_filenamelen, 'Length of Filename']\n\n if os.environ.get('sn_log_level') is None:\n val_sn_log_level = cp.get(cfg_sec, 
'cfg_log_level')\n else:\n val_sn_log_level = os.environ.get('sn_log_level')\n self.configs['cfg_log_level'] = [val_sn_log_level, 'snsync log level']\n\n # Dynamic Defaults\n if os.environ.get('sn_db_path') is None:\n if cp.has_option(cfg_sec, 'cfg_db_path'):\n val_sn_db_path = cp.get(cfg_sec, 'cfg_db_path')\n else:\n val_sn_db_path = os.path.join(cp.get(cfg_sec, 'cfg_nt_path'), '.snsync.sqlite')\n else:\n val_sn_db_path = os.environ.get('sn_db_path')\n self.configs['cfg_db_path'] = [val_sn_db_path, 'snsync database location']\n\n if os.environ.get('sn_log_path') is None:\n if cp.has_option(cfg_sec, 'cfg_log_path'):\n val_sn_log_path = cp.get(cfg_sec, 'cfg_log_path')\n else:\n val_sn_log_path = os.path.join(cp.get(cfg_sec, 'cfg_nt_path'), '.snsync.log')\n else:\n val_sn_log_path = os.environ.get('sn_log_path')\n self.configs['cfg_log_path'] = [val_sn_log_path, 'snsync log location']", "def setup():\n print(cyan('Configuring nginx on {}'.format(env.stage)))\n context = {\n 'ssl_letsencrypt': False,\n 'ssl_with_dhparam': False,\n 'ssl_cert': None,\n 'ssl_key': None,\n }\n\n if ctx('ssl.letsencrypt'):\n execute('letsencrypt.setup')\n elif ctx('ssl.key') and ctx('ssl.cert'):\n ssl = True\n dhparams = ctx('ssl.dhparam', default=False)\n key = ctx('ssl.key', default=False)\n cert = ctx('ssl.cert', default=False)\n\n if key and files.exists(key, use_sudo=True):\n context['ssl_key'] = ctx('ssl.key')\n if cert and files.exists(cert, use_sudo=True):\n context['ssl_cert'] = ctx('ssl.cert')\n if dhparams and files.exists(dhparams, use_sudo=True):\n context['ssl_with_dhparam'] = True\n if ssl:\n upload_template(\n 'nginx_ssl.template', ctx('nginx.config_path'), context=context)\n else:\n upload_template(\n 'nginx.template', ctx('nginx.config_path'), context=context)\n\n if files.exists(ctx('nginx.document_root'), use_sudo=True):\n sudo('chown -R {user}:{group} {path}'.format(\n path=ctx('nginx.document_root'), user=ctx('system.user'),\n group=ctx('system.group')))\n\n sudo('service nginx reload')", "def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder", "def upload_nginx_conf():\n\n require('environment', provided_by=env.environments)\n template = os.path.join(env.templates_dir, 'nginx.conf')\n destination = os.path.join(env.services, 'nginx', '%(environment)s.conf' % env)\n _upload_template(template, destination, context=env, user=env.deploy_user)\n restart_nginx()", "def __writeConfig(self):\n page = None\n\n #TODO: get values of configurations here\n particles = \"#f\" if not base.particleMgrEnabled else \"#t\"\n volume = str(round(base.musicManager.getVolume(), 2))\n mute = \"#f\" if base.AppHasAudioFocus else \"#t\"\n #TODO: add any configuration variable name that you have added\n customConfigVariables = [\n \"\", \"particles-enabled\", \"audio-mute\", \"audio-volume\"]\n if os.path.exists(prcFile):\n # open the config file and change values according to current\n # application settings\n page = loadPrcFile(Filename.fromOsSpecific(prcFile))\n removeDecls = []\n for dec in range(page.getNumDeclarations()):\n # Check if our variables are given.\n # NOTE: This check has to be done to not 
loose our base or other\n # manual config changes by the user\n if page.getVariableName(dec) in customConfigVariables:\n decl = page.modifyDeclaration(dec)\n removeDecls.append(decl)\n for dec in removeDecls:\n page.deleteDeclaration(dec)\n # NOTE: particles-enabled and audio-mute are custom variables and\n # have to be loaded by hand at startup\n # Particles\n page.makeDeclaration(\"particles-enabled\", particles)\n # audio\n page.makeDeclaration(\"audio-volume\", volume)\n page.makeDeclaration(\"audio-mute\", mute)\n else:\n # Create a config file and set default values\n cpMgr = ConfigPageManager.getGlobalPtr()\n page = cpMgr.makeExplicitPage(\"%s Pandaconfig\"%appName)\n # set OpenGL to be the default\n page.makeDeclaration(\"load-display\", \"pandagl\")\n # get the displays width and height\n w = self.pipe.getDisplayWidth()\n h = self.pipe.getDisplayHeight()\n # set the window size in the config file\n page.makeDeclaration(\"win-size\", \"%d %d\"%(w, h))\n # set the default to fullscreen in the config file\n page.makeDeclaration(\"fullscreen\", \"1\")\n # particles\n page.makeDeclaration(\"particles-enabled\", \"#t\")\n # audio\n page.makeDeclaration(\"audio-volume\", volume)\n page.makeDeclaration(\"audio-mute\", \"#f\")\n # create a stream to the specified config file\n configfile = OFileStream(prcFile)\n # and now write it out\n page.write(configfile)\n # close the stream\n configfile.close()", "def read_config():\n filename = path.join(path.expanduser('~'), '.profrc')\n config = configparser.ConfigParser()\n config.read(filename)\n if 'baseurl' not in config['DEFAULT']:\n print(\"\"\"FATAL : No baseurl found in {0}\nOpen {0} and add the following lines\n\n[DEFAULT]\nBaseurl = https://your-prof-instance\"\"\".format(filename))\n sys.exit()\n try:\n requests.get(config['DEFAULT']['BASEURL'])\n except:\n print(\"{0} does not seems to be reachable. Verify the baseurl set at {1} matches ``https://your-prof-instance``\".format(config['DEFAULT']['BASEURL'], filename))\n sys.exit()\n return config", "def _init_config(self):\n self.config = self.config_template.specialize()\n print('MMH CONFIG:\\n' + str(self.config))", "def __init__(self, base='', *path_parts):\n self._config = {}\n self.path = join(base, *path_parts)\n\n if not isfile(self.path):\n raise ImproperlyConfigured('Not a file')\n\n with open(self.path, 'r') as secret_file:\n content = secret_file.read()\n\n for line in content.splitlines():\n if line and not line.startswith('#'):\n line_parts = line.split('=', 1)\n self._config[line_parts[0]] = line_parts[1]" ]
[ "0.58595735", "0.5857228", "0.5824042", "0.5811135", "0.5811135", "0.58059853", "0.5765053", "0.5761889", "0.57487726", "0.5743518", "0.57283866", "0.5719833", "0.5708684", "0.5700183", "0.56810653", "0.5675398", "0.56663287", "0.56349885", "0.5631693", "0.56010985", "0.55980307", "0.55529374", "0.5543958", "0.5515938", "0.55074733", "0.55001944", "0.5472579", "0.5472103", "0.54697883", "0.5462521" ]
0.72823024
0
A somewhat more scalable way to get the max episode lengths.
def get_max_episode_len(path): path = path.replace('data/', '') path = path.replace('goals/', '') task = tasks.names[path]() max_steps = task.max_steps - 1 # Remember, subtract one! return max_steps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _max_periods(self):\n return self.data.shape[0]", "def get_max_time_steps (self):\n return self.degreedays.thawing.num_timesteps", "def getEvolutionMax(self):\n \n return [self.getMaximumAtGivenTime(timeIndex) for timeIndex in range(self.numberOfTimes - 1)]", "def get_movie_longest_runtime(movies: list) -> str:\n pass", "def _getLongestLength(self, listOfLists):\n\t\tmax = -1\n\t\tfor list in listOfLists:\n\t\t\tif len(list) > max:\n\t\t\t\tmax = len(list)\n\t\treturn max", "def _maxValueLength(self):\n returnvalue = 0\n for row in self._value:\n for item in row:\n if (type(item) == type(float())):\n returnvalue = max(returnvalue, len('%.3f' % item))\n else:\n returnvalue = max(returnvalue, len(str(item)))\n return returnvalue", "def get_max_length(item_list: list) -> int:\n max_value = -float('inf')\n for item in item_list:\n if len(str(item)) > max_value:\n max_value = len(str(item))\n return max_value", "def maxs(self):\n return self._maxs", "def get_movie_longest_runtime():\n tree = get_tree()\n movie_runtimes = [(movie.get(\"title\"), movie.get(\"runtime\")) for movie in tree.getroot().findall(\"movie\")]\n\n # one way\n result = sorted(movie_runtimes, key=lambda movie_runtimes: movie_runtimes[1], reverse = True)[0][0]\n return result\n \n # another way\n # result = max(movie_runtimes, key=lambda movie_runtimes: movie_runtimes[1])[0]\n # return result", "def _get_infer_maximum_iterations(self, hparams):\n\t\tif hparams.tgt_max_len_infer:\n\t\t\tmaximum_iterations = hparams.tgt_max_len_infer\n\t\t\t_info('decoding with maximum iterations {}'.format(maximum_iterations))\n\t\telse:\n\t\t\tif self.mode == 'infer':\n\t\t\t\t_error('For Inference, tgt_max_len_infer in hparameters must set')\n\t\t\t\traise ValueError\n\t\t\tdecoding_length_factor = 3.0\n\t\t\tmax_encoder_length = tf.reduce_max(self.seq_length_decoder_input_data)\n\t\t\tmaximum_iterations = tf.to_int32(tf.round(\n\t\t\t\ttf.to_float(max_encoder_length) * decoding_length_factor))\n\t\treturn maximum_iterations", "def maxend (self):\n\n maxend = self.list[0][\"period\"].end\n for actor in self.list:\n if maxend < actor[\"period\"].end:\n maxend = actor[\"period\"].end\n return maxend", "def _max_days(self):\n # type: (...) 
-> Union[int, Tuple[int]]\n\n return self.value.max_days", "def get_v_max(self) -> int:\n return len(self.vocabulary)", "def _get_length_max(self):\n # data_list = list(range(len(self.files_refined)))\n data_list = range(self.idx_max_length, len(self.files_refined))\n progress = tqdm(data_list)\n for pdb_id in progress:\n features_filt, geo_filt = self._get_features_geo_filtered(pdb_id)\n length = features_filt.shape[1]\n if (length > self.max_length):\n self.max_length = length\n progress.set_postfix({'pdb': self.files_refined[pdb_id],\n 'length': length,\n 'max_langth': self.max_length})\n save_checkpoint_feature(self.path_checkpoint_features, pdb_id, self.max_length, id)\n return self.max_length", "def maxs(self):\n return self.intervals[:, 1]", "def calculate_longest_title(self):\n longest_title_length = 0\n for movie in self.movies:\n title_length = len(movie.title)\n if title_length > longest_title_length:\n longest_title_length = title_length\n return longest_title_length", "def get_max_lengths(shopData):\n maxNameLen = 0\n maxValueLen = 0\n for each in shopData:\n if len(each[1]) > maxNameLen:\n maxNameLen = len(each[1])\n if len(str(each[2])) > maxValueLen:\n maxValueLen = len(str(each[2]))\n return maxNameLen, maxValueLen", "def get_max_dwell_mvals(model, state_data):\n dwell_results = []\n for ind in range(len(state_data)):\n ind_dwell = (model.times >= state_data['tstart'][ind]) & (model.times <= state_data['tstop'][ind])\n if np.any(ind_dwell):\n dwell_results.append(np.max(model.mvals[ind_dwell]))\n else:\n dwell_results.append(-1.0e6)\n return tuple(dwell_results)", "def get_num_episodes(self) -> int:\n return len(self.episodes)", "def n_episodes(self):\n raise NotImplementedError", "def _get_max_answers(self):\n return max([len(x) for x in self.labels])", "def get_max_run(run):\n max = 0\n max_i = 0\n for i in range(800, 900):\n if int(run[i]) > int(max):\n max = run[i]\n max_i = i\n return max, max_i", "def get_max_param(self):\r\n\r\n sql_str = \"SELECT jsonb_array_length(parameters) as length FROM alarm_condition\"\r\n\r\n param = self.postgres.query_fetch_all(sql_str)\r\n\r\n if param:\r\n\r\n max_param = max([p['length'] for p in param])\r\n else:\r\n\r\n max_param = 0\r\n\r\n return max_param", "def __latest_available_episode(self, predict_season: int, latest_episode: int) -> int:\n max_episode = 0\n for i in range(1, latest_episode + 1):\n if VideoParser.has_parsed_video(predict_season, i):\n max_episode = i\n else:\n return max_episode\n return max_episode", "def get_lmax_limit(self):\n\n if self.pixel == \"HEALPIX\":\n l_max_limit = 3 * self.nside - 1\n elif self.pixel == \"CAR\":\n cdelt = self.data.wcs.wcs.cdelt[1]\n l_max_limit = 360 / cdelt / 4\n return l_max_limit", "def max_total_length(murals):\n if not murals:\n return 0\n\n no_overlap = []\n for mural in murals:\n if mural[1] <= murals[0][0] or mural[0] >= murals[0][1]:\n no_overlap.append(mural)\n\n value = murals[0][1] - murals[0][0]\n del murals[0]\n return max(value + max_total_length(no_overlap), max_total_length(murals))", "def max_mireds(self):\n return 333", "def max_dets(self):\n return self._max_dets", "def max_length(lines):\n return max([len(s.split()) for s in lines])", "def maximum_element_size_for_length(length):\n\t\n\treturn (2**(7*length)) - 2" ]
[ "0.67602694", "0.65362716", "0.6483145", "0.64574337", "0.6382647", "0.6372196", "0.6358692", "0.6329625", "0.62262315", "0.6209432", "0.6173282", "0.6164478", "0.6163113", "0.6134469", "0.61204875", "0.6057055", "0.6035286", "0.60185933", "0.6007308", "0.60014915", "0.5984407", "0.59706765", "0.5962639", "0.59329903", "0.59309596", "0.59109765", "0.5908426", "0.59050554", "0.5904577", "0.59006596" ]
0.7456747
0
Add an episode to the dataset.
def add(self, episode, last_stuff=None): color, depth, action, info = [], [], [], [] for obs, act, i in episode: color.append(obs['color']) depth.append(obs['depth']) action.append(act) info.append(i) color = np.uint8(color) depth = np.float32(depth) def dump(data, field): field_path = os.path.join(self.path, field) if not os.path.exists(field_path): os.makedirs(field_path) fname = f'{self.num_episodes:06d}-{len(episode)}.pkl' pickle.dump(data, open(os.path.join(field_path, fname), 'wb')) dump(color, 'color') dump(depth, 'depth') dump(action, 'action') dump(info, 'info') # Handle last stuff. if last_stuff is not None: last_color = np.uint8(last_stuff[0]['color']) last_depth = np.float32(last_stuff[0]['depth']) dump(last_color, 'last_color') dump(last_depth, 'last_depth') dump(last_stuff[1], 'last_info') else: print('Warning! last_stuff=None. We may require this later.') self.episode_id += [self.num_episodes] * len(episode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_episode(self, ep):\n #make da season\n ses = self._add_season(ep)\n dvdses = self._add_season(ep, dvd=True) \n self._add_episode(ep, ses)\n self._add_episode(ep, dvdses, dvd=True)", "def add(self, episodes: Union[List[\"_Episode\"], \"_Episode\"]):\n if isinstance(episodes, _Episode):\n episodes = [episodes]\n\n for eps in episodes:\n # Make sure we don't change what's coming in from the user.\n # TODO (sven): It'd probably be better to make sure in the EnvRunner to not\n # hold on to episodes (for metrics purposes only) that we are returning\n # back to the user from `EnvRunner.sample()`. Then we wouldn't have to\n # do any copying. Instead, either compile the metrics right away on the\n # EnvRunner OR compile metrics entirely on the Algorithm side (this is\n # actually preferred).\n eps = copy.deepcopy(eps)\n\n self._num_timesteps += len(eps)\n self._num_timesteps_added += len(eps)\n\n # Ongoing episode, concat to existing record.\n if eps.id_ in self.episode_id_to_index:\n eps_idx = self.episode_id_to_index[eps.id_]\n existing_eps = self.episodes[eps_idx - self._num_episodes_evicted]\n old_len = len(existing_eps)\n self._indices.extend([(eps_idx, old_len + i) for i in range(len(eps))])\n existing_eps.concat_episode(eps)\n # New episode. Add to end of our episodes deque.\n else:\n self.episodes.append(eps)\n eps_idx = len(self.episodes) - 1 + self._num_episodes_evicted\n self.episode_id_to_index[eps.id_] = eps_idx\n self._indices.extend([(eps_idx, i) for i in range(len(eps))])\n\n # Eject old records from front of deque (only if we have more than 1 episode\n # in the buffer).\n while self._num_timesteps > self.capacity and self.get_num_episodes() > 1:\n # Eject oldest episode.\n evicted_eps = self.episodes.popleft()\n evicted_eps_len = len(evicted_eps)\n # Correct our size.\n self._num_timesteps -= evicted_eps_len\n\n # Erase episode from all our indices:\n # 1) Main episode index.\n evicted_idx = self.episode_id_to_index[evicted_eps.id_]\n del self.episode_id_to_index[evicted_eps.id_]\n # 2) All timestep indices that this episode owned.\n new_indices = [] # New indices that will replace self._indices.\n idx_cursor = 0\n # Loop through all (eps_idx, ts_in_eps_idx)-tuples.\n for i, idx_tuple in enumerate(self._indices):\n # This tuple is part of the evicted episode -> Add everything\n # up until here to `new_indices` (excluding this very index, b/c\n # it's already part of the evicted episode).\n if idx_cursor is not None and idx_tuple[0] == evicted_idx:\n new_indices.extend(self._indices[idx_cursor:i])\n # Set to None to indicate we are in the eviction zone.\n idx_cursor = None\n # We are/have been in the eviction zone (i pointing/pointed to the\n # evicted episode) ..\n elif idx_cursor is None:\n # ... 
but are now not anymore (i is now an index into a\n # non-evicted episode) -> Set cursor to valid int again.\n if idx_tuple[0] != evicted_idx:\n idx_cursor = i\n # But early-out if evicted episode was only 1 single\n # timestep long.\n if evicted_eps_len == 1:\n break\n # Early-out: We reached the end of the to-be-evicted episode.\n # We can stop searching further here (all following tuples\n # will NOT be in the evicted episode).\n elif idx_tuple[1] == evicted_eps_len - 1:\n assert self._indices[i + 1][0] != idx_tuple[0]\n idx_cursor = i + 1\n break\n\n # Jump over (splice-out) the evicted episode if we are still in the\n # eviction zone.\n if idx_cursor is not None:\n new_indices.extend(self._indices[idx_cursor:])\n\n # Reset our `self._indices` to the newly compiled list.\n self._indices = new_indices\n\n # Increase episode evicted counter.\n self._num_episodes_evicted += 1", "def insertEpisode(ep):\n if check(\"episodes\", ep):\n return \"episode exists\"\n else:\n engine.execute(f\"INSERT INTO episodes (episode) VALUES ('{ep}');\")", "def new_episode(self):\n self.game.new_episode()", "def create_episode(conn, episode):\n sql = '''INSERT INTO episode(date, id_show, id_corpus, partition, path)\n VALUES(?,?,?,?,?)'''\n cur = conn.cursor()\n cur.execute(sql, episode)\n return cur.lastrowid", "def addEpisode(config, title, desc, mp3File, duration):\n logger.info(\"Adding episode to the RSS feed...\")\n\n # Create the item for the new episode\n item = ET.Element('item')\n\n # Add the sub elements from the config file\n addSubElementFromConfig(item, 'itunes:author', config, 'episodeAuthor')\n # addSubElementFromConfig(item, 'itunes:subtitle', config)\n addSubElementFromConfig(item, 'itunes:explicit', config, 'episodeExplicit')\n addSubElementFromConfig(item, 'itunes:image', config, 'episodeImage')\n\n # Add the remaining sub elements\n addSubElement(item, 'title', title)\n addSubElement(item, 'description', desc)\n addSubElement(item, 'itunes:summary', desc)\n addSubElement(item, 'pubDate', getFormattedUtcTime())\n\n # Format the duration\n durationStr = \"%0.f:%02.f:%02.f\" % duration\n addSubElement(item, 'itunes:duration', durationStr)\n\n # Create the public link to the episode\n episodeDir = config['episodeDir']\n if not episodeDir.endswith('/'):\n episodeDir += \"/\"\n\n episodeLink = generateLink(config['episodeDir'], os.path.basename(mp3File))\n\n addSubElement(item, 'guid', episodeLink)\n\n # Create the enclosure tag\n byteLength = os.path.getsize(mp3File)\n ET.SubElement(item, 'enclosure',\n url=episodeLink,\n length=str(byteLength),\n type=\"audio/mpeg3\")\n\n # Now generate the XML\n generateXml(config, item)", "def _add_season(self, ep, dvd=False):\n if dvd:\n snum = ep['dvd_season']\n seasons = self.dvd_seasons\n else:\n snum = ep['seasonnumber']\n seasons = self.seasons\n if seasons.has_key(snum):\n return seasons[snum]\n else:\n s = Season(\n sasonnumber=snum,\n seasonid=ep['seasonid'],\n seriesid=ep['seriesid']\n )\n s.series = self\n seasons[snum] = s\n return s", "def save_episode(results, process_id, episode, seed, dtype='tfrecord'):\n if dtype == 'tfrecord':\n save_episode_tf_record(results_dir, results, process_id, episode)\n else:\n assert dtype == 'numpy'\n save_episode_numpy(results, seed)", "def test_create_episode(self):\n episode = self._create_sample_episode()\n\n self.assertEqual(\n self.storage.get_episode(episode.study_id, episode.session_id,\n episode.id), episode)", "def set(self, episodes):\n self.episode_set = episodes", "def _create_sample_episode(self) 
-> study_pb2.Episode:\n study_id, session_id = self.init_session()\n episode = sample_episode(study_id=study_id, session_id=session_id)\n self.storage.create_episode(episode)\n return episode", "def create_relation_to_episode(episode_id):\n epi = Episode.query.get(episode_id)\n if not epi:\n abort(404)\n\n\n data = request.json\n if any([\n 'id' in data and not isinstance(data.get('id'), int)\n ]):\n abort(400)\n\n dire = Director.query.get(data[\"id\"])\n if not dire:\n abort(404)\n\n epi.directors.append(dire)\n db.session.commit()\n return jsonify({'result': f\"{dire} directed episode {epi}\"})", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n \n if done and self.episode % 50 == 0 and self.episode > 50:\n self.episode += 1\n self.remove_bad_experience()\n elif done:\n self.episode += 1", "def add_to_dataset(self, dataset: Dataset):\n pass", "def store_episode(self):\n episode_buffer = self._convert_episode_to_batch_major()\n episode_batch_size = len(episode_buffer['observation'])\n idx = self._get_storage_idx(episode_batch_size)\n\n for key in self._buffer:\n self._buffer[key][idx] = episode_buffer[key]\n self._n_transitions_stored = min(\n self._size_in_transitions, self._n_transitions_stored +\n self._time_horizon * episode_batch_size)", "def test_create_existing_episode(self):\n episode = self._create_sample_episode()\n with self.assertRaises(ValueError, msg='Episode already exists.'):\n self.storage.create_episode(episode)", "def addEvent(self, id, data, timp, descriere):\r\n event = Evenimente(id, data, timp, descriere)\r\n if self.validator.validare_event(event):\r\n self.repository.add_repository(event)\r\n return event", "def episode_step(self, action: Schema) -> None:\n raise NotImplementedError(\"episode_step not implemented.\")", "def add_exercise( self, exercise ):\n self.exercises.append( exercise )", "def episode_step(self):\n self.nsteps += 1", "def episode(self, title=None, episode=None):\n key = f'{self.key}/children'\n if title is not None and not isinstance(title, int):\n return self.fetchItem(key, Episode, title__iexact=title)\n elif episode is not None or isinstance(title, int):\n if isinstance(title, int):\n index = title\n else:\n index = episode\n return self.fetchItem(key, Episode, parentIndex=self.index, index=index)\n raise BadRequest('Missing argument: title or episode is required')", "def concat_episode(self, episode_chunk: \"_Episode\"):\n assert episode_chunk.id_ == self.id_\n assert not self.is_done\n # Make sure the timesteps match.\n assert self.t == episode_chunk.t_started\n\n episode_chunk.validate()\n\n # Make sure, end matches other episode chunk's beginning.\n assert np.all(episode_chunk.observations[0] == self.observations[-1])\n # Make sure the timesteps match (our last t should be the same as their first).\n assert self.t == episode_chunk.t_started\n # Pop out our end.\n self.observations.pop()\n\n # Extend ourselves. 
In case, episode_chunk is already terminated (and numpyfied)\n # we need to convert to lists (as we are ourselves still filling up lists).\n self.observations.extend(list(episode_chunk.observations))\n self.actions.extend(list(episode_chunk.actions))\n self.rewards.extend(list(episode_chunk.rewards))\n self.t = episode_chunk.t\n self.states = episode_chunk.states\n\n if episode_chunk.is_terminated:\n self.is_terminated = True\n elif episode_chunk.is_truncated:\n self.is_truncated = True\n # Validate.\n self.validate()", "def create_episode(e, debug=False):\n #{\"title\": , \"summary\": , \"image\": , \"link\": , \"season\": , \"number\": , \"rating\"}\n\n if debug:\n print(\"beginning create_episode()\")\n\n episode = {}\n\n # get BeautifulSoup data for extracting details\n episode_url = \"https://www.imdb.com/\" + e[\"link\"]\n episode_soup = bs4.BeautifulSoup(requests.get(episode_url).text, features=\"html.parser\")\n\n #get title\n title_wrapper = episode_soup.select(\".title_wrapper\")[0]\n episode[\"title\"] = title_wrapper.select(\"h1\")[0].contents[0].replace(u'\\xa0', ' ')\n\n #get summary\n episode[\"summary\"] = episode_soup.select(\".summary_text\")[0].contents[0].replace(u'\\n', ' ')\n\n #get image\n episode[\"image\"] = get_image(e[\"link\"], debug)\n\n #link\n episode[\"link\"] = e[\"link\"]\n\n #season\n episode[\"season\"] = e[\"season\"]\n\n #number\n episode[\"number\"] = e[\"episode_number\"]\n\n #rating\n episode[\"rating\"] = e[\"rating\"]\n\n return episode", "def add(self, obs_t, action, reward, obs_tp1, done, info):\n assert self.replay_buffer is not None\n # Update current episode buffer\n self.episode_transitions.append((obs_t, action, reward, obs_tp1, done, info))\n if done:\n # Add transitions (and imagined ones) to buffer only when an episode is over\n self._store_episode()\n # Reset episode buffer\n self.episode_transitions = []", "def add(self, movie):\n self.movies.append(movie)", "def sample_episode(\n study_id: str,\n session_id: str,\n episode_id: str = 'test',\n state: study_pb2.Episode.State = study_pb2.Episode.STATE_COMPLETED,\n start_time: Optional[datetime.datetime] = None,\n email: str = '[email protected]',\n num_steps: int = 100,\n total_reward: float = 1.0) -> study_pb2.Episode:\n episode = study_pb2.Episode(\n study_id=study_id,\n session_id=session_id,\n id=episode_id,\n state=state,\n user=study_pb2.User(email=email),\n num_steps=num_steps,\n total_reward=total_reward)\n if not start_time:\n start_time = datetime.datetime.now()\n episode.start_time.FromDatetime(start_time)\n return episode", "def new_episode(self, scores):\n\n # Keep track of an average score for use with annealing epsilon,\n # TODO: this currently lives in new_episode() because we only want to\n # update epsilon each episode, not each timestep, currently. 
This should\n # be further investigate about moving this into the epsilon property\n # itself instead of here\n avg_across = np.clip(len(scores), 1, 50)\n self.avg_score = np.array(scores[-avg_across:]).mean()\n\n self.memory.init_n_step()\n self.episode += 1", "def add_podcast(_name_of_the_podcast, _duration_in_number_of_seconds,\r\n _host, _participants):\r\n # creating an instance of our Podcast constructor\r\n new_podcast = Podcast(name_of_the_podcast=_name_of_the_podcast,\r\n duration_in_number_of_seconds=_duration_in_number_of_seconds,\r\n host=_host, participants=_participants)\r\n db.session.add(new_podcast) # add new Podcast to database session\r\n db.session.commit() # commit changes to session\r", "def get_episode(self, ep_id):\n sub_id = self._get_sub_id_for_ep(ep_id)\n html = self._get_html_for_subject_eps(sub_id)\n return BangumiEpisode.from_html(ep_id, html)", "def create_episodes_from_feed(self, entries):\n guids = self.podcast.episode_set.values_list(\"guid\", flat=True)\n entries = [entry for entry in entries if entry[\"id\"] not in guids]\n\n episodes = [\n episode\n for episode in [self.create_episode_from_feed(entry) for entry in entries]\n if episode\n ]\n return Episode.objects.bulk_create(episodes, ignore_conflicts=True)" ]
[ "0.7943587", "0.68175185", "0.6635185", "0.6529123", "0.63753206", "0.6367", "0.60311574", "0.5871607", "0.5834273", "0.58265716", "0.5808693", "0.5798912", "0.5786846", "0.57109386", "0.5707757", "0.5678602", "0.5658988", "0.5596643", "0.55840445", "0.5557425", "0.55429876", "0.5525358", "0.54686034", "0.5461457", "0.54526794", "0.54510903", "0.54486257", "0.53982246", "0.5352646", "0.53364295" ]
0.69033957
1
Depending on the env, make changes to image suffix `suff`.
def _change_name(self, suff, info_extra): if 'cable-ring' in self.path: i1 = info_extra['convex_hull_area'] i2 = info_extra['best_possible_area'] f = i1 / i2 suff = suff.replace('.png', f'-area-{i1:0.3f}-best-{i2:0.3f}-FRAC-{f:0.3f}.png') elif 'cloth-flat' in self.path: i1 = info_extra['cloth_coverage'] suff = suff.replace('.png', f'-coverage-{i1:0.3f}.png') elif 'bag-alone' in self.path: i1 = info_extra['convex_hull_area'] i2 = info_extra['best_possible_area'] suff = suff.replace('.png', f'-area-{i1:0.3f}-best-{i2:0.3f}.png') else: pass return suff
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_base_image_labels(driver, user_disk, img_name, branch, target):\n\n dashes = [i for i, c in enumerate(img_name) if c=='-']\n cf_version = img_name[dashes[0]+1:dashes[3]]\n build_id = img_name[dashes[-1]+1:]\n\n driver.ex_set_volume_labels(user_disk,\n {'cf_version': cf_version, 'branch': branch,\n 'target': target, 'build_id': build_id})", "def docker_image_tag(self, app):\n return f\"briefcase/{app.bundle}.{app.app_name.lower()}:{app.target_vendor}-{app.target_codename}\"", "def dev_up():\n _with_deploy_env(['./bin/develop up'])", "def _configure_image_name(self, ccd_operation_mode,\n include_star_mag=False):\n dic = ccd_operation_mode\n em_gain = '_G' + str(dic['em_gain'])\n em_mode = 'CONV'\n if dic['em_mode'] == 1:\n em_mode = 'EM'\n hss = '_HSS' + str(dic['hss'])\n preamp = '_PA' + str(dic['preamp'])\n binn = '_B' + str(dic['binn'])\n t_exp = '_TEXP' + str(dic['t_exp'])\n self.image_name = em_mode + hss + preamp + binn + t_exp + em_gain\n\n if include_star_mag:\n star_flux = '_S' + str(self.star_magnitude)\n self.image_name += star_flux", "def prepare_image_for_deploy(runtime: \"mlrun.runtimes.BaseRuntime\"):\n pass", "def generate_image_name(self, image):\n return image.replace('shub://', '').replace('/', '-') + '.simg'", "def setup_ec2_launch_override_to_emulate_ice(\n cluster, single_instance_type_ice_cr=\"\", multi_instance_types_ice_cr=\"\", multi_instance_types_exp_cr=\"\"\n):\n remote_command_executor = RemoteCommandExecutor(cluster)\n\n # fmt: off\n remote_command_executor.run_remote_script(\n script_file=str(SCALING_COMMON_DATADIR / \"overrides.sh\"),\n args=[\n f\"--single-instance-type-ice-cr \\\"{single_instance_type_ice_cr}\\\"\",\n f\"--multi-instance-types-ice-cr \\\"{multi_instance_types_ice_cr}\\\"\",\n f\"--multi-instance-types-exp-cr \\\"{multi_instance_types_exp_cr}\\\"\",\n ],\n run_as_root=True,\n )\n # fmt: on", "def cmd_up(self, services=None, verbose=False, build_log=None):\n\n # Re-populating the context...\n self.kard.make()\n\n eff_modules = self._resolve_services(services)\n\n # Image names may be different from service names (e.g. 
image re-use)\n images = set(s['image'].partition(':')[0] # without \":tag\" suffix\n for s in self._load_compose_config().services\n if s['name'] in eff_modules)\n\n self.build_images(images, verbose=verbose, logfile=build_log)\n\n self.start(services)\n\n # Do a nap while the containers are launching before calling\n # post_compose\n time.sleep(5)\n\n # Call post run handlers on extensions\n self.kard.extensions.post_up(eff_modules)", "def test_change_name_of_the_devicetrue():", "def test_redeploy_container_asset(self):\n pass", "def image_do_set(self, image_type, image_name):\n if image_type != \"kernel\" and image_type != \"kernel-x86\":\n raise Exception(\"%s: image type not supported (only \"\n \"'kernel[-x86]')\")\n\n self.log.info(\"rebooting to flash image %s:%s \" \\\n % (image_type, image_name))\n # So here is the trick: we reboot the target--but by setting\n # `self.image_name` to a file name, the post-power up sequence\n # implemented by power_on_do_post() is redirected to do the\n # upload sequence and then we power off.\n try:\n self.image_name = image_name\n self.power_cycle(self.owner_get())\n self.image_name = None\n except:\n self.image_name = None\n raise\n self.log.info(\"powering off after flashing image %s:%s \" \\\n % (image_type, image_name))\n self.power_off(self.owner_get())", "def suffix_replace(original, old, new):\n ...", "def test_change_name_of_the_devicefalse():", "def f2suff(forfile, opath, suff):\n import os\n\n idir = os.path.dirname(forfile)\n ifile = os.path.basename(forfile)\n odir = idir + '/' + opath\n ofile = ifile[0:ifile.rfind('.')] + '.' + suff\n\n return odir + '/' + ofile", "def _edit_arch_target_based(self, spec, prefix):\n if spec.version < Version(\"2.14\"):\n return False\n\n found_special_opt = False\n with working_dir(\"arch\"):\n arch_filename = \"{0}.arch\".format(self.build_directory)\n\n replace = [\n [r\"^CHARMARCH = .*$\", \"CHARMARCH = {0}\".format(self.spec[\"charmpp\"].charmarch)],\n [r\"^NAMD_ARCH = .*$\", \"NAMD_ARCH = {0}\".format(self.arch)],\n ]\n\n # Optimizations for skylake_avx512\n if (\n spec.platform == \"linux\"\n and self.compiler.name == \"intel\"\n and \"avx512\" in spec.target\n and spec.target >= \"skylake_avx512\"\n ):\n if spec.version >= Version(\"2.15\") and os.path.exists(\"Linux-AVX512-icc.arch\"):\n tty.info(\"Building binaries with AVX512-tile optimization\")\n copy(\"Linux-AVX512-icc.arch\", arch_filename)\n elif spec.version >= Version(\"2.14\") and os.path.exists(\"Linux-SKX-icc.arch\"):\n tty.info(\"Building binaries with Skylake-X\" \"AVX512 optimization\")\n copy(\"Linux-SKX-icc.arch\", arch_filename)\n else:\n return False\n\n replace.append([r\"^CXX = icpc\", \"CXX = {0}\".format(self.compiler.cxx)])\n replace.append([r\"^CC = icc\", \"CC = {0}\".format(self.compiler.cc)])\n found_special_opt = True\n\n if found_special_opt:\n for pattern, replacement in replace:\n filter_file(pattern, replacement, arch_filename)\n\n return found_special_opt", "def change_env(args):\n if len(args) != 2:\n raise Exception(\"syco chagne-env [env]\")\n\n env = args[1]\n\n app.print_verbose(\"Change to env \" + env)\n x(\"rm %spasswordstore \" % (SYCO_ETC_PATH))\n x(\"ln -s %spasswordstore.%s %spasswordstore\" % (\n SYCO_ETC_PATH, env, SYCO_ETC_PATH)\n )\n\n if os.access(app.SYCO_USR_PATH, os.F_OK):\n for plugin in os.listdir(app.SYCO_USR_PATH):\n plugin_path = os.path.abspath(app.SYCO_USR_PATH + plugin + \"/etc/\")\n\n x(\"rm %s/install.cfg \" % (plugin_path))\n x(\"ln -s %s/install-%s.cfg %s/install.cfg\" % 
(plugin_path, env, plugin_path))", "def configure(image_name, aws_region, aws_profile, python_version, requirements_dir):\n logger.info(ASCII_LOGO)\n _configure('.', image_name, aws_region, aws_profile, python_version, requirements_dir)", "def setupContainerFile(\n userNotif: UserNotif,\n ctx: ExecContext,\n rebuild: bool,\n inPlace: bool,\n cacheDir: Path) -> None:\n if not ctx.containerFile:\n raise RuntimeError(f\"{ctx.name}: container-file required\")\n\n localName, localFile = getLocalName(cacheDir, ctx.imageName, update=False)\n containerFileCopy: Optional[str] = None\n\n buildReasons: List[str] = []\n if rebuild:\n buildReasons.append(\"--rebuild set\")\n if not localFile.exists():\n buildReasons.append(f\"{localFile} doesn't exists\")\n elif localFile.read_text().strip() != ctx.containerFile.strip():\n if inPlace:\n # TODO: generalize this\n containerFileCopy = ctx.containerFile\n ctx.containerFile = \"\\n\".join([\n f\"FROM {localName}\",\n ctx.containerFile.split(\"\\n\")[-1]\n ])\n else:\n # TODO: show diff?\n ...\n buildReasons.append(f\"{localFile} content differ\")\n if not buildReasons and not podmanExists(\"image\", ctx.imageName):\n buildReasons.append(f\"{ctx.imageName} doesn't exist in the store\")\n\n if buildReasons:\n tmpFile = Path(str(localFile) + \".tmp\")\n tmpFile.parent.mkdir(parents=True, exist_ok=True)\n tmpFile.write_text(ctx.containerFile)\n userNotif(f\"Building {ctx.imageName} with {tmpFile} because: \" +\n \", \".join(buildReasons))\n try:\n build(tmpFile, localName, ctx.imageBuildCtx)\n except RuntimeError as e:\n raise RuntimeError(f\"Build of {tmpFile} failed: \" + str(e))\n if containerFileCopy:\n localFile.write_text(containerFileCopy)\n else:\n tmpFile.rename(localFile)", "def test_adjustixes(self) -> None:\n r = adjustixes('file', 'pre-', '-suf')\n assert r == 'pre-file-suf', r\n r = adjustixes('pre-file', 'pre-', '-suf')\n assert r == 'pre-file-suf', r\n r = adjustixes('file-suf', 'pre-', '-suf')\n assert r == 'pre-file-suf', r\n r = adjustixes('pre-file-suf', 'pre-', '-suf')\n assert r == 'pre-file-suf', r\n r = adjustixes('pre-file.xxx', 'pre-', '-suf')\n assert r == 'pre-file.xxx', r\n r = adjustixes('dir/file', 'pre-', '-suf')\n assert r == os.path.join('dir', 'pre-file-suf'), r\n\n # Verify that the odd case when library name is specified as 'lib'\n # doesn't yield lib.so, but yields the expected liblib.so\n r = adjustixes('PREFIX', 'PREFIX', 'SUFFIX')\n assert r == 'PREFIXPREFIXSUFFIX', \"Failed handling when filename = PREFIX [r='%s']\" % r", "def sdss_env(request):\n m = request.getfixturevalue(\"monkeypatch\")\n for p in ('PHOTO_CALIB', 'PHOTO_DATA', 'BOSS_PHOTOOBJ', 'PHOTO_REDUX',\n 'PHOTO_RESOLVE', 'PHOTO_SKY', 'PHOTO_SWEEP'):\n m.setenv(p, '/' + p)\n return m", "def tweak_new_filesystem(root_dir):\n\n # create a symlink for insserv\n force_symlink('../usr/lib/insserv/insserv',\n os.path.join(root_dir, 'sbin/insserv'))\n\n # create a symlink for awk\n force_symlink('mawk', os.path.join(root_dir, 'usr/bin/awk'))\n\n # Nvidia keeps packaging up a broken post-install script for their cudnn\n # deb. Freaking nvidia\n cudnn_postinst_path = 'var/lib/dpkg/info/libcudnn6-dev.postinst'\n cudnn_postinst_path = os.path.join(root_dir, cudnn_postinst_path)\n\n if os.path.exists(cudnn_postinst_path):\n with open(cudnn_postinst_path, 'r') as infile:\n content = infile.read()\n if not content.startswith(\"#!\"):\n with open(cudnn_postinst_path, 'w') as outfile:\n outfile.write('#! 
/bin/sh\\n')\n outfile.write(content)\n\n # NOTE(josh): patch the base-packages post-install hook so it doesn't\n # complain about files in /var/run\n basefiles_path = os.path.join(root_dir,\n 'var/lib/dpkg/info/base-files.postinst')\n if os.path.exists(basefiles_path):\n apply_patch_text(BASE_FILES_PATCH, root_dir)\n\n # NOTE(josh): ifupdown should depend on initscripts, but it doesn't\n status_path = os.path.join(root_dir, 'var/lib/dpkg/status')\n tempfile_path = status_path + '.tmp'\n with open(tempfile_path, 'wb') as outfile:\n with open(status_path, 'rb') as infile:\n for line in infile:\n outfile.write(line)\n if line.strip() == 'Package: ifupdown':\n break\n\n for line in infile:\n if line.startswith('Depends: '):\n line = ', '.join(line.strip().split(', ') + ['initscripts']) + '\\n'\n outfile.write(line)\n break\n else:\n outfile.write(line)\n\n for line in infile:\n outfile.write(line)\n os.rename(tempfile_path, status_path)\n\n # NOTE(josh): resolvconf tries to a write a file in this directory\n try:\n target_path = os.path.join(root_dir, 'run/resolvconf/interface')\n os.makedirs(target_path)\n except OSError:\n if not os.path.isdir(target_path):\n raise\n\n # NOTE(josh): Can't postinst makedev without CAP_MKNOD\n if os.getuid() != 0:\n makedev_postinst = os.path.join(root_dir,\n 'var/lib/dpkg/info/makedev.postinst')\n if os.path.exists(makedev_postinst):\n os.rename(makedev_postinst, makedev_postinst + '.bak')\n\n # remove temporary/boostrap files\n files_to_remove = ['etc/apt/sources.list.d/bootstrap.list']\n\n for filename in files_to_remove:\n file_path = os.path.join(root_dir, filename)\n if os.path.exists(file_path):\n os.remove(file_path)", "def generate_dockerfile_extension(base_image, template_name, config_path):\n template_path = get_template_path(template_name, config_path)\n template_file = os.path.join(template_path, \"Dockerfile\")\n dockerfile = \".Dockerfile.luda\"\n\n def remove():\n if os.path.exists(dockerfile):\n os.remove(dockerfile)\n\n with cd(template_path, remove):\n with open(dockerfile, \"w\") as output:\n docker_str = j2docker.render(base_image, template_file).decode().strip()\n output.write(docker_str)\n client = docker.from_env()\n if base_image.startswith(\"luda/\"):\n _, _, image_name = base_image.partition(\"luda/\")\n image_name, _, tag = image_name.partition(\":\")\n image_name = \"luda/{0}:{1}-{2}\".format(image_name, tag, template_name)\n else:\n image_name = \"luda/{0}:{1}\".format(base_image.replace('/', '-').replace(':', '-'), template_name)\n click.echo(\"Building image: {0} ...\".format(image_name))\n client.images.build(path=os.getcwd(), tag=image_name, dockerfile=dockerfile) # This line doesn't work with Python 3...\n return image_name", "def test_update_software_asset_bundle(self):\n pass", "def create_sh_script(\n unblur_path, input_image, output_dir,\n input_dir, input_suffix, options\n ):\n strSh = ''\n\n # To make sure it is a bash script\n strSh += '#!/bin/bash\\n\\n'\n\n # Export number of threads\n strSh += 'export OMP_NUM_THREADS={:d}\\n'.format(options.nr_threads)\n\n # The script will abort with non-zero exit values\n strSh += '# The script will abort with non-zero exit values\\n'\n strSh += 'set -e\\n'\n\n # Create a file list of all files\n strSh += '# Create a file list of all files\\n'\n strSh += 'fileList=$(ls {:s})\\n'.format(\n input_image\n )\n\n # Create folders\n strSh += '# Create folders\\n'\n strSh += 'mkdir -p {:s}/Doseuncorrected\\n'.format(output_dir)\n\n strSh += 'mkdir -p 
{:s}/Shift\\n'.format(output_dir)\n\n strSh += 'mkdir -p {:s}/Temp\\n'.format(output_dir)\n\n if options.filter_sum:\n strSh += 'mkdir -p {:s}/Filtered\\n'.format(output_dir)\n\n if options.dose_filter:\n strSh += 'mkdir -p {:s}/Dosecorrected\\n'.format(output_dir)\n\n if options.expert_mode:\n strSh += 'mkdir -p {:s}/FRC\\n\\n'.format(output_dir)\n\n # Abort script if files in Doseuncorrected already exists\n strSh += '# Abort script if files in Doseuncorrected already exists\\n'\n strSh += 'for f in {:s}/Doseuncorrected/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in shift already exists\n strSh += '# Abort script if files in shift already exists\\n'\n strSh += 'for f in {:s}/Shift/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in Dosecorrected already exists\n strSh += '# Abort script if files in Dosecorrected already exists\\n'\n strSh += 'for f in {:s}/Dosecorrected/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in Filtered already exists\n strSh += '# Abort script if files in Filtered already exists\\n'\n strSh += 'for f in {:s}/Filtered/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in FRC already exists\n strSh += '# Abort script if files in FRC already exists\\n'\n strSh += 'for f in {:s}/FRC/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Loop over all files\n strSh += '\\nfor file in $fileList\\ndo\\n\\n'\n\n strSh += 'baseName=${{file%{:s}}}\\n'.format(input_suffix)\n strSh += 'baseName=${{baseName#{:s}}}\\n'.format(input_dir)\n\n # Create a temporary file to work with to prevent format issues\n strSh += '# Create a temporary file to work with to prevent format issues\\n'\n strSh += 'e2proc3d.py $file {:s}/Temp/${{baseName}}_temp.mrc\\n\\n'.format(output_dir)\n\n # Remove some temporary files that unblur makes\n strSh += '# Remove some temporary files that unblur makes\\n'\n strSh += 'for f in .UnBlur*\\n'\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'rm .UnBlur*\\n'\n strSh += 'break\\n'\n strSh += 'else\\n'\n strSh += 'true\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Start Unblur without dose correction\n strSh += '{:s} << eof\\n'.format(unblur_path)\n\n # Input File\n strSh += '{:s}/Temp/${{baseName}}_temp.mrc\\n'.format(output_dir)\n # Number of Frames\n strSh += '{:d}\\n'.format(options.nr_frames)\n # Sum 
File\n strSh += '{:s}/Doseuncorrected/${{baseName}}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix\n )\n # Shift File\n strSh += '{:s}/Shift/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.shift_suffix\n )\n # Pixel Size\n strSh += '{:f}\\n'.format(options.pixel_size)\n\n # Say no to Dose Filtering\n strSh += 'NO\\n'\n\n if options.save_frames:\n # Say yes to Save Frames\n strSh += 'YES\\n'\n # Frames file\n strSh += '{:s}/Doseuncorrected/${{baseName}}{:s}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix,\n options.frames_suffix\n )\n else:\n # Say no to Save Frames\n strSh += 'NO\\n'\n\n if options.expert_mode:\n # Say yes to Expert Mode\n strSh += 'YES\\n'\n # FRC File\n strSh += '{:s}/FRC/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.frc_suffix\n )\n # Minimum Shift for initial search\n strSh += '{:f}\\n'.format(options.shift_initial)\n # Outer Radius Shift Limit\n strSh += '{:f}\\n'.format(options.shift_radius)\n # B-Factor to Apply\n strSh += '{:f}\\n'.format(options.b_factor)\n # Half-Width Vertical\n strSh += '{:d}\\n'.format(options.fourier_vertical)\n # Hald-Width Horizontal\n strSh += '{:d}\\n'.format(options.fourier_horizontal)\n # Termination Shift Threshold\n strSh += '{:f}\\n'.format(options.shift_threshold)\n # Maximum Iterations\n strSh += '{:d}\\n'.format(options.iterations)\n # Restore Noise Power\n if options.restore_noise:\n # Say yes to Restore Noise Power\n strSh += 'YES\\n'\n else:\n # Say no to Restore Noise Power\n strSh += 'NO\\n'\n # Verbose Output\n if options.verbose:\n # Say yes to Verbose Output\n strSh += 'YES\\n'\n else:\n # Say no to Verbose Output\n strSh += 'NO\\n'\n else:\n # Say no to Expert Mode\n strSh += 'NO\\n'\n\n # Enf of file reached\n strSh += 'eof\\n\\n'\n\n # Remove some temporary files that unblur makes\n strSh += 'for f in .UnBlur*\\n'\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'rm .UnBlur*\\n'\n strSh += 'break\\n'\n strSh += 'else\\n'\n strSh += 'true\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # =========== #\n if options.dose_filter:\n\n # Start Unblur with dose correction\n strSh += '{:s} << eof\\n'.format(unblur_path)\n\n # Input File\n strSh += '{:s}/Temp/${{baseName}}_temp.mrc\\n'.format(output_dir)\n # Number of Frames\n strSh += '{:d}\\n'.format(options.nr_frames)\n # Sum File\n strSh += '{:s}/Dosecorrected/${{baseName}}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix\n )\n # Shift File\n strSh += '{:s}/Shift/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.shift_suffix\n )\n # Pixel Size\n strSh += '{:f}\\n'.format(options.pixel_size)\n\n # Say yes to Dose Filtering\n strSh += 'YES\\n'\n # Exposure per Frame\n strSh += '{:f}\\n'.format(options.exposure_per_frame)\n # Acceleration Voltage\n strSh += '{:f}\\n'.format(options.voltage)\n # Pre Exposure\n strSh += '{:f}\\n'.format(options.pre_exposure)\n\n if options.save_frames:\n # Say yes to Save Frames\n strSh += 'YES\\n'\n # Frames file\n strSh += '{:s}/Dosecorrected/${{baseName}}{:s}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix,\n options.frames_suffix\n )\n else:\n # Say no to Save Frames\n strSh += 'NO\\n'\n\n if options.expert_mode:\n # Say yes to Expert Mode\n strSh += 'YES\\n'\n # FRC File\n strSh += '{:s}/FRC/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.frc_suffix\n )\n # Minimum Shift for initial search\n strSh += '{:f}\\n'.format(options.shift_initial)\n # Outer Radius Shift Limit\n strSh += '{:f}\\n'.format(options.shift_radius)\n # B-Factor to Apply\n 
strSh += '{:f}\\n'.format(options.b_factor)\n # Half-Width Vertical\n strSh += '{:d}\\n'.format(options.fourier_vertical)\n # Hald-Width Horizontal\n strSh += '{:d}\\n'.format(options.fourier_horizontal)\n # Termination Shift Threshold\n strSh += '{:f}\\n'.format(options.shift_threshold)\n # Maximum Iterations\n strSh += '{:d}\\n'.format(options.iterations)\n # Restore Noise Power\n if options.restore_noise:\n # Say yes to Restore Noise Power\n strSh += 'YES\\n'\n else:\n # Say no to Restore Noise Power\n strSh += 'NO\\n'\n # Verbose Output\n if options.verbose:\n # Say yes to Verbose Output\n strSh += 'YES\\n'\n else:\n # Say no to Verbose Output\n strSh += 'NO\\n'\n else:\n # Say no to Expert Mode\n strSh += 'NO\\n'\n\n # Enf of file reached\n strSh += 'eof\\n\\n'\n\n # Remove temporary file\n strSh += 'rm {:s}/Temp/${{baseName}}_temp.mrc\\n'.format(output_dir)\n\n # Remove some temporary files that unblur makes\n # Remove some temporary files that unblur makes\n strSh += 'for f in .UnBlur*\\n'\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'rm .UnBlur*\\n'\n strSh += 'break\\n'\n strSh += 'else\\n'\n strSh += 'true\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n if options.filter_sum:\n # Filter Images\n lowpass_angstrom = options.pixel_size / options.lowpass\n highpass_angstrom = options.pixel_size / options.highpass\n strSh += \\\n 'e2proc3d.py {:s}/Doseuncorrected/${{baseName}}{:s}.mrc '.format(\n output_dir,\n options.sum_suffix\n )\n strSh += '{:s}/Filtered/${{baseName}}{:s}.mrc ' \\\n .format(\n output_dir,\n options.sum_suffix\n )\n strSh += '--process=filter.lowpass.gauss:cutoff_freq={:f} '.format(\n options.lowpass\n )\n strSh += '--process=filter.highpass.gauss:cutoff_freq={:f}\\n\\n' \\\n .format(\n options.highpass\n )\n\n if options.remove_sum:\n # Remove sum files\n strSh += 'rm {:s}/Doseuncorrected/${{baseName}}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix\n )\n\n # Done\n strSh += 'done\\n\\n'\n\n # Remove temp folder\n strSh += 'rm -r {:s}/Temp\\n'.format(output_dir)\n\n strSh += 'echo \"All done!\"'\n\n # Write Output\n with open('{:s}/scriptUnblur.sh'.format(output_dir), 'w') as f:\n f.write(strSh)", "def _reapply_bsdflags_to_image(mysettings):\n\tif bsd_chflags:\n\t\tos.system(\"mtree -e -p %s -U -k flags < %s > /dev/null\" % \\\n\t\t\t(_shell_quote(mysettings[\"D\"]),\n\t\t\t_shell_quote(os.path.join(mysettings[\"T\"], \"bsdflags.mtree\"))))", "def up(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"up --build\"\n\n if remote:\n command = f\"{command} --detach\"\n\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def up_cmd(ctx):\n pass", "def suffix(string, suffix, sep = '_'):\n if suffix == 'production':\n suffixed = string\n else:\n suffixed = string + sep + suffix\n return suffixed", "def execute(helper, config, args):\n env_config = parse_env_config(config, args.environment)\n cname_prefix = env_config.get('cname_prefix', None)\n env_name = args.environment\n\n # change version\n if args.version_label:\n helper.deploy_version(env_name, args.version_label)\n if not args.dont_wait:\n helper.wait_for_environments(env_name, status='Ready', version_label=args.version_label)\n\n # update it\n env = parse_env_config(config, env_name)\n option_settings = parse_option_settings(env.get('option_settings', {}))\n helper.update_environment(env_name,\n description=env.get('description', None),\n 
option_settings=option_settings,\n tier_type=env.get('tier_type'),\n tier_name=env.get('tier_name'),\n tier_version=env.get('tier_version'))\n\n # wait\n if not args.dont_wait:\n helper.wait_for_environments(env_name, health='Green', status='Ready', version_label=args.version_label)\n\n # delete unused\n helper.delete_unused_versions(versions_to_keep=int( get(config, 'app.versions_to_keep', 10) ))", "def switchToAppInstaller(dev):\n print('Switching to app install mode')\n SonyExtCmdCamera(dev).switchToAppInstaller()" ]
[ "0.5632567", "0.55875903", "0.51115805", "0.4960264", "0.49135855", "0.4792716", "0.476165", "0.47523832", "0.47510242", "0.47409478", "0.4703219", "0.46628687", "0.46209535", "0.4608203", "0.45926276", "0.4582887", "0.45757735", "0.45634633", "0.45632792", "0.4560272", "0.4558895", "0.45524743", "0.45474005", "0.45439598", "0.45396808", "0.4531507", "0.4511762", "0.449487", "0.44802305", "0.44700146" ]
0.5841067
0
To get a more fine-grained analysis of environment performance. Many of these require the last_info, which is not saved in info_l; info_l has all the time steps BEFORE that. For cable-ring and bag-alone-open, we terminate based on a fraction. Use `info_l[0]['extras']` to get the dict at the start. The `last_info` is already the 'extras' dict, just use it directly.
def _track_data_statistics(self, info_l, last_info, episode_len, all_stats, maxlen_stats):
    maxlen = get_max_episode_len(self.path)
    start = info_l[0]['extras']
    last_ex = last_info['extras']

    if 'cable-shape' in self.path or 'cable-line-notarget' in self.path:
        nb_sides = start['nb_sides']
        frac_beads = last_ex['nb_zone'] / last_ex['nb_beads']
        if episode_len == maxlen:
            maxlen_stats[f'done_{nb_sides}'].append( last_ex['task.done'] )
            maxlen_stats[f'frac_{nb_sides}'].append( frac_beads )
        all_stats[f'done_{nb_sides}'].append( last_ex['task.done'] )
        all_stats[f'frac_{nb_sides}'].append( frac_beads )
        all_stats[f'len_{nb_sides}'].append( episode_len )

    elif 'cable-ring' in self.path:
        delta = last_ex['fraction'] - start['fraction']
        percent = last_ex['convex_hull_area'] - start['convex_hull_area']
        percent = 100 * percent / start['convex_hull_area']
        if episode_len == maxlen:
            maxlen_stats['done'].append( last_ex['task.done'] )
            maxlen_stats['fraction'].append( last_ex['fraction'] )
            maxlen_stats['fraction_delta'].append( delta )
            maxlen_stats['percent_improve'].append( percent )
        all_stats['done'].append( last_ex['task.done'] )
        all_stats['fraction'].append( last_ex['fraction'] )
        all_stats['fraction_delta'].append( delta )
        all_stats['percent_improve'].append( percent )

    elif 'cloth-flat' in self.path:
        delta = last_ex['cloth_coverage'] - start['cloth_coverage']
        if episode_len == maxlen:
            maxlen_stats['done'].append( last_ex['task.done'] )
            maxlen_stats['coverage_delta'].append( delta )
            maxlen_stats['cloth_coverage'].append( last_ex['cloth_coverage'] )
        all_stats['done'].append( last_ex['task.done'] )
        all_stats['coverage_delta'].append( delta )
        all_stats['cloth_coverage'].append( last_ex['cloth_coverage'] )

    elif 'cloth-cover' in self.path:
        if episode_len == maxlen:
            maxlen_stats['done'].append( last_ex['task.done'] )

    elif 'bag-alone-open' in self.path:
        delta = last_ex['fraction'] - start['fraction']
        percent = last_ex['convex_hull_area'] - start['convex_hull_area']
        percent = 100 * percent / start['convex_hull_area']
        if episode_len == maxlen:
            maxlen_stats['done'].append( last_ex['task.done'] )
            maxlen_stats['fraction'].append( last_ex['fraction'] )
            maxlen_stats['fraction_delta'].append( delta )
            maxlen_stats['percent_improve'].append( percent )
        all_stats['done'].append( last_ex['task.done'] )
        all_stats['fraction'].append( last_ex['fraction'] )
        all_stats['fraction_delta'].append( delta )
        all_stats['percent_improve'].append( percent )

    elif 'bag-items-easy' in self.path or 'bag-items-hard' in self.path:
        # For this it'd be interesting to see what task stage we're at.
        if episode_len == maxlen:
            maxlen_stats['done'].append( last_ex['task.done'] )
            maxlen_stats['task_stage'].append( last_ex['task_stage'] )
            maxlen_stats['zone_items_rew'].append( last_ex['zone_items_rew'] )
            maxlen_stats['zone_beads_rew'].append( last_ex['zone_beads_rew'] )
        all_stats['done'].append( last_ex['task.done'] )
        all_stats['task_stage'].append( last_ex['task_stage'] )
        all_stats['zone_items_rew'].append( last_ex['zone_items_rew'] )
        all_stats['zone_beads_rew'].append( last_ex['zone_beads_rew'] )

    elif 'bag-color-goal' in self.path:
        if episode_len == maxlen:
            maxlen_stats['done'].append( last_ex['task.done'] )
            maxlen_stats['task_stage'].append( last_ex['task_stage'] )
            maxlen_stats['frac_in_target_bag'].append( last_ex['frac_in_target_bag'] )
            maxlen_stats['frac_in_distract_bag'].append( last_ex['frac_in_distract_bag'] )
        all_stats['done'].append( last_ex['task.done'] )
        all_stats['task_stage'].append( last_ex['task_stage'] )
        all_stats['frac_in_target_bag'].append( last_ex['frac_in_target_bag'] )
        all_stats['frac_in_distract_bag'].append( last_ex['frac_in_distract_bag'] )

    else:
        print(f'For: {self.path}, we are not tracking extra stats.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_more_info(model):# pragma: no cover\n\n group_time = model.meta.exposure.group_time\n nframes_used = model.meta.exposure.nframes\n saturated_flag = dqflags.group['SATURATED']\n jump_flag = dqflags.group['JUMP_DET']\n\n return (group_time, nframes_used, saturated_flag, jump_flag)", "def infotodict(seqinfo):\n\n last_run = len(seqinfo)\n\n info = {\n t1w: [], t2w: [], epi_fmap_AP: [], epi_fmap_PA: [],\n\n rest_ap_run1: [], rest_pa_run2: [],\n rest_ap_run3: [], rest_pa_run4: [],\n rest_ap_run1_sbref: [], rest_pa_run2_sbref: [],\n rest_ap_run3_sbref: [], rest_pa_run4_sbref: [],\n\n dwi_ap_run1: [], dwi_pa_run2: [],\n dwi_ap_run3: [], dwi_pa_run4: [],\n dwi_ap_run1_sbref: [], dwi_pa_run2_sbref: [],\n dwi_ap_run3_sbref: [], dwi_pa_run4_sbref: []\n }\n\n def get_latest_series(key, s):\n # if len(info[key]) == 0:\n info[key].append(s.series_id)\n # else:\n # info[key] = [s.series_id]\n\n for s in seqinfo:\n if \"abort\" in s.protocol_name.lower():\n continue\n\n if s.protocol_name == 'SpinEchoFieldMap_AP':\n get_latest_series(epi_fmap_AP, s)\n\n elif s.protocol_name == 'SpinEchoFieldMap_PA':\n get_latest_series(epi_fmap_PA, s)\n\n elif s.protocol_name == 'rfMRI_REST_AP_Run1':\n if s.dim3 > 1:\n get_latest_series(rest_ap_run1, s)\n else:\n get_latest_series(rest_ap_run1_sbref, s)\n\n elif s.protocol_name == 'rfMRI_REST_PA_Run2':\n if s.dim3 > 1:\n get_latest_series(rest_pa_run2, s)\n else:\n get_latest_series(rest_pa_run2_sbref, s)\n\n elif s.protocol_name == 'rfMRI_REST_AP_Run3':\n if s.dim3 > 1:\n get_latest_series(rest_ap_run3, s)\n else:\n get_latest_series(rest_ap_run3_sbref, s)\n\n elif s.protocol_name == 'rfMRI_REST_PA_Run4':\n if s.dim3 > 1:\n get_latest_series(rest_pa_run4, s)\n else:\n get_latest_series(rest_pa_run4_sbref, s)\n\n # dMRI naming conventions switch half-way through. 
Some end with _RunX\n elif s.protocol_name.startswith('dMRI_dir98_AP'):\n if s.dim3 > 1:\n get_latest_series(dwi_ap_run1, s)\n else:\n get_latest_series(dwi_ap_run1_sbref, s)\n\n elif s.protocol_name.startswith('dMRI_dir98_PA'):\n if s.dim3 > 1:\n get_latest_series(dwi_pa_run2, s)\n else:\n get_latest_series(dwi_pa_run2_sbref, s)\n\n elif s.protocol_name.startswith('dMRI_dir99_AP'):\n if s.dim3 > 1:\n get_latest_series(dwi_ap_run3, s)\n else:\n get_latest_series(dwi_ap_run3_sbref, s)\n\n elif s.protocol_name.startswith('dMRI_dir99_PA'):\n if s.dim3 > 1:\n get_latest_series(dwi_pa_run4, s)\n else:\n get_latest_series(dwi_pa_run4_sbref, s)\n\n elif s.protocol_name == 'T1w_MPR':\n get_latest_series(t1w, s)\n\n elif s.protocol_name == 'T2w_SPC':\n get_latest_series(t2w, s)\n\n else:\n print(\"Series not recognized!: \", s.protocol_name, s.dcm_dir_name)\n return info", "def eta_details(self):\n\t\t# Experimentation gives you 72pts to a random science every production\n\t\t# Stupid brute force implementation for now\n\t\trequired = self.required\n\t\trate = self.player.science\n\t\tdef combine(base, add, add_time, chance):\n\t\t\t# add given add into base with +add_time tick and modified by chance\n\t\t\tfor time, p in add.items():\n\t\t\t\ttime += add_time\n\t\t\t\tp *= chance\n\t\t\t\tbase[time] = base.get(time, 0) + p\n\t\tdef _eta_details(value, time_to_prod=self.galaxy.production_rate):\n\t\t\tnaive_eta = max(0, int(math.ceil((required - value)/rate)))\n\t\t\tif naive_eta <= time_to_prod: return {naive_eta: 1}\n\t\t\tbase = {}\n\t\t\twithout_extra = _eta_details(value + rate*time_to_prod)\n\t\t\twith_extra = _eta_details(value + rate*time_to_prod + 72)\n\t\t\tcombine(base, without_extra, time_to_prod, 6/7.)\n\t\t\tcombine(base, with_extra, time_to_prod, 1/7.)\n\t\t\treturn base\n\t\treturn _eta_details(self.current, self.galaxy.production_rate - self.galaxy.production_counter)", "def all_info(stdscr, jetson, key):\n # Screen size\n height, width = stdscr.getmaxyx()\n line_counter = 1\n # Plot Status CPU\n line_counter = plot_CPUs(stdscr, line_counter, jetson.stats['CPU'], width)\n # Plot MTS\n if 'MTS' in jetson.stats:\n line_counter += 1\n stdscr.addstr(line_counter, 0, \"MTS \", curses.color_pair(5))\n MTS_FG = {'name': 'FG',\n 'value': int(jetson.stats['MTS']['fg']),\n }\n linear_percent_gauge(stdscr, MTS_FG, width // 2 - 2,\n offset=line_counter, start=4, color_name=5)\n MTS_BG = {'name': 'BG',\n 'value': int(jetson.stats['MTS']['bg']),\n }\n linear_percent_gauge(stdscr, MTS_BG, width // 2 - 2,\n offset=line_counter, start=2 + width // 2, color_name=5)\n # RAM linear gauge info\n ram_status = jetson.stats['RAM']['RAM']\n lfb_status = jetson.stats['RAM']['lfb']\n RAM_VALUE = {'name': \"Mem\",\n 'value': int(ram_status['used'][-1] / float(ram_status['total']) * 100.0),\n 'label': \"(lfb \" + str(lfb_status['nblock']) + \"x\" + str(lfb_status['size']) + \"MB)\",\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(ram_status['used'][-1] / 1000.0, ram_status['total'] / 1000.0),\n }\n line_counter += 1\n linear_percent_gauge(stdscr, RAM_VALUE, width, offset=line_counter)\n # EMC linear gauge info\n if 'EMC' in jetson.stats:\n line_counter += 1\n linear_percent_gauge(stdscr, make_gauge_from_percent(jetson.stats['EMC']), width, offset=line_counter)\n # IRAM linear gauge info\n iram_status = jetson.stats['IRAM']\n if iram_status:\n line_counter += 1\n IRAM_VALUE = {'name': \"Imm\",\n 'value': int(iram_status['used'][-1] / float(iram_status['total']) * 100.0),\n 'label': \"(lfb \" + 
str(iram_status['size']) + \"MB)\",\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(iram_status['used'][-1] / 1000.0,\n iram_status['total'] / 1000.0),\n }\n linear_percent_gauge(stdscr, IRAM_VALUE, width, offset=line_counter)\n # SWAP linear gauge info\n swap_status = jetson.stats['SWAP']\n if swap_status:\n SWAP_VALUE = {'name': \"Swp\",\n 'value': int(swap_status['used'][-1] / float(swap_status['total']) * 100.0),\n 'label': \"(cached \" + str(swap_status['cached']) + \"MB)\",\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(swap_status['used'][-1] / 1000.0,\n swap_status['total'] / 1000.0),\n }\n else:\n SWAP_VALUE = {'name': \"Swp\"}\n line_counter += 1\n linear_percent_gauge(stdscr, SWAP_VALUE, width, offset=line_counter)\n # GPU linear gauge info\n line_counter += 1\n if 'GR3D' in jetson.stats:\n linear_percent_gauge(stdscr, make_gauge_from_percent(jetson.stats['GR3D']), width, offset=line_counter + 1)\n line_counter += 2\n # Status disk\n disk_status = jetson.disk\n DISK_STATUS = {'name': \"Dsk\",\n 'value': int(float(disk_status['used']) / float(disk_status['total']) * 100.0),\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(disk_status['used'], disk_status['total']),\n }\n linear_percent_gauge(stdscr, DISK_STATUS, width, offset=line_counter, type_bar=\"#\", color_name=3)\n # Last part of information\n split = 1.0\n split += 1.0 if jetson.stats['temperatures'] else 0.0\n split += 1.0 if jetson.stats['voltages'] else 0.0\n column_width = int(float(width - 4) / split)\n line_counter += 1\n # List of all mini menu\n mini_menu = [compact_info, plot_temperatures, plot_voltages]\n # Evaluate column width\n column_width = int(float(width) / len(mini_menu))\n for idx, mini in enumerate(mini_menu):\n # Run mini page\n mini(stdscr, idx * column_width, line_counter, column_width, jetson)", "def fillDetInfo():\n print('here i am')\n # 1. maps of analysis channel to cpd, and pulser monitor channels\n detCH, pMons = {}, {}\n for ds in [0,1,2,3,4,5,6]:\n f = np.load(\"%s/data/ds%d_detChans.npz\" % (os.environ['LATDIR'], ds))\n detCH[ds] = f['arr_0'].item()\n pMons[ds] = f['arr_1'].item()\n\n # 2. 
maps of HV and TRAP threshold settings are stored in the DB.\n # make them global, and move them to the runSettings file.\n # FORMAT: {ds : {'det' : [(run1,val1),(run2,val2)...]} }\n detHV, detTH = {}, {}\n\n # load all possible values, as in settingsMgr\n detDB = db.TinyDB(\"%s/calDB-v2.json\" % dsi.latSWDir)\n detPars = db.Query()\n cal = dsi.CalInfo()\n for ds in [0,1,2,3,4,5,6]:\n # for ds in [0]:\n print(\"scanning ds\",ds)\n detTH[ds] = {}\n detHV[ds] = {}\n for key in cal.GetKeys(ds):\n mod = -1\n if \"m1\" in key: mod = 1\n if \"m2\" in key: mod = 2\n for cIdx in range(cal.GetIdxs(key)):\n\n # load the DB records\n dbKeyTH = \"trapThr_%s_c%d\" % (key, cIdx)\n dbValTH = dsi.getDBRecord(dbKeyTH,calDB=detDB,pars=detPars)\n\n dbKeyHV = \"hvBias_%s_c%d\" % (key, cIdx)\n dbValHV = dsi.getDBRecord(dbKeyHV,calDB=detDB,pars=detPars)\n\n # debug: print the record\n # for val in sorted(dbValTH):\n # if len(dbValTH[val])>0:\n # print(val, dbValTH[val])\n # return\n\n # fill the first value\n if len(detTH[ds])==0:\n detTH[ds] = dbValTH\n detHV[ds] = dbValHV\n continue\n\n # check for new threshold values.\n for cpd in detTH[ds]:\n nOld, nNew = len(detTH[ds][cpd]), len(dbValTH[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detTH[ds][cpd] = dbValTH[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevTH = detTH[ds][cpd][-1][0], detTH[ds][cpd][-1][1]\n for val in dbValTH[cpd]:\n thisRun, thisTH = val[0], val[1]\n if thisTH != prevTH:\n detTH[ds][cpd].append([thisRun,thisTH])\n prevTH = thisTH\n\n # check for new HV values.\n for cpd in detHV[ds]:\n\n nOld, nNew = len(detHV[ds][cpd]), len(dbValHV[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detHV[ds][cpd] = dbValHV[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevHV = detHV[ds][cpd][-1][0], detHV[ds][cpd][-1][1]\n for val in dbValHV[cpd]:\n thisRun, thisHV = val[0], val[1]\n if thisHV != prevHV:\n print(\"found HV diff. 
cpd %d prev %dV (run %d) new %dV (run %d)\" % (cpd, prevHV, prevRun, thisHV, thisRun))\n detHV[ds][cpd].append([thisRun,thisHV])\n prevHV = thisHV\n\n # return\n\n # # load the old file and compare\n # # GOAL: improve on this file.\n # # f = np.load(\"%s/data/runSettings.npz\" % dsi.latSWDir)\n # # detHVOld = f['arr_0'].item()\n # # detTHOld = f['arr_1'].item()\n # # detCHOld = f['arr_2'].item()\n # # pMonsOld = f['arr_3'].item()\n #\n # ds = 3\n # print(\"old results, ds\",ds)\n # for cpd in sorted(detTHOld[ds]):\n # if cpd!=\"122\":continue\n # if len(detTHOld[ds][cpd]) > 0:\n # print(cpd, detTHOld[ds][cpd])\n #\n # # for ds in [0,1,2,3,4,5,6]:\n # print(\"thresh results, ds:\",ds)\n # for cpd in sorted(detTH[ds]):\n # # if cpd!=122:continue\n # if len(detTH[ds][cpd]) > 0:\n # print(cpd, detTH[ds][cpd])\n\n\n np.savez(\"%s/data/runSettings-v2.npz\" % dsi.latSWDir,detHV,detTH,detCH,pMons)", "def construct_curr_info(self, next_info: BrainInfo) -> BrainInfo:\n visual_observations: List[List[Any]] = [\n []\n ] # TODO add types to brain.py methods\n vector_observations = []\n text_observations = []\n memories = []\n rewards = []\n local_dones = []\n max_reacheds = []\n agents = []\n prev_vector_actions = []\n prev_text_actions = []\n action_masks = []\n for agent_id in next_info.agents:\n agent_brain_info = self.training_buffer[agent_id].last_brain_info\n if agent_brain_info is None:\n agent_brain_info = next_info\n agent_index = agent_brain_info.agents.index(agent_id)\n for i in range(len(next_info.visual_observations)):\n visual_observations[i].append(\n agent_brain_info.visual_observations[i][agent_index]\n )\n vector_observations.append(\n agent_brain_info.vector_observations[agent_index]\n )\n text_observations.append(agent_brain_info.text_observations[agent_index])\n if self.policy.use_recurrent:\n if len(agent_brain_info.memories) > 0:\n memories.append(agent_brain_info.memories[agent_index])\n else:\n memories.append(self.policy.make_empty_memory(1))\n rewards.append(agent_brain_info.rewards[agent_index])\n local_dones.append(agent_brain_info.local_done[agent_index])\n max_reacheds.append(agent_brain_info.max_reached[agent_index])\n agents.append(agent_brain_info.agents[agent_index])\n prev_vector_actions.append(\n agent_brain_info.previous_vector_actions[agent_index]\n )\n prev_text_actions.append(\n agent_brain_info.previous_text_actions[agent_index]\n )\n action_masks.append(agent_brain_info.action_masks[agent_index])\n if self.policy.use_recurrent:\n memories = np.vstack(memories)\n curr_info = BrainInfo(\n visual_observations,\n vector_observations,\n text_observations,\n memories,\n rewards,\n agents,\n local_dones,\n prev_vector_actions,\n prev_text_actions,\n max_reacheds,\n action_masks,\n )\n return curr_info", "def construct_curr_info(self, next_info: BrainInfo) -> BrainInfo:\n visual_observations = [[]]\n vector_observations = []\n text_observations = []\n memories = []\n rewards = []\n local_dones = []\n max_reacheds = []\n agents = []\n prev_vector_actions = []\n prev_text_actions = []\n for agent_id in next_info.agents:\n agent_brain_info = self.training_buffer[agent_id].last_brain_info\n if agent_brain_info is None:\n agent_brain_info = next_info\n agent_index = agent_brain_info.agents.index(agent_id)\n for i in range(len(next_info.visual_observations)):\n visual_observations[i].append(agent_brain_info.visual_observations[i][agent_index])\n vector_observations.append(agent_brain_info.vector_observations[agent_index])\n 
text_observations.append(agent_brain_info.text_observations[agent_index])\n if self.policy.use_recurrent:\n if len(agent_brain_info.memories > 0):\n memories.append(agent_brain_info.memories[agent_index])\n else:\n memories.append(self.policy.make_empty_memory(1))\n rewards.append(agent_brain_info.rewards[agent_index])\n local_dones.append(agent_brain_info.local_done[agent_index])\n max_reacheds.append(agent_brain_info.max_reached[agent_index])\n agents.append(agent_brain_info.agents[agent_index])\n prev_vector_actions.append(agent_brain_info.previous_vector_actions[agent_index])\n prev_text_actions.append(agent_brain_info.previous_text_actions[agent_index])\n if self.policy.use_recurrent:\n memories = np.vstack(memories)\n curr_info = BrainInfo(visual_observations, vector_observations, text_observations,\n memories, rewards, agents, local_dones, prev_vector_actions,\n prev_text_actions, max_reacheds)\n return curr_info", "def get_basic_info(info_dict, previous=False):\n coords_bohr = np.array(info_dict.get(\"geometry\"))\n coords_ang = coords_bohr * bohr2ang\n args_dict = info_dict.get(\"params\")\n energies = info_dict.get(\"energies\")\n gradients = info_dict.get(\"gradients\")\n\n params_dict = {\"images\": args_dict.get(\"images\"), \"maxg\": args_dict.get(\"maximum_force\"),\n \"avgg\": args_dict.get(\"average_force\"), \"nebk\": args_dict.get(\"spring_constant\"),\n \"neb_maxcyc\": args_dict.get(\"maximum_cycle\"), \"plain\": args_dict.get(\"spring_type\"),\n \"skip\": not args_dict.get(\"hessian_reset\"), \"epsilon\": args_dict.get(\"epsilon\")}\n iteration = args_dict.get(\"iteration\")\n params = NEBParams(**params_dict)\n\n M = Molecule()\n M.elem = info_dict.get(\"elems\")\n M.charge = info_dict.get(\"charge\")\n M.mult = info_dict.get(\"mult\")\n if previous:\n M.xyzs = [coords.reshape(-1, 3) for coords in np.array(info_dict.get(\"coord_ang_prev\"))]\n else:\n M.xyzs = [coords.reshape(-1, 3) for coords in coords_ang]\n\n params.customengine = nullengine(M)\n M, engine = get_molecule_engine(**{\"customengine\": params.customengine})\n\n result = []\n for i in range(len(energies)):\n if previous:\n result = info_dict.get(\"result_prev\")\n else:\n result.append({\"energy\": energies[i], \"gradient\": gradients[i]})\n\n return params, M, engine, result, iteration - 1", "def infotodict(seqinfo):\n\n # data = create_key('run{item:03d}')\n # info = {data: []}\n # last_run = len(seqinfo)\n\n \"\"\"\n The namedtuple `s` contains the following fields:\n\n * total_files_till_now\n * example_dcm_file\n * series_id\n * dcm_dir_name\n * unspecified2\n * unspecified3\n * dim1\n * dim2\n * dim3\n * dim4\n * TR\n * TE\n * protocol_name\n * is_motion_corrected\n * is_derived\n * patient_id\n * study_description\n * referring_physician_name\n * series_description\n * image_type\n \"\"\"\n\n t1w = create_key('sub-{subject}/{session}/anat/sub-{subject}_{session}_T1w')\n t2w = create_key('sub-{subject}/{session}/anat/sub-{subject}_{session}_T2w')\n func_rest = create_key('sub-{subject}/{session}/func/sub-{subject}_{session}_task-rest_bold')\n dwi_ap = create_key('sub-{subject}/{session}/dwi/sub-{subject}_{session}_acq-AP_dwi')\n dwi_pa = create_key('sub-{subject}/{session}/dwi/sub-{subject}_{session}_acq-PA_dwi')\n t2star = create_key('sub-{subject}/{session}/dwi/sub-{subject}_{session}_T2star')\n t2w_fatsat = create_key('sub-{subject}/{session}/anat/sub-{subject}_{session}_acq-fatsat_T2w')\n \n info = {t1w: [], t2w: [], func_rest: [], dwi_ap: [], dwi_pa: [], t2star: [], t2w_fatsat: []}\n\n for 
idx, s in enumerate(seqinfo):\n if (s.example_dcm_file == 'mp_rage_1_mm-00001.dcm'):\n info[t1w].append(s.series_id)\n if ('edti_2mm_cdif45_AP' in s.series_description):\n info[dwi_ap].append(s.series_id)\n if ('edti_2mm_cdif45_PA' in s.series_description):\n info[dwi_pa].append(s.series_id)\n if (s.series_description == 'Sag CUBE T2'):\n info[t2w].append(s.series_id)\n if (s.series_description == 'ORIG Sag CUBE T2'):\n info[t2w_orig].append(s.series_id)\n if ('T2_1.7mm_fat_sat' in s.series_description): \n info[t2w_fatsat].append(s.series_id)\n if (s.series_description == 'Reverse blip EPI 3mm iso'):\n info[t2star].append(s.series_id) \n if (s.series_description == 'Resting EPI 3mm iso RS') and (s.dim3 == 12300):\n info[func_rest].append(s.series_id)\n return info", "def singleInfo(path):\n if os.path.isfile(path):\n with open(path, 'rb') as file:\n result = json.load(file)\n values = result['f_val']\n type_of_problem = result['optimization_type']\n if type_of_problem == 'Maximize':\n best_seen = max(values)\n worse_seen = min(values)\n best_index = np.argmax(values)\n else:\n best_seen = min(values)\n worse_seen = max(values)\n best_index = np.argmin(values)\n median_seen = np.median(values)\n mean_seen = np.mean(values)\n\n dict_metrics = result['dict_model_runs']\n model_runs = result['model_runs']\n name_metrics = list(dict_metrics.keys())\n dict_results = dict()\n for name in name_metrics:\n dict_results[name] = list()\n metric_results = dict_metrics[name]\n iterations = len(metric_results.keys())\n for i in range(iterations):\n dict_results[name].append(\n metric_results['iteration_' + str(i)])\n\n hyperparameters = result['x_iters']\n name_hyp = list(hyperparameters.keys())\n best_hyperparameter_configuration = dict()\n for name in name_hyp:\n best_hyperparameter_configuration.update(\n {name: hyperparameters[name][best_index]})\n # dizionaro di output\n dict_return = dict()\n dict_return.update({\"model_runs\": dict_results})\n dict_return.update({\"f_val\": values})\n dict_return.update({\"best_seen\": best_seen})\n dict_return.update({\"worse_seen\": worse_seen})\n dict_return.update({\"median_seen\": median_seen})\n dict_return.update({\"mean_seen\": mean_seen})\n dict_return.update({\"number_of_model_runs\": model_runs})\n dict_return.update({\"current_iteration\": result[\"current_call\"],\n \"total_iterations\": result[\"number_of_call\"]})\n dict_return.update({\"hyperparameter_configurations\": hyperparameters})\n dict_return.update(\n {\"hyperparameter_configuration\": best_hyperparameter_configuration})\n dict_return.update({\"optimized_metric\": result[\"metric_name\"]})\n\n # other hyper-parameter values\n dict_model_attributes = result['model_attributes']\n dict_return.update({\"model_attributes\": dict_model_attributes})\n dict_return.update({\"model_name\": result[\"model_name\"]})\n\n dict_values_extra_metrics = dict()\n dict_stats_extra_metrics = dict()\n\n if len(result['extra_metric_names']) > 0:\n # metrics names\n dict_return.update({\"metric_names\": name_metrics[1:]})\n extra_metrics_names = list(result['dict_model_runs'].keys())\n\n for name in extra_metrics_names[1:]:\n values = []\n dict_values = result['dict_model_runs'][name]\n iterations = list(dict_values.keys())\n for j in iterations:\n values.append(np.median(dict_values[j]))\n dict_values_extra_metrics.update({name: values})\n val_stats = [np.max(values), np.min(\n values), np.median(values), np.mean(values)]\n dict_stats_extra_metrics.update({name: val_stats})\n dict_return.update(\n 
{\"extra_metric_vals\": dict_values_extra_metrics})\n dict_return.update(\n {\"extra_metric_stats\": dict_stats_extra_metrics})\n else:\n dict_return.update({\"metric_names\": 0})\n dict_return.update({\"extra_metric_vals\": dict()})\n dict_return.update({\"extra_metric_stats\": dict()})\n\n return dict_return\n return None", "def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos", "def run(self) -> Dict[str, Union[float, str]]:\n try:\n self.is_run = True\n deque(self, maxlen=0) # feed the entire iterator into a zero-length deque\n info = gather_info(\n self.start_time, self.train_collector, self.test_collector,\n self.best_reward, self.best_reward_std\n )\n finally:\n self.is_run = False\n\n return info", "def log_extract(log_info):\n \n #Handle file names, strings and open file-like objects equivalently\n with uber_open_rmode(log_info) as log_info:\n \n headers = []\n footers = []\n i = 0\n \n #for all lines in file/output\n for line in log_info:\n \n #skip blank lines\n if len(line.split()) == 0:\n continue\n \n #This is listed before both run and minimize simulations \n if 'Memory usage per processor =' in line:\n headers.append(i+1)\n \n #This follows both run and minimize simulations\n elif 'Loop time of' in line:\n footers.append(i-1)\n \n i += 1\n \n #Add last line to footers for incomplete logs\n footers.append(i)\n \n log_info.seek(0)\n \n #Create DataModelDict root\n log_dict = DM()\n log_dict['LAMMPS-log-thermo-data'] = DM()\n \n #for all lines in file/output\n for header, footer in zip(headers, footers):\n\n #Read thermo data\n df = pd.read_csv(log_info, header=header, nrows=footer-header, sep='\\s+', engine='python', skip_blank_lines=True)\n log_info.seek(0) \n\n #Convert to DataModelDict\n thermo = DM()\n for j in df:\n thermo[str(j)] = df[j].values.tolist()\n \n #Append simulation results to DataModelDict root\n simulation = DM([('thermo', thermo)])\n log_dict['LAMMPS-log-thermo-data'].append('simulation', simulation)\n \n return log_dict", "def estimateInfo(Analysis, ImageData, diffract, print_opt=False): \n total_images = ImageData['totalImages'] \n\n for key in Analysis:\n Analysis[key]['cones'] = [1,2,3,4,5,6]\n Analysis[key]['info'] = np.zeros(len(\n Analysis[key]['cones']))\n\n for amp in ImageData['rawAmp']:\n\n for key in 
Analysis:\n ind = len(diffract['cpd'])\n fooInfo = info.SingleConeEntropyFunc((amp[ind] ** 2 *\n Analysis[key]['retina']), \n Analysis[key]['cones']) \n Analysis[key]['info'] += fooInfo / total_images\n\n if print_opt == True:\n print ' '\n print 'Information'\n print '------------'\n for key in Analysis:\n print key, ': ', Analysis[key]['info']\n\n return Analysis", "def AddInfoAfterRecursive(self):\n \n print('Info about channel:' + str(self.sig))\n startpoints = np.uint64(self.AnalysisResults[self.sig]['RoughEventLocations'][:, 0])\n endpoints = np.uint64(self.AnalysisResults[self.sig]['RoughEventLocations'][:, 1])\n localBaseline = self.AnalysisResults[self.sig]['RoughEventLocations'][:, 2]\n localVariance = self.AnalysisResults[self.sig]['RoughEventLocations'][:, 3]\n for (j,k) in enumerate(startpoints): print(\"%10.7f\"% float(startpoints[j]/self.outputsamplerate))\n CusumBaseline=500\n numberofevents = len(startpoints)\n self.AnalysisResults[self.sig]['StartPoints'] = startpoints\n self.AnalysisResults[self.sig]['EndPoints'] = endpoints\n self.AnalysisResults[self.sig]['LocalBaseline'] = localBaseline\n self.AnalysisResults[self.sig]['LocalVariance'] = localVariance\n self.AnalysisResults[self.sig]['NumberOfEvents'] = len(startpoints)\n\n #### Now we want to move the endpoints to be the last minimum for each ####\n #### event so we find all minimas for each event, and set endpoint to last ####\n\n deli = np.zeros(numberofevents)\n dwell = np.zeros(numberofevents)\n limit=500e-6*self.outputsamplerate #0.5 ms\n AllFits={}\n\n for i in range(numberofevents):\n length = endpoints[i] - startpoints[i]\n if length <= limit and length>3:\n # Impulsion Fit to minimal value\n deli[i] = localBaseline[i] - np.min(self.data[self.sig][int(startpoints[i]+1):int(endpoints[i]-1)]) #current drop cuurrent at starting point - current minimal velue\n dwell[i] = (endpoints[i] - startpoints[i]) / self.outputsamplerate #length of event in seconds\n elif length > limit:\n deli[i] = localBaseline[i] - np.mean(self.data[self.sig][int(startpoints[i]+5):int(endpoints[i]-5)])\n dwell[i] = (endpoints[i] - startpoints[i]) / self.outputsamplerate\n # # Cusum Fit\n # sigma = np.sqrt(localVariance[i])\n # delta = 2e-9\n # h = 1 * delta / sigma\n # (mc, kd, krmv) = CUSUM(self.out[self.sig][startpoints[i]-CusumBaseline:endpoints[i]+CusumBaseline], delta, h)\n # zeroPoint = startpoints[i]-CusumBaseline\n # krmv = krmv+zeroPoint+1\n # AllFits['Event' + str(i)] = {}\n # AllFits['Event' + str(i)]['mc'] = mc\n # AllFits['Event' + str(i)]['krmv'] = krmv\n else:\n deli[i] = localBaseline[i] - np.min(self.data[self.sig][startpoints[i]:endpoints[i]])\n dwell[i] = (endpoints[i] - startpoints[i]) / self.outputsamplerate\n\n frac = deli / localBaseline #fraction: current drop / current at start\n dt = np.array(0)\n dt = np.append(dt, np.diff(startpoints) / self.outputsamplerate) # differences between starts of different events (Frequency of events)\n numberofevents = len(dt)\n\n #self.AnalysisResults[self.sig]['CusumFits'] = AllFits\n self.AnalysisResults[self.sig]['FractionalCurrentDrop'] = frac # current drop / current at start \n self.AnalysisResults[self.sig]['DeltaI'] = deli #current drop in nA\n self.AnalysisResults[self.sig]['DwellTime'] = dwell #end[i] - start[i] in sec.\n self.AnalysisResults[self.sig]['Frequency'] = dt # start[i+1] - start[i] in sec.", "def info():\n return r\"\"\"Tseng, Lin-Yu, and Chun Chen. 
\"Multiple trajectory search for unconstrained/constrained multi-objective optimization.\" Evolutionary Computation, 2009. CEC'09. IEEE Congress on. IEEE, 2009.\"\"\"", "def info(self):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d-%HH-%MM-%SS\")\n print(f\"Exploration info ({now})\")\n print(f\"HDF name: {self.HDF_FILE}\")\n print(f\"Trajectory name: {self.trajectoryName}\")\n if self.model is not None:\n print(f\"Model: {self.model.name}\")\n if hasattr(self, \"nRuns\"):\n print(f\"Number of runs {self.nRuns}\")\n print(f\"Explored parameters: {self.exploreParameters.keys()}\")\n if hasattr(self, \"_t_end_exploration\") and hasattr(self, \"_t_start_exploration\"):\n print(f\"Duration of exploration: {self._t_end_exploration-self._t_start_exploration}\")", "def minfo():\n model = np.loadtxt('cumul_depths.tmp',dtype={'names': ('H'),'formats': \\\n ('f4')}, usecols=[0])\n d = model['H']\n model = np.loadtxt('start_model.dat',dtype={'names': (\"S\"),'formats': \\\n ('f4')}, skiprows=1,usecols=[2])\n vs = model['S']\n\n A = np.repeat(vs,2)\n B = np.repeat(d,2)\n B = np.insert(B,[0],0.0)[:-1] \n out = zip(A, B)\n \n f = open('model.info','w+')\n for line in out:\n print (\" \".join(str(x) for x in line))\n f.write(\" \".join(str(x) for x in line) + \"\\n\") \n f.close()", "def infotodict(seqinfo):\n\n t1 = create_key('anat/sub-{subject}_run-{item:02d}_T1w')\n rest_fmri_ap = create_key('func/sub-{subject}_dir-ap_task-rest_run-{item:02d}_bold')\n rest_topup_ap = create_key('func/sub-{subject}_dir-ap_run-{item:02d}_bold')\n rest_topup_pa = create_key('func/sub-{subject}_dir-pa_run-{item:02d}_bold')\n fmap_rest_magnitude1 = create_key('fmap/sub-{subject}_run-{item:02d}_magnitude1')\n fmap_rest_phasediff = create_key('fmap/sub-{subject}_run-{item:02d}_phasediff')\n\n # Create an empty dictionary called info for each key\n\n info = {t1: [],\n rest_fmri_ap: [],\n rest_topup_ap: [],\n rest_topup_pa: [],\n fmap_rest_magnitude1: [],\n fmap_rest_phasediff: [],\n }\n\n # Loop over each sequence. 
Use if statements to determine which sequences should be linked to which key\n\n for idx, s in enumerate(seqinfo):\n\n if (('MPRAGE_GRAPPA2' in s.series_id) and\n ('tfl3d1_16ns' in s.sequence_name) and\n (s.dim3 == 192) and\n (s.dim4 == 1)):\n info[t1] = [s.series_id]\n\n if (('BOLD_resting 4X4X4 A>>P' in s.series_id) and\n ('epfid2d1_64' in s.sequence_name) and\n (s.dim3 == 35) and\n (s.dim4 == 190)):\n info[rest_fmri_ap] = [s.series_id]\n\n if (('rest_topup_A>>P' in s.series_id) and\n ('epse2d1_64' in s.sequence_name) and\n (s.dim3 == 140) and\n (s.dim4 == 1)):\n info[rest_topup_ap] = [s.series_id]\n\n if (('rest_topup_P>>A' in s.series_id) and\n ('epse2d1_64' in s.sequence_name) and\n (s.dim3 == 140) and\n (s.dim4 == 1)):\n info[rest_topup_pa] = [s.series_id]\n\n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n ('fm2d2r' in s.sequence_name) and\n (s.dim3 == 35) and\n (s.dim4 == 1) and\n (s.TE == 4.92)):\n info[fmap_rest_magnitude1] = [s.series_id]\n\n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n ('fm2d2r' in s.sequence_name) and\n (s.dim3 == 35) and\n (s.dim4 == 1) and\n (s.TE == 7.38)):\n info[fmap_rest_phasediff] = [s.series_id]\n\n return info", "def getInfoDrum(self, l):\n for i in range(0, len(l)):\n for j in range(len(l[i].info.oameni)):\n l[i].info.oameni[j].timp_asteptat = 0\n l[i].info.oameni[j].timp_mers = 0\n l[i].info.oameni[j].traseu = None\n \n for i in range(1, len(l)):\n for j in range(len(l[i].info.oameni)):\n index_prev = l[i-1].info.getOmIndex(l[i].info.oameni[j].name)\n if l[i].info.oameni[j].state != l[i-1].info.oameni[index_prev].state:\n if l[i-1].info.oameni[index_prev].state == \"waiting\" and l[i].info.oameni[j].state == \"travelling\":\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers\n elif l[i-1].info.oameni[index_prev].state == \"travelling\" and l[i].info.oameni[j].state == \"waiting\":\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat\n else:\n if l[i].info.oameni[j].state == \"travelling\":\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat\n elif l[i].info.oameni[j].state == \"waiting\":\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers\n\n for i in range(1, len(l)):\n if l[i].info.event.tip == \"boarding\":\n temp_traseu = [l[i].info.event.om.current_loc]\n j = i\n while j < len(l) and (l[j].info.event.tip != \"unboarding\" or l[j].info.event.om.name != l[i].info.event.om.name):\n j += 1\n if j >= len(l):\n return None\n unboarding_loc = l[j].info.event.om.current_loc\n direction = l[i].info.event.autobuz.direction_forward\n index_boarding = l[i].info.event.autobuz.getIndexLoc(temp_traseu[0])\n index_unboarding = l[i].info.event.autobuz.getIndexLoc(unboarding_loc)\n if direction:\n for x in range(index_boarding+1, index_unboarding+1):\n temp_traseu.append(l[i].info.event.autobuz.destinations[x])\n else:\n for x in range(index_boarding-1, index_unboarding-1, -1):\n temp_traseu.append(l[i].info.event.autobuz.destinations[x])\n 
l[i].info.oameni[l[i].info.getOmIndex(l[i].info.event.om.name)].traseu = temp_traseu\n for x in range(i, j):\n index = l[x].info.getOmIndex(l[i].info.event.om.name)\n assert index != None\n l[x].info.oameni[index].traseu = temp_traseu\n\n return l", "def analyze_experiment(info_file_name: str, scope_name: str, comment: str, cai_params: dict, func_channel=0,\n tail_frame_rate=250) -> Experiment2P:\n if \"indicator_decay_time\" not in cai_params:\n raise ValueError(\"At least 'indicator_decay_time' has to be specified in cai_params\")\n if func_channel < 0 or func_channel > 1:\n raise ValueError(f'func_channel {func_channel} is not valid. Has to be 0 (\"green\"\") or 1 (\"red\"\")')\n exp = Experiment2P()\n exp.scope_name = scope_name\n exp.comment = comment\n # copy acquisition information extracted from experiment files\n eparser = ExperimentParser(info_file_name)\n if not eparser.is_dual_channel and func_channel != 0:\n raise ValueError(f\"Experiment is single channel but func_channel was set to {func_channel}\")\n exp.experiment_name = eparser.experiment_name\n exp.original_path = eparser.original_path\n exp.info_data = eparser.info_data\n exp.scanner_data = eparser.scanner_data\n # collect data in tail files if applicable\n try:\n if eparser.has_tail_data:\n for tf in eparser.tail_files:\n exp.tail_data.append(np.genfromtxt(path.join(exp.original_path, tf), delimiter='\\t'))\n # Since we only keep the bout calls, the time constant passed below is arbitrary\n td = TailData.load_tail_data(path.join(exp.original_path, tf), 3.0, tail_frame_rate,\n eparser.info_data[\"frame_duration\"])\n exp.bout_data.append(td.bouts)\n exp.tail_frame_times.append(td.frame_time)\n exp.tail_frame_rate = td.frame_rate\n except (IOError, OSError) as e:\n print(f\".tail files are present but at least one file failed to load. Not attaching any tail data.\")\n print(e)\n exp.tail_data = []\n exp.bout_data = []\n # collect data in laser files if applicable\n try:\n if eparser.has_laser_data:\n for lf in eparser.laser_files:\n exp.laser_data.append(np.genfromtxt(path.join(exp.original_path, lf)))\n except (IOError, OSError) as e:\n print(f\".laser files are present but at least one file failed to load. Not attaching any laser data.\")\n print(e)\n exp.laser_data = []\n # use caiman to extract units and calcium data\n if eparser.is_dual_channel:\n print(f\"This experiment has dual channel data. 
Ch{func_channel} is being processed as functional channel.\"\n f\" Other, anatomy channel, is co-aligned.\")\n data_files = eparser.ch_0_files if func_channel == 0 else eparser.ch_1_files\n if eparser.is_dual_channel:\n co_files = eparser.ch_1_files if func_channel == 0 else eparser.ch_0_files\n else:\n co_files = None\n for i, ifl in enumerate(data_files):\n cai_params[\"time_per_frame\"] = exp.info_data[\"frame_duration\"]\n cai_params[\"fov_um\"] = exp.scanner_data[i][\"fov\"]\n cai_wrapper = CaImAn(**cai_params)\n ifile = path.join(exp.original_path, ifl)\n if eparser.is_dual_channel:\n cofile = path.join(exp.original_path, co_files[i])\n else:\n cofile = None\n print(f\"Now analyzing: {ifile}\")\n images, params, co_images = cai_wrapper.motion_correct(ifile, cofile)\n\n exp.mcorr_dicts.append(params[\"Motion Correction\"])\n exp.projections.append(np.mean(images, 0))\n stack = np.array(images)\n stack -= np.min(stack)\n stack[stack > 255] = 255\n stack = stack.astype(np.uint8)\n exp.func_stacks.append(stack)\n if eparser.is_dual_channel:\n exp.anat_projections.append(np.mean(co_images, 0))\n print(\"Motion correction completed\")\n cnm2, params = cai_wrapper.extract_components(images, ifile)[1:]\n exp.cnmf_extract_dicts.append(params[\"CNMF\"])\n exp.cnmf_val_dicts.append(params[\"Validation\"])\n print(\"Source extraction completed\")\n exp.all_c.append(cnm2.estimates.C.copy())\n exp.all_dff.append(cnm2.estimates.F_dff.copy())\n exp.all_centroids.append(get_component_centroids(cnm2.estimates.A, images.shape[1], images.shape[2]))\n coords, weights = get_component_coordinates(cnm2.estimates.A, images.shape[1], images.shape[2])\n exp.all_sizes.append(np.array([w.size for w in weights]))\n spatial_footprints = []\n for c_ix, (comp_coords, comp_weights) in enumerate(zip(coords, weights)):\n ix = np.full(comp_coords.shape[0], c_ix)[:, None]\n spat = np.c_[ix, comp_weights[:, None], comp_coords]\n spatial_footprints.append(spat)\n exp.all_spatial.append(np.vstack(spatial_footprints))\n exp.populated = True\n return exp", "def check_ec500_general_battery_current(item, params, info):\n state = 3\n infotext = \"unknown_value\"\n index = 0\n perfdata = []\n ec500_general_battery_current = None\n try:\n #print info\n for line in info:\n index= index + 1\n ec500_general_battery_current = line[0]\n #print rec_share_value \n try:\n ec500_general_battery_current = float(ec500_general_battery_current)\n except Exception,e:\n ec500_general_battery_current = line[0].replace(' ','@')\n state = 0\n perfdata.append((\"ec500_general_battery_%d_current\" %index,ec500_general_battery_current))\n infotext = \"ec500_general_battery_current=%s\" % ec500_general_battery_current\n except Exception,e:\n infotext = \"unknown_value\"\n return (state,infotext,perfdata)", "def _deltas(self):\n istat = self.init\n lstat = self.stats\n uptime = self._uptime()\n delta = float(uptime) - float(self.uptime)\n self.uptime = uptime\n \n for dev in lstat.keys():\n if not istat.has_key(dev):\n del lstat[dev]\n continue\n idev = istat[dev]\n ldev = lstat[dev]\n\n for key,value in ldev.items():\n if re.search(r'(^major\\Z|^minor\\Z)',key):\n continue\n \n if not idev.has_key(key):\n print \"Different keys in statistics\"\n sys.exit(1)\n if not str(value).isdigit and \\\n not str(ldev[key]).isdigit(): \n print \"value of key is not a number\"\n sys.exit(1)\n \n if ldev[key] == idev[key]:\n ldev[key] = self._sprintf('%.2f', 0)\n elif int(delta) > 0:\n ldev[key] = self._sprintf('%.2f',float((ldev[key] - idev[key]) / delta))\n 
else:\n\t ldev[key] = self._sprintf('%.2f', float(ldev[key] - idev[key]))\n idev[key] = value\n return idev", "def info_cache(self):\n self.info.info()\n self.dataset.info()\n self.category.info()", "def overheadstats(sdb, obsdate, update=True):\n bvs_updated = 0\n scams=0\n #for a given obsdate get the night info\n nid=getnightinfo(sdb, obsdate)\n\n #get the times for the night\n record=sdb.select('EveningTwilightEnd, MorningTwilightStart', 'NightInfo', 'NightInfo_Id=%i' % nid)\n stime=record[0][0]\n etime=record[0][1]\n totaltime=(etime-stime).seconds\n\n #From the sdb, get the SoLogEvent table\n record=sdb.select('EventType_Id, EventTime', 'SoLogEvent', 'NightInfo_Id=%i' % nid)\n event_list=[]\n for i in range(len(record)):\n r=record[i]\n if r[1].seconds>43200:\n t=datetime.datetime(int(obsdate[0:4]), int(obsdate[5:7]), int(obsdate[8:10]), 0, 0, 0)+r[1]\n else:\n t=datetime.datetime(int(obsdate[0:4]), int(obsdate[5:7]), int(obsdate[8:10]), 0, 0, 0)+datetime.timedelta(days=1)+r[1]\n event_list.append([r[0], t])\n #sort the list by the datetimes\n event_list.sort(key=lambda e:e[1])\n\n #get the night's list of blocks\n selcmd='BlockVisit_Id'\n tabcmd='BlockVisit'\n logcmd='NightInfo_Id=%i and BlockVisitStatus_Id=%i' % (nid, 1)\n bvid_list=sdb.select(selcmd, tabcmd, logcmd)\n bvid_list[:]=[bvid[0] for bvid in bvid_list]\n\n #get a list of all images from the night\n select_state='FileName, Proposal_Code, Target_Name, ExposureTime, UTSTART, h.INSTRUME, h.OBSMODE, h.DETMODE, h.CCDTYPE, NExposures, BlockVisit_Id'\n table_state='FileData Join ProposalCode on (FileData.ProposalCode_Id = ProposalCode.ProposalCode_Id) join FitsHeaderImage as h using (FileData_Id)'\n formatteddate = obsdate.replace('-','')\n logic_state=\"FileName like '%\"+formatteddate+\"%' order by UTSTART\"\n img_list=sdb.select(select_state, table_state, logic_state)\n img_list[:] = [img for img in img_list if not \"CAL_\" in img[1] and not \"ENG_\" in img[1] and not \"JUNK\" in img[1]]\n\n #get a list of all RSS images from the night\n select_state='FileName, Proposal_Code, Target_Name, ExposureTime, UTSTART, h.INSTRUME, '\n select_state+='h.OBSMODE, h.DETMODE, h.CCDTYPE, NExposures, BlockVisit_Id, r.GRATING, r.GR_STA, r.AR_STA'\n table_state='FileData Join ProposalCode on (FileData.ProposalCode_Id = ProposalCode.ProposalCode_Id) '\n table_state+='join FitsHeaderImage as h using (FileData_Id) join FitsHeaderRss as r using (FileData_Id)'\n formatteddate = obsdate.replace('-','')\n logic_state=\"FileName like '%\"+formatteddate+\"%' order by UTSTART\"\n rss_imglist=sdb.select(select_state, table_state, logic_state)\n rss_imglist[:] = [img for img in rss_imglist if not \"CAL_\" in img[1] and not \"ENG_\" in img[1] and not \"JUNK\" in img[1]]\n\n #get a list of all point commands from the night\n select_state= 'BlockVisit_Id, EventTime, Block_Id, Target_Name, NightInfo_Id, EventData'\n table_state='PointEvent join SoLogEvent using (SoLogEvent_Id)'\n plist=sdb.select(select_state, table_state, 'NightInfo_Id=%i' % nid)\n point_list=[]\n for i in range(len(plist)):\n p=plist[i]\n if p[1].seconds>43200:\n t=datetime.datetime(int(obsdate[0:4]), int(obsdate[5:7]), int(obsdate[8:10]), 0, 0, 0)+p[1]\n else:\n t=datetime.datetime(int(obsdate[0:4]), int(obsdate[5:7]), int(obsdate[8:10]), 0, 0, 0)+datetime.timedelta(days=1)+p[1]\n point_list.append([p[0], t, p[2], p[3], p[4], p[5]])\n\n #deal with accepted blocks\n for bvid in bvid_list:\n\n #determine start time (point) and end time (track end)\n pointtime = findpointcommand(bvid, 
point_list)\n if pointtime is None:\n #print('no point')\n continue\n starttime=pointtime\n endtime=findguidingstop(starttime, event_list)\n if endtime is None:\n #print('no end')\n continue\n\n #determine total time\n tottime=endtime-starttime\n #some limit to avoid crazy stats\n if tottime.seconds > 10000:\n #print('total too long, took %i s'%tottime.seconds)\n continue\n\n #determine the slew time\n guidestart=findguidingstart(starttime, event_list)\n if guidestart is None:\n #print('no trackstart')\n continue\n slewtime=guidestart-starttime\n if slewtime.seconds > 1000:\n #print('slew too long, took %i s'%slewtime.seconds)\n continue\n\n #determine the time between TrackStart and OnTarget\n ontarget=findontarget(starttime, event_list)\n if ontarget is None:\n #print('no on target?')\n continue\n trackerslewtime=ontarget-guidestart\n if trackerslewtime.seconds > 500:\n #print('trackslew too long, took %i s'%trackerslewtime.seconds)\n continue\n\n #get primary instrument, check if MOS\n scams=0\n instr, primary_mode=getprimarymode(img_list, bvid)\n if instr == 'SALTICAM':\n #print('its a scam!')\n scams+=1\n continue\n\n select_state= 'Block_Id'\n table_state='BlockVisit'\n logic_state='BlockVisit_Id=%i' % (bvid)\n bids=sdb.select(select_state, table_state, logic_state)\n if not bids:\n #print(\"no bid!\")\n continue\n else:\n bid=bids[0]\n selcmd='Block_Id, Barcode'\n tabcmd='Block join Pointing using (Block_Id) join Observation using (Pointing_Id) '\n tabcmd+='join TelescopeConfigObsConfig using (Pointing_Id) join ObsConfig on (PlannedObsConfig_Id=ObsConfig_Id) '\n tabcmd+='join RssPatternDetail using (RssPattern_Id) join Rss using (Rss_Id) join RssProcedure using (RssProcedure_Id) '\n tabcmd+='join RssConfig using (RssConfig_Id) join RssMask using (RssMask_Id)'\n logcmd='RssProcedureType_Id = \\'7\\' and Block_Id = %i group by Block_Id order by Block_Id' % bid\n mos=sdb.select(selcmd, tabcmd, logcmd)\n if mos:\n instr='MOS'\n\n if instr == 'MOS':\n #special case for MOS science acquisition\n mosacq=getfirstimage(rss_imglist, ontarget, instr, primary_mode, bvid)\n mosacqtime=mosacq-ontarget\n if mosacqtime.seconds > 1000:\n #print(\"MOS Acquisition too long, took %i s\"%mosacqtime.seconds)\n continue\n else:\n #determine the Salticam acquisition time after being on target\n scamstart=getfirstscam(img_list, starttime, 'SALTICAM', 'IMAGING', bvid)\n if scamstart is None:\n #print(\"Did not find SCAM image\")\n continue\n acqtime=scamstart-ontarget\n if acqtime.seconds > 1000:\n #print(\"Target Acquisition too long, took %i s\"%acqtime.seconds)\n continue\n #determine the time between acquisition and first science image\n sciencestart=getfirstimage(img_list, scamstart, instr, primary_mode, bvid)\n if sciencestart is None:\n #print(\"Did not find science image for BV %i using %s\" % (bvid, instr))\n continue\n sciacqtime=sciencestart-scamstart\n if sciacqtime.seconds > 1000:\n #print(\"Science Acquisition too long, took %i s\"%sciacqtime.seconds)\n continue\n\n #update results in sdb\n if update:\n bvs_updated+=1\n inscmd='SlewTime=%i, TrackerSlewTime=%i' % (slewtime.seconds, trackerslewtime.seconds)\n sdb.update(inscmd, 'BlockVisit', 'BlockVisit_Id=%i' % bvid)\n if instr == 'MOS':\n inscmd='MOSAcquisitionTime=%i' % (mosacqtime.seconds)\n sdb.update(inscmd, 'BlockVisit', 'BlockVisit_Id=%i' % bvid)\n else:\n inscmd='TargetAcquisitionTime=%i, InstrumentAcquisitionTime=%i' % (acqtime.seconds, sciacqtime.seconds)\n sdb.update(inscmd, 'BlockVisit', 'BlockVisit_Id=%i' % bvid)\n\n 
print(bvs_updated)\n print(len(bvid_list)-scams)\n return bvid_list", "def info():\n return r\"\"\"Lin-Yu Tseng and Chun Chen, \"Multiple trajectory search for Large Scale Global Optimization,\" 2008 IEEE Congress on Evolutionary Computation (IEEE World Congress on Computational Intelligence), Hong Kong, 2008, pp. 3052-3059. doi: 10.1109/CEC.2008.4631210\"\"\"", "def _get_time_info(self, keys: list[str]):\n if self.is_info_v2:\n if not self.is_on:\n return 0\n return self.int_or_none(self._data.get(keys[1]))\n return self._data.get(keys[0])", "def get_info():\n with open('explorers.json', 'r') as file:\n block_expl_info = json.load(file)\n BLOCK_EXPL_INFO['block_explorers'] = [{'analytics': [None, None]} for i in range(len(block_expl_info))]\n analytic_thread = threading.Thread(target=get_analytics)\n analytic_thread.start()\n print(analytic_thread)\n counter_api = 0\n for elem in block_expl_info:\n print(counter_api, elem)\n api = search(block_expl_info[elem], \"api\")\n name, currency, url, best_height_key, timer = search(block_expl_info[elem], \"name\"), search(\n block_expl_info[elem], \"currency\"), search(block_expl_info[elem], \"url\"), search(block_expl_info[elem],\n \"best_height_key\"), search(\n block_expl_info[elem], \"api_limit\")\n if api:\n my_thread = threading.Thread(target=get_best_height,\n args=(name, currency, url, best_height_key, counter_api, timer))\n counter_api += 1\n my_thread.start()\n print(my_thread)\n else:\n latest_block = BLOCK_EXPL_INFO\n latest_block_list = latest_block['block_explorers']\n latest_block_list[counter_api][\"name\"] = name\n latest_block_list[counter_api][\"currency\"] = currency\n latest_block_list[counter_api][\"best_height\"] = best_height_key\n latest_block_list[counter_api][\"api\"] = None\n counter_api += 1", "def amet_memoryWise(self):\r\n # set up logging files to monitor the calculation\r\n logging.basicConfig(filename = os.path.join(self.path,'history_amet_python.log'),\r\n filemode = 'w+', level = logging.DEBUG,\r\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n # initialize the time span\r\n # define sigma level\r\n A, B = self.defineSigmaLevels()\r\n # use example input file to load the basic dimensions information\r\n datapath_var = os.path.join(self.path, 'MERRA2_400.inst3_3d_asm_Nv.20160101.nc4.nc')\r\n var_key = Dataset(datapath_var)\r\n lat = var_key.variables['lat'][:]\r\n lon = var_key.variables['lon'][:]\r\n # calculate the reference levels based on A & B and standard surface pressure\r\n half_level = A + B * 101325\r\n level = (half_level[1:] + half_level[:-1]) / 2\r\n # create space for the output\r\n # AMET in the entire column\r\n E = np.zeros((len(lat),len(lon)), dtype=float)\r\n cpT = np.zeros((len(lat),len(lon)), dtype=float)\r\n Lvq = np.zeros((len(lat),len(lon)), dtype=float)\r\n gz = np.zeros((len(lat),len(lon)), dtype=float)\r\n uv2 = np.zeros((len(lat),len(lon)), dtype=float)\r\n logging.info(\"Start retrieving variables T,q,u,v,sp\")\r\n # The shape of each variable is (8,72,361,576)\r\n T = var_key.variables['T'][:]\r\n q = var_key.variables['QV'][:]\r\n sp = var_key.variables['PS'][:] #(8,361,576)\r\n u = var_key.variables['U'][:]\r\n v = var_key.variables['V'][:]\r\n logging.info(\"Extracting variables successfully!\") \r\n # compute gz\r\n z_model = self.calc_gz(var_key)\r\n # get the basic shape\r\n tt, hh, yy, xx = q.shape\r\n AMET = amet.met()\r\n E, cpT, Lvq, gz, uv2 = AMET.calc_met(T, q, sp, u, v, z_model, A, B,\r\n tt, hh, len(lat), len(lon), lat, self.lat_unit)\r\n\r\n 
return np.mean(E)", "def getInfo():" ]
[ "0.58495504", "0.58472466", "0.5484478", "0.53190255", "0.5158806", "0.5147971", "0.50936896", "0.50092375", "0.49753112", "0.4957122", "0.49444202", "0.493542", "0.49158955", "0.47993928", "0.4793157", "0.47781986", "0.4762298", "0.47620896", "0.47439304", "0.4739045", "0.47362673", "0.47318342", "0.47076964", "0.47026762", "0.47002995", "0.46917313", "0.46876726", "0.46850532", "0.46834987", "0.46588346" ]
0.6225882
0
For each item (timestep) in this episode, save relevant images.
def _save_images(self, episode_len, color_l, depth_l, info_l, outdir, i_ep):
    for t in range(episode_len):
        assert color_l[t].shape == (3, 480, 640, 3), color_l[t].shape
        assert depth_l[t].shape == (3, 480, 640), depth_l[t].shape
        # Recall that I added 'extras' to the info dict at each time.
        info = info_l[t]
        info_r = info['extras']

        # We saved three color/depth images per time step.
        for k in range(3):
            c_img = color_l[t][k]
            d_img = depth_l[t][k]
            assert c_img.dtype == 'uint8', c_img.dtype
            assert d_img.dtype == 'float32', d_img.dtype
            d_img = process_depth(img=d_img)

        # Andy uses U.reconstruct_heightmap(color, depth, configs, ...)
        obs_input = {'color': color_l[t], 'depth': depth_l[t]}
        colormap, heightmap = get_heightmap(obs_input)
        heightmap_proc = process_depth(img=heightmap)

        # Save image that combines the interesting ones above, makes it
        # easier to copy and paste. Horizontally concatenate images and
        # save. Also convert to BGR because OpenCV assumes that format
        # but PyBullet uses RGB (to be consistent). Network should be
        # seeing RGB images I believe (but just be consistent).
        c_img_front = color_l[t][0]                          # Shape (480, 640, 3)
        c_img_front = cv2.resize(c_img_front, (426, 320))    # numpy shape: (320, 426)
        barrier = np.zeros((320, 4, 3))                      # Black barrier of 4 pixels
        combo = np.concatenate((
            cv2.cvtColor(c_img_front, cv2.COLOR_BGR2RGB),
            barrier,
            cv2.cvtColor(colormap, cv2.COLOR_RGB2BGR),
            barrier,
            heightmap_proc),
            axis=1)

        # Optionally include title with more details, but env dependent.
        suffix_all = f'{i_ep:06d}-{t:02d}-OVERALL.png'
        suffix_all = self._change_name(suffix_all, info_r)
        cv2.imwrite(os.path.join(outdir, suffix_all), combo)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_images(self, step, images):\n\n # Save\n with self.summary_writer.as_default():\n for name, batch in images.items():\n image = batch[0]\n image = tf.expand_dims(image, axis=0)\n tf.summary.image(name, image, step)", "def save_images(self):\n for q in range(self.N_itr):\n plt.clf()\n self.plot_EM_estimate(q)\n plt.savefig('img%d.png' % (100 + q))", "def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)", "def saveImage(self, observation):\n image_path = \"{}/{}/frame{:06d}\".format(self.data_folder, self.episode_folder, self.episode_step)\n relative_path = \"{}/{}/frame{:06d}\".format(self.name, self.episode_folder, self.episode_step)\n self.images_path.append(relative_path)\n # in the case of dual/multi-camera\n if observation.shape[2] > 3:\n observation1 = cv2.cvtColor(observation[:, :, :3], cv2.COLOR_BGR2RGB)\n observation2 = cv2.cvtColor(observation[:, :, 3:], cv2.COLOR_BGR2RGB)\n\n cv2.imwrite(\"{}_1.jpg\".format(image_path), observation1)\n cv2.imwrite(\"{}_2.jpg\".format(image_path), observation2)\n else:\n observation = cv2.cvtColor(observation, cv2.COLOR_BGR2RGB)\n cv2.imwrite(\"{}.jpg\".format(image_path), observation)", "def save_step_1(imgs, output_path='./output/step1'):\n # ... 
your code here ...\n i=0\n for each in imgs:\n i+=1\n cv2.imwrite(output_path+\"/output\"+str(i)+\".jpg\", each)", "def save_episode_models(self, episode):\n s1 = './' + self.game_name + '/'\n s_pth = '{}.pth'.format(episode + 1)\n torch.save(self.actor.state_dict(), s1 + 'actor' + s_pth)\n torch.save(self.critic1.state_dict(), s1 + 'critic1_' + s_pth)\n torch.save(self.critic2.state_dict(), s1 + 'critic2_' + s_pth)\n torch.save(self.critic1_target.state_dict(), s1 + 'critic1_target' + s_pth)\n torch.save(self.critic2_target.state_dict(), s1 + 'critic2_target' + s_pth)\n\n torch.save(self.log_alpha, s1 + 'log_alpha' + s_pth)\n\n torch.save(self.actor_optimiser.state_dict(), s1 + 'actor_optimizer' + s_pth)\n torch.save(self.critic1_optimiser.state_dict(), s1 + 'critic1_optimizer' + s_pth)\n torch.save(self.critic2_optimiser.state_dict(), s1 + 'critic2_optimizer' + s_pth)\n torch.save(self.alpha_optimiser.state_dict(), s1 + 'alpha_optimizer' + s_pth)\n np.save(s1 + 'scores{}.npy'.format(episode + 1), self.scores)", "def save_items(self, path):\n os.makedirs(path, exist_ok=True)\n np.save(os.path.join(path, 'ids'), self.item_ids)\n np.save(os.path.join(path, 'titles'), self.item_titles)\n np.save(os.path.join(path, 'embeddings'), self.item_vectors)\n np.save(os.path.join(path, 'categories'), self.item_categories)", "def inspect(self, save_imgs=True):\n if 'data' in self.path:\n outdir = (self.path).replace('data/', 'data_out/')\n elif 'goals' in self.path:\n outdir = (self.path).replace('goals/', 'goals_out/')\n if os.path.exists(outdir):\n import shutil\n print(f'Removing: {outdir}')\n shutil.rmtree(outdir)\n os.makedirs(outdir)\n print(f'Saving to: {outdir}')\n print(f'episode_set: {self.episode_set}')\n print(f'num_episodes: {self.num_episodes}')\n\n def _load(i_ep, episode_len, field):\n field_path = os.path.join(self.path, field)\n fname = f'{i_ep:06d}-{episode_len}.pkl'\n return pickle.load(open(os.path.join(field_path, fname), 'rb'))\n\n # For data analysis later to evalute demonstrator quality. Anything for\n # 'maxlen_stats' should only be applied on episodes with max length.\n ep_lengths = []\n all_stats = defaultdict(list)\n maxlen_stats = defaultdict(list)\n\n for i_ep in range(self.num_episodes):\n if i_ep % 20 == 0:\n print(f'\\ton episode {i_ep}')\n\n # is_episode_sample: list of [F,F,T,T,F,F,..], T for this epis only.\n is_episode_sample = np.int32(self.episode_id) == i_ep\n episode_samples = np.argwhere(is_episode_sample).squeeze().reshape(-1)\n episode_len = len(episode_samples)\n ep_lengths.append(episode_len)\n color_l = _load(i_ep, episode_len, 'color')\n depth_l = _load(i_ep, episode_len, 'depth')\n info_l = _load(i_ep, episode_len, 'info')\n\n # Save images if desired, but can take a lot of time.\n if save_imgs:\n self._save_images(episode_len, color_l, depth_l, info_l, outdir, i_ep)\n\n # ---------------------------------------------------------------- #\n # If we have 'last' info, we should use it. Should apply to any\n # data generated on August 12 or later. 
As of Aug 28, I changed it\n # so `last_info` will save everything in the `info`, NOT just the\n # 'extras' key (needed for goal conditioning).\n # ---------------------------------------------------------------- #\n if not os.path.exists(os.path.join(self.path, 'last_info')):\n continue\n last_color = _load(i_ep, episode_len, 'last_color')\n last_depth = _load(i_ep, episode_len, 'last_depth')\n last_info = _load(i_ep, episode_len, 'last_info')\n\n # Add stuff to `ep_` lists.\n self._track_data_statistics(info_l, last_info, episode_len,\n all_stats, maxlen_stats)\n\n if save_imgs:\n # See transporter.py, mirroring Transporter.get_heightmap().\n obs_input = {'color': last_color, 'depth': last_depth}\n colormap, heightmap = get_heightmap(obs_input)\n heightmap_proc = process_depth(img=heightmap)\n\n # Same as earlier, fusing images together.\n c_img_front = last_color[0] # Shape (480, 640, 3)\n c_img_front = cv2.resize(c_img_front, (426,320)) # numpy shape: (320,426)\n barrier = np.zeros((320,4,3)) # Black barrier of 4 pixels\n combo = np.concatenate((\n cv2.cvtColor(c_img_front, cv2.COLOR_BGR2RGB),\n barrier,\n cv2.cvtColor(colormap, cv2.COLOR_RGB2BGR),\n barrier,\n heightmap_proc),\n axis=1)\n\n # Optionally include title with more details, but env dependent.\n suffix_all = f'{i_ep:06d}-{episode_len:02d}-FINAL.png'\n suffix_all = self._change_name(suffix_all, last_info['extras'])\n cv2.imwrite(os.path.join(outdir,suffix_all), combo)\n # ---------------------------------------------------------------- #\n\n # Debugging / inspection. First, debug all episodes.\n max_l = get_max_episode_len(self.path)\n ep_lengths = np.array(ep_lengths)\n print(f'\\nStats over {self.num_episodes} demos:')\n print(f'ep len avg: {np.mean(ep_lengths):0.3f} +/- {np.std(ep_lengths):0.1f}')\n print(f'ep len median: {np.median(ep_lengths):0.3f}')\n print(f'ep len min/max: {np.min(ep_lengths)}, {np.max(ep_lengths)}')\n num_max = np.sum(ep_lengths == max_l)\n print(f'ep equal to max len ({max_l}): {num_max} / {self.num_episodes}')\n print('You can approximate this as failures, but might overcount).\\n')\n\n # Consider env-specific properties, prefacing with [maxlen] as needed.\n # Though we'll get NaNs if no episodes got to the max -- but thats OK. 
:)\n print('Now environment-specific statistics:')\n\n if 'cable-shape' in self.path or 'cable-line-notarget' in self.path:\n # For these tasks, I want to see performance as a function of nb_sides.\n low, high = 1, 1 # for cable-line-notarget (one side only)\n if 'cable-shape' in self.path:\n low, high = 2, 4\n for nb_sides in range(low, high + 1):\n md = maxlen_stats[f'done_{nb_sides}']\n mf = maxlen_stats[f'frac_{nb_sides}']\n ad = all_stats[f'done_{nb_sides}']\n af = all_stats[f'frac_{nb_sides}']\n al = all_stats[f'len_{nb_sides}']\n print(f'[maxlen] {nb_sides}_done: {np.sum(md)} / {len(md)}')\n print(f'[maxlen] {nb_sides}_frac: {np.mean(mf):0.3f} +/- {np.std(mf):0.1f}')\n print(f'[alleps] {nb_sides}_done: {np.sum(ad)} / {len(ad)}')\n print(f'[alleps] {nb_sides}_frac: {np.mean(af):0.3f} +/- {np.std(af):0.1f}')\n print(f'[alleps] {nb_sides}_len: {np.mean(al):0.2f} +/- {np.std(al):0.1f}\\n')\n\n elif 'cable-ring' in self.path:\n # A bit tricky to interpret area, so I am using percentage improvement.\n md = maxlen_stats['done']\n mf = maxlen_stats['fraction']\n mf_c = maxlen_stats['fraction_delta']\n ma_pi = maxlen_stats['percent_improve']\n ad = all_stats['done']\n af = all_stats['fraction']\n af_c = all_stats['fraction_delta']\n aa_pi = all_stats['percent_improve']\n print(f'[maxlen] done: {np.sum(md)} / {len(md)}')\n print(f'[maxlen] fraction: {np.mean(mf):0.3f} +/- {np.std(mf):0.1f}')\n print(f'[maxlen] f-delta: {np.mean(mf_c):0.3f} +/- {np.std(mf_c):0.1f}')\n print(f'[maxlen] % area: {np.mean(ma_pi):0.3f} +/- {np.std(ma_pi):0.1f}')\n print(f'[alleps] done: {np.sum(ad)} / {len(ad)}')\n print(f'[alleps] fraction: {np.mean(af):0.3f} +/- {np.std(af):0.1f}')\n print(f'[alleps] f-delta: {np.mean(af_c):0.3f} +/- {np.std(af_c):0.1f}')\n print(f'[alleps] % area: {np.mean(aa_pi):0.3f} +/- {np.std(aa_pi):0.1f}')\n\n elif 'cloth-flat' in self.path:\n # Report coverage deltas for `cloth-flat` and `cloth-flat-notarget`.\n md = maxlen_stats['done']\n mc = maxlen_stats['cloth_coverage']\n mc_d = maxlen_stats['coverage_delta']\n ad = all_stats['done']\n ac = all_stats['cloth_coverage']\n ac_d = all_stats['coverage_delta']\n print(f'[maxlen] done: {np.sum(md)} / {len(md)}')\n print(f'[maxlen] cov-final: {np.mean(mc):0.3f} +/- {np.std(mc):0.1f}')\n print(f'[maxlen] cov-delta: {np.mean(mc_d):0.3f} +/- {np.std(mc_d):0.1f}')\n print(f'[alleps] done: {np.sum(ad)} / {len(ad)}')\n print(f'[alleps] cov-final: {np.mean(ac):0.3f} +/- {np.std(ac):0.1f}')\n print(f'[alleps] cov-delta: {np.mean(ac_d):0.3f} +/- {np.std(ac_d):0.1f}')\n\n elif 'cloth-cover' in self.path:\n # As of Aug 12, this env should have all rollouts with 2 actions.\n md = maxlen_stats['done']\n print(f'[maxlen] done: {np.sum(md)} / {len(md)}')\n\n elif 'bag-alone-open' in self.path:\n # Similar to cable-ring.\n md = maxlen_stats['done']\n mf = maxlen_stats['fraction']\n mf_c = maxlen_stats['fraction_delta']\n ma_pi = maxlen_stats['percent_improve']\n ad = all_stats['done']\n af = all_stats['fraction']\n af_c = all_stats['fraction_delta']\n aa_pi = all_stats['percent_improve']\n print(f'[maxlen] done: {np.sum(md)} / {len(md)}')\n print(f'[maxlen] fraction: {np.mean(mf):0.3f} +/- {np.std(mf):0.1f}')\n print(f'[maxlen] f-delta: {np.mean(mf_c):0.3f} +/- {np.std(mf_c):0.1f}')\n print(f'[maxlen] % area: {np.mean(ma_pi):0.3f} +/- {np.std(ma_pi):0.1f}')\n print(f'[alleps] done: {np.sum(ad)} / {len(ad)}')\n print(f'[alleps] fraction: {np.mean(af):0.3f} +/- {np.std(af):0.1f}')\n print(f'[alleps] f-delta: {np.mean(af_c):0.3f} +/- {np.std(af_c):0.1f}')\n 
print(f'[alleps] % area: {np.mean(aa_pi):0.3f} +/- {np.std(aa_pi):0.1f}')\n\n elif 'bag-items-easy' in self.path or 'bag-items-hard' in self.path:\n # Unlike other tasks, here we already did heavy data filtering beforehand.\n md = maxlen_stats['done']\n m_ts = maxlen_stats['task_stage']\n m_zi = maxlen_stats['zone_items_rew']\n m_zb = maxlen_stats['zone_beads_rew']\n ad = all_stats['done']\n a_ts = all_stats['task_stage']\n a_zi = all_stats['zone_items_rew']\n a_zb = all_stats['zone_beads_rew']\n print(f'[maxlen] done: {np.sum(md)} / {len(md)}')\n print(f'[maxlen] task_stage: {np.mean(m_ts):0.3f} +/- {np.std(m_ts):0.1f}')\n print(f'[maxlen] zone_items: {np.mean(m_zi):0.3f} +/- {np.std(m_zi):0.1f}')\n print(f'[maxlen] zone_beads: {np.mean(m_zb):0.3f} +/- {np.std(m_zb):0.1f}')\n print(f'[alleps] done: {np.sum(ad)} / {len(ad)}')\n print(f'[alleps] task_stage: {np.mean(a_ts):0.3f} +/- {np.std(a_ts):0.1f}')\n print(f'[alleps] zone_items: {np.mean(a_zi):0.3f} +/- {np.std(a_zi):0.1f}')\n print(f'[alleps] zone_beads: {np.mean(a_zb):0.3f} +/- {np.std(a_zb):0.1f}')\n\n elif 'bag-color-goal' in self.path:\n # Also does data filtering beforehand.\n md = maxlen_stats['done']\n m_ts = maxlen_stats['task_stage']\n m_th = maxlen_stats['frac_in_target_bag']\n m_dh = maxlen_stats['frac_in_distract_bag']\n ad = all_stats['done']\n a_ts = all_stats['task_stage']\n a_th = all_stats['frac_in_target_bag']\n a_dh = all_stats['frac_in_distract_bag']\n print(f'[maxlen] done: {np.sum(md)} / {len(md)}')\n print(f'[maxlen] task_stage: {np.mean(m_ts):0.3f} +/- {np.std(m_ts):0.1f}')\n print(f'[maxlen] frac_target_bag: {np.mean(m_th):0.3f} +/- {np.std(m_th):0.1f}')\n print(f'[maxlen] frac_distract_bag: {np.mean(m_dh):0.3f} +/- {np.std(m_dh):0.1f}')\n print(f'[alleps] done: {np.sum(ad)} / {len(ad)}')\n print(f'[alleps] task_stage: {np.mean(a_ts):0.3f} +/- {np.std(a_ts):0.1f}')\n print(f'[alleps] frac_target_bag: {np.mean(a_th):0.3f} +/- {np.std(a_th):0.1f}')\n print(f'[alleps] frac_distract_bag: {np.mean(a_dh):0.3f} +/- {np.std(a_dh):0.1f}')", "def save_images(PATH, show_img, datasets, from_dataset):\n dataset = datasets[from_dataset]\n imgModels = dataset['models']\n for modelname, model in imgModels.items():\n print('save', modelname)\n plt.imshow(model[70])\n plt.set_cmap(\"gray\")\n plt.axis('off')\n plt.savefig(PATH + '/' + from_dataset + '_' + modelname + '.png', dpi=400)\n\n if show_img == True:\n plt.show()", "def save(self, i_episode):\n if i_episode % self.state.config.save_freq == 0:\n if self.state._models is None:\n self.register_models()\n save_dir = os.path.join(MODULE_CONFIG.BaseConfig.PATH_CHECKPOINT, str(i_episode))\n Directories.mkdir(save_dir)\n for k, model in self.state._models.items():\n model.save(\n file_name_with_path=os.path.join(save_dir,\n f'e_{i_episode}_{k if model.name == \"\" else model.name}.th'))\n\n with open(os.path.join(save_dir, f\"e_{i_episode}.meta\"), 'w') as f:\n json.dump(self.state, f, cls=CustomJsonEncoder, indent=2)\n _exp_meta = json.load(open(os.path.join(MODULE_CONFIG.BaseConfig.BASE_DIR, '..', '..',\n MODULE_CONFIG.BaseConfig.EXPERIMENTS_META_NAME + '.json')))\n _exp_name = MODULE_CONFIG.BaseConfig.BASE_DIR.split('/')[-2]\n _exp_run = MODULE_CONFIG.BaseConfig.BASE_DIR.split('/')[-1]\n _exp_meta[_exp_name][_exp_run]['available_checkpoints'].append(i_episode)\n json.dump(_exp_meta, open(os.path.join(MODULE_CONFIG.BaseConfig.BASE_DIR, '..', '..',\n MODULE_CONFIG.BaseConfig.EXPERIMENTS_META_NAME + '.json'), 'w'),\n indent=2)", "def test_save_images(self):\n 
save_file(self.quart.save_images, to_single_file=False)", "def loopitems(items, db):\n for item in items:\n if 'error' in item.keys():\n saveimage(item, db, 'collection_image_status')\n else:\n if checkfieldunicity(db, item['md5']):\n saveimage(item, db, 'collection_image')\n saveimage(itemstatus(item, error='RAS'),\n db,\n 'collection_image_status')\n else:\n saveimage(itemstatus(item), db, 'collection_image_status')", "def save_to_images(self):\n \n logging.debug(\"save_to_images called\")\n # return None\n notify(\"Saving to images\")\n # first, create the images\n image_map = {}\n for machine in self.machines:\n logging.info(\"Creating image for %s\" % machine)\n notify(\"Creating image for %s\" % machine)\n m = self.machines[machine]\n img_id = m.create_image()\n logging.debug(\"machine: %s, img_id: %s\" % (str(machine), str(img_id) ))\n\n old_img_id = self.images.get(m.machine_name, None)\n if old_img_id:\n logging.info(\"machine %s old image added to old_images %s \" % ( str(machine), str(old_img_id) ))\n self.old_images.append(old_img_id)\n image_map[m.machine_name] = img_id\n \n # print image_map\n # FIXME: this needs to be updating the cloudfiles\n # savefile = open(self.savefile, 'w')\n # yaml.dump(image_map, savefile)\n # savefile.close()\n # print self.images\n # print image_map\n notify(\"Saving config\")\n self.images = image_map\n self.save()", "def parse(self):\n writer = mediaoutput.CustomImageWriter(self.output_dir, self.file_format)\n with open(self.timetable_path) as timetable:\n for line in timetable:\n slide = self.slides.pop(0)\n slide_times = line[line.index(':') + 2:].split(' ')\n for time in slide_times:\n writer.write(slide.img, time.rstrip())", "def storeAllOnDisk(self, path):\n # fetch meta data\n urls = list()\n y_data = self.data_dict.labels\n ids = self.data_dict.unique_ids\n urls = self.data_dict.paths\n\n # save in chunks of 1000 images\n cuts = [x for x in range(0, self.n_observations, 1000)]\n if cuts[-1] < self.n_observations:\n cuts.append(self.n_observations)\n\n # convert batch sizes to integers\n cuts = [int(x) for x in cuts]\n\n for i in range(0, (len(cuts) - 1)):\n\n idx = [x for x in range(cuts[i], cuts[i+1])]\n\n current_ids = [ids[z] for z in idx]\n current_urls = [urls[z] for z in idx]\n current_y = [y_data[z] for z in idx]\n\n # invoke asynchronous read\n binary_images = self.imageLoader.getImages(current_urls)\n\n # store on disk\n img_id = 0\n for c_id, c_y in zip(current_ids, current_y):\n # check directory\n if not os.path.isdir(path + str(c_y)):\n os.mkdir(path + str(c_y))\n # define path\n path_img = path + str(c_y) + \"/\" + \\\n str(c_id) + \".jpeg\"\n img = binary_images[img_id]\n img = img.resize(self.image_size)\n img.save(path_img)\n img_id += 1\n return None", "def on_step_end(self, step, logs):\n episode = logs['episode']\n self.observations[episode].append(logs['observation'])\n self.rewards[episode].append(logs['reward'])\n self.actions[episode].append(logs['action'])\n self.metrics[episode].append(logs['metrics'])\n self.step += 1", "def on_step_end(self, step, logs):\n episode = logs['episode']\n self.observations[episode].append(logs['observation'])\n self.rewards[episode].append(logs['reward'])\n self.actions[episode].append(logs['action'])\n self.metrics[episode].append(logs['metrics'])\n self.step += 1", "def saveItemImage(item):\n itemName = item['data']['name']\n console('Getting image for: {}'.format(itemName))\n fullImgUrl = '{}{}.jpg'.format(mfc_img_base, item['data']['id'])\n req = 
urllib.request.Request(fullImgUrl, \\\n headers={'User-Agent' : \"Magic Browser\"})\n foutName = '{}/{}.jpg'.format(imageFolder, \\\n re.sub(r'([^\\s\\w]|_)+', '', itemName)[:32])\n with urllib.request.urlopen(req) as response, \\\n open(foutName, 'wb+') as fout:\n shutil.copyfileobj(response, fout)", "def genImages(self, gen_ts):\n t1 = time.time()\n ngen = 0\n\n # determine how much logging is desired\n log_success = to_bool(search_up(self.image_dict, 'log_success', True))\n\n # Loop over each time span class (day, week, month, etc.):\n for timespan in self.image_dict.sections:\n\n # Now, loop over all plot names in this time span class:\n for plotname in self.image_dict[timespan].sections:\n\n # Accumulate all options from parent nodes:\n plot_options = accumulateLeaves(self.image_dict[timespan][plotname])\n\n plotgen_ts = gen_ts\n if not plotgen_ts:\n binding = plot_options['data_binding']\n db_manager = self.db_binder.get_manager(binding)\n plotgen_ts = db_manager.lastGoodStamp()\n if not plotgen_ts:\n plotgen_ts = time.time()\n\n image_root = os.path.join(self.config_dict['WEEWX_ROOT'],\n plot_options['HTML_ROOT'])\n # Get the path that the image is going to be saved to:\n img_file = os.path.join(image_root, '%s.png' % plotname)\n\n # Convert from string to an integer:\n ai = weeutil.weeutil.nominal_spans(plot_options.get('aggregate_interval'))\n # Check whether this plot needs to be done at all:\n if skipThisPlot(plotgen_ts, ai, img_file):\n continue\n\n # skip image files that are fresh, but only if staleness is defined\n stale = to_int(plot_options.get('stale_age'))\n if stale:\n t_now = time.time()\n try:\n last_mod = os.path.getmtime(img_file)\n if t_now - last_mod < stale:\n log.debug(\"Skip '%s': last_mod=%s age=%s stale=%s\",\n img_file, last_mod, t_now - last_mod, stale)\n continue\n except os.error:\n pass\n\n # Create the subdirectory that the image is to be put in. 
Wrap in a try block in\n # case it already exists.\n try:\n os.makedirs(os.path.dirname(img_file))\n except OSError:\n pass\n\n # Create a new instance of a time plot and start adding to it\n plot = weeplot.genplot.TimePlot(plot_options)\n\n # Calculate a suitable min, max time for the requested time.\n minstamp, maxstamp, timeinc = weeplot.utilities.scaletime(\n plotgen_ts - int(plot_options.get('time_length', 86400)), plotgen_ts)\n # Override the x interval if the user has given an explicit interval:\n timeinc_user = to_int(plot_options.get('x_interval'))\n if timeinc_user is not None:\n timeinc = timeinc_user\n plot.setXScaling((minstamp, maxstamp, timeinc))\n\n # Set the y-scaling, using any user-supplied hints:\n yscale = plot_options.get('yscale', ['None', 'None', 'None'])\n plot.setYScaling(weeutil.weeutil.convertToFloat(yscale))\n\n # Get a suitable bottom label:\n bottom_label_format = plot_options.get('bottom_label_format', '%m/%d/%y %H:%M')\n bottom_label = time.strftime(bottom_label_format, time.localtime(plotgen_ts))\n plot.setBottomLabel(bottom_label)\n\n # Set day/night display\n plot.setLocation(self.stn_info.latitude_f, self.stn_info.longitude_f)\n plot.setDayNight(to_bool(plot_options.get('show_daynight', False)),\n weeplot.utilities.tobgr(plot_options.get('daynight_day_color',\n '0xffffff')),\n weeplot.utilities.tobgr(plot_options.get('daynight_night_color',\n '0xf0f0f0')),\n weeplot.utilities.tobgr(plot_options.get('daynight_edge_color',\n '0xefefef')))\n\n # Loop over each line to be added to the plot.\n for line_name in self.image_dict[timespan][plotname].sections:\n\n # Accumulate options from parent nodes.\n line_options = accumulateLeaves(self.image_dict[timespan][plotname][line_name])\n\n # See what observation type to use for this line. By default, use the section\n # name.\n var_type = line_options.get('data_type', line_name)\n\n # Look for aggregation type:\n aggregate_type = line_options.get('aggregate_type')\n if aggregate_type in (None, '', 'None', 'none'):\n # No aggregation specified.\n aggregate_type = aggregate_interval = None\n else:\n try:\n # Aggregation specified. 
Get the interval.\n aggregate_interval = weeutil.weeutil.nominal_spans(\n line_options['aggregate_interval'])\n except KeyError:\n log.error(\"Aggregate interval required for aggregate type %s\",\n aggregate_type)\n log.error(\"Line type %s skipped\", var_type)\n continue\n\n # Now its time to find and hit the database:\n binding = line_options['data_binding']\n db_manager = self.db_binder.get_manager(binding)\n # we need to pass the line options and plotgen_ts to our xtype\n # first get a copy of line_options\n option_dict = dict(line_options)\n # but we need to pop off aggregate_type and\n # aggregate_interval as they are used as explicit arguments\n # in our xtypes call\n option_dict.pop('aggregate_type', None)\n option_dict.pop('aggregate_interval', None)\n # then add plotgen_ts\n option_dict['plotgen_ts'] = plotgen_ts\n start_vec_t, stop_vec_t ,data_vec_t = weewx.xtypes.get_series(\n var_type,\n TimeSpan(minstamp, maxstamp),\n db_manager,\n aggregate_type=aggregate_type,\n aggregate_interval=aggregate_interval,\n **option_dict)\n\n # Get the type of plot (\"bar', 'line', or 'vector')\n plot_type = line_options.get('plot_type', 'line').lower()\n\n if aggregate_type and plot_type != 'bar':\n # If aggregating, put the point in the middle of the interval\n start_vec_t = ValueTuple(\n [x - aggregate_interval / 2.0 for x in start_vec_t[0]], # Value\n start_vec_t[1], # Unit\n start_vec_t[2]) # Unit group\n stop_vec_t = ValueTuple(\n [x - aggregate_interval / 2.0 for x in stop_vec_t[0]], # Velue\n stop_vec_t[1], # Unit\n stop_vec_t[2]) # Unit group\n\n # Convert the data to the requested units\n new_data_vec_t = self.converter.convert(data_vec_t)\n\n # Add a unit label. NB: all will get overwritten except the last. Get the label\n # from the configuration dictionary.\n unit_label = line_options.get(\n 'y_label', self.formatter.get_label_string(new_data_vec_t[1]))\n # Strip off any leading and trailing whitespace so it's easy to center\n plot.setUnitLabel(unit_label.strip())\n\n # See if a line label has been explicitly requested:\n label = line_options.get('label')\n if label:\n # Yes. Get the text translation\n label = self.text_dict[label]\n else:\n # No explicit label. Look up a generic one.\n # NB: generic_dict is a KeyDict which will substitute the key\n # if the value is not in the dictionary.\n label = self.generic_dict[var_type]\n\n # See if a color has been explicitly requested.\n color = line_options.get('color')\n if color is not None: color = weeplot.utilities.tobgr(color)\n fill_color = line_options.get('fill_color')\n if fill_color is not None: fill_color = weeplot.utilities.tobgr(fill_color)\n\n # Get the line width, if explicitly requested.\n width = to_int(line_options.get('width'))\n\n interval_vec = None\n gap_fraction = None\n vector_rotate = None\n\n # Some plot types require special treatments:\n if plot_type == 'vector':\n vector_rotate_str = line_options.get('vector_rotate')\n vector_rotate = -float(vector_rotate_str) \\\n if vector_rotate_str is not None else None\n elif plot_type == 'bar':\n interval_vec = [x[1] - x[0] for x in\n zip(start_vec_t.value, stop_vec_t.value)]\n elif plot_type == 'line':\n gap_fraction = to_float(line_options.get('line_gap_fraction'))\n if gap_fraction is not None and not 0 < gap_fraction < 1:\n log.error(\"Gap fraction %5.3f outside range 0 to 1. Ignored.\",\n gap_fraction)\n gap_fraction = None\n else:\n log.error(\"Unknown plot type '%s'. 
Ignored\", plot_type)\n continue\n\n # Get the type of line (only 'solid' or 'none' for now)\n line_type = line_options.get('line_type', 'solid')\n if line_type.strip().lower() in ['', 'none']:\n line_type = None\n\n marker_type = line_options.get('marker_type')\n marker_size = to_int(line_options.get('marker_size', 8))\n \n # Add the line to the emerging plot:\n plot.addLine(weeplot.genplot.PlotLine(\n stop_vec_t[0], new_data_vec_t[0],\n label = label,\n color = color,\n fill_color = fill_color,\n width = width,\n plot_type = plot_type,\n line_type = line_type,\n marker_type = marker_type,\n marker_size = marker_size,\n bar_width = interval_vec,\n vector_rotate = vector_rotate,\n gap_fraction = gap_fraction))\n\n # OK, the plot is ready. Render it onto an image\n image = plot.render()\n\n try:\n # Now save the image\n image.save(img_file)\n ngen += 1\n except IOError as e:\n log.error(\"Unable to save to file '%s' %s:\", img_file, e)\n t2 = time.time()\n\n if log_success:\n log.info(\"Generated %d images for report %s in %.2f seconds\",\n ngen,\n self.skin_dict['REPORT_NAME'], t2 - t1)", "def save(self):\n img = Image.new(\"1\", (self.container.width, self.container.height))\n draw = ImageDraw.Draw(img)\n for item in self.items:\n draw.ellipse(item.box_coordinates(), fill=1)\n del draw\n img.save(\"plot.bmp\", \"bmp\")", "def save_test_images(images):\n for description, img in images.items():\n save_to_image(img, description)\n save_to_netcdf(img, description)", "def stack_images(self, observation, p, update=False, nextstate=False):\n # image preprocessing\n img_preprocessed = self.preprocessing(observation)\n if update == False:\n if (len(self.img_collection[p]) == 0): # start of new episode, use len() instead of timestep to stay Markovian\n # img_collection get filled with zeros\n self.img_collection[p] = deque([np.zeros((100,100), dtype=np.int) for i in range(4)], maxlen=4)\n # fill img_collection 4x with the first frame\n self.img_collection[p].append(img_preprocessed)\n self.img_collection[p].append(img_preprocessed)\n self.img_collection[p].append(img_preprocessed)\n self.img_collection[p].append(img_preprocessed)\n # Stack the images in img_collection\n img_stacked = np.stack(self.img_collection[p], axis=2)\n else:\n # Delete first/oldest entry and append new image\n self.img_collection[p].append(img_preprocessed)\n\n #CHECK TO SEE IF THE PICTURES LOOK GOOD (DONE:WORK)\n #np_array = np.array(self.img_collection[p][0])\n #plt.imsave( \"Image0_%s_%d.png\" % (p, self.timestepss), np_array, cmap='Greys')\n #np_array = np.array(self.img_collection[p][1])\n #plt.imsave( \"Image1_%s_%d.png\" % (p, self.timestepss), np_array, cmap='Greys')\n #np_array = np.array(self.img_collection[p][2])\n #plt.imsave( \"Image2_%s_%d.png\" % (p, self.timestepss), np_array, cmap='Greys')\n #np_array = np.array(self.img_collection[p][3])\n #plt.imsave( \"Image3_%s_%d.png\" % (p, self.timestepss), np_array, cmap='Greys')\n img_stacked = np.stack(self.img_collection[p], axis=2)\n #print(img_stacked.shape)\n return img_stacked\n else:\n if nextstate==True:\n #print('Next State')\n #print(len(self.img_collection_update[p]))\n img_nextstate = self.img_collection_update[p].copy()\n #CHECK TO SEE IF THE PICTURES LOOK GOOD (DONE:WORK)\n \"\"\" np_array = np.array(self.img_collection_update[p][0])\n plt.imsave( \"Image%s_%d_0.png\" % (p, self.timestepss), np_array, cmap='Greys')\n np_array = np.array(self.img_collection_update[p][1])\n plt.imsave( \"Image%s_%d_1.png\" % (p, self.timestepss), np_array, 
cmap='Greys')\n np_array = np.array(self.img_collection_update[p][2])\n plt.imsave( \"Image%s_%d_2.png\" % (p, self.timestepss), np_array, cmap='Greys')\n np_array = np.array(self.img_collection_update[p][3])\n plt.imsave( \"Image%s_%d_3.png\" % (p, self.timestepss), np_array, cmap='Greys') \"\"\"\n #Appending the new image\n img_nextstate.append(img_preprocessed)\n #CHECK TO SEE IF THE PICTURES LOOK GOOD (DONE:WORK)\n \"\"\" np_array = np.array(img_nextstate[0])\n plt.imsave( \"Image%s_%d_40.png\" % (p, self.timestepss), np_array, cmap='Greys')\n np_array = np.array(img_nextstate[1])\n plt.imsave( \"Image%s_%d_41.png\" % (p, self.timestepss), np_array, cmap='Greys')\n np_array = np.array(img_nextstate[2])\n plt.imsave( \"Image%s_%d_42.png\" % (p, self.timestepss), np_array, cmap='Greys')\n np_array = np.array(img_nextstate[3])\n plt.imsave( \"Image%s_%d_43.png\" % (p, self.timestepss), np_array, cmap='Greys') \"\"\"\n #if (self.img_collection_update[p][3] != img_nextstate[3]).all:\n # print('true')\n # Stack the images in img_collection\n img_stacked = np.stack(img_nextstate, axis=2)\n return img_stacked\n\n if (len(self.img_collection_update[p]) == 0): # start of new episode, use len() instead of timestep to stay Markovian\n # img_collection get filled with zeros\n #print('New', p)\n self.img_collection_update[p] = deque([np.zeros((100,100), dtype=np.int) for i in range(4)], maxlen=4)\n # fill img_collection 4x with the first frame\n self.img_collection_update[p].append(img_preprocessed)\n self.img_collection_update[p].append(img_preprocessed)\n self.img_collection_update[p].append(img_preprocessed)\n self.img_collection_update[p].append(img_preprocessed)\n\n # Stack the images in img_collection\n img_stacked = np.stack(self.img_collection_update[p], axis=2)\n else:\n # Delete first/oldest entry and append new image\n self.img_collection_update[p].append(img_preprocessed)\n #DEBUGGGG CHECKING (WORKS)\n #print(len(self.img_collection_update[p]))\n #print('Adding images')\n #np_array = np.array(self.img_collection_update[p][0])\n \"\"\" plt.imsave( \"Image_%s_%d_0.png\" % (p, self.timestepss), np_array, cmap='Greys')\n np_array = np.array(self.img_collection_update[p][1])\n plt.imsave( \"Image_%s_%d_1.png\" % (p, self.timestepss), np_array, cmap='Greys')\n np_array = np.array(self.img_collection_update[p][2])\n plt.imsave( \"Image_%s_%d_2.png\" % (p, self.timestepss), np_array, cmap='Greys')\n np_array = np.array(self.img_collection_update[p][3])\n plt.imsave( \"Image_%s_%d_3.png\" % (p, self.timestepss), np_array, cmap='Greys') \"\"\"\n # Stack the images in img_collection\n img_stacked = np.stack(self.img_collection_update[p], axis=2)\n return img_stacked", "def save_figures(expt):\n if isinstance(expt, str):\n expt = get_experiment(expt)\n \n tr_expt = get_training_expt(expt)\n\n storage.ensure_directory(expt.figures_dir())\n\n for it in tr_expt.save_after:\n for avg in AVG_VALS:\n print 'Iteration', it\n try:\n rbm = load_rbm(expt, it, avg)\n except:\n continue\n final_states = storage.load(expt.final_states_file(it, avg))\n gibbs_states = storage.load(expt.gibbs_states_file(it, avg))\n\n fig = rbm_vis.show_particles(rbm, final_states, expt.dataset)\n misc.save_image(fig, expt.final_states_figure_file(it, avg))\n\n fig = rbm_vis.show_particles(rbm, gibbs_states, expt.dataset)\n misc.save_image(fig, expt.gibbs_states_figure_file(it, avg))\n\n print_log_probs(expt, open(expt.log_probs_text_file(), 'w'))", "def write_thumbnails(self, appstruct):\n slugser = 
slugify(appstruct[\"serial\"])\n pdf_filename = \"thumbnails/%s/uploaded.pdf\" % slugser\n top_file = \"thumbnails/%s/top.png\" % slugser\n mos_file = \"thumbnails/%s/mosaic.png\" % slugser\n \n thumg = ThumbnailGenerator(pdf_filename)\n self.save_blob(thumg.top_thumbnail(), top_file)\n self.save_blob(thumg.mosaic_thumbnail(), mos_file)", "def saveImages(saveImagePath,dataForSaving,enumeratedList):\n \n for i in range(len(dataForSaving[0])):\n singleChar = dataForSaving[0][i]\n singleImage = dataForSaving[1][i]\n \n if singleChar not in enumeratedList:\n enumeratedList.append(singleChar)\n \n dimension = int(singleImage.shape[0]**0.5)\n singleImage = Image.fromarray(np.resize(singleImage,(dimension,dimension)), 'L')\n \n copyVal = 0\n while os.path.exists('{}\\\\{}_copy{}.png'.format(saveImagePath,\\\n enumeratedList.index(singleChar),copyVal)):\n copyVal += 1\n \n singleImage.save('{}\\\\{}_copy{}.png'.format(saveImagePath,\\\n enumeratedList.index(singleChar),copyVal))", "def save_images(self, sess, epoch):\n if not os.path.exists(self._images_dir):\n os.makedirs(self._images_dir)\n\n names = ['inputA_', 'inputB_', 'fakeA_',\n 'fakeB_', 'cycA_', 'cycB_']\n\n with open(os.path.join(\n self._output_dir, 'epoch_' + str(epoch) + '.html'\n ), 'w') as v_html:\n for i in range(0, self._num_imgs_to_save):\n print(\"Saving image {}/{}\".format(i, self._num_imgs_to_save))\n inputs = sess.run(self.inputs)\n fake_A_temp, fake_B_temp, cyc_A_temp, cyc_B_temp = sess.run([\n self.fake_images_a,\n self.fake_images_b,\n self.cycle_images_a,\n self.cycle_images_b\n ], feed_dict={\n self.input_a: inputs['images_i'],\n self.input_b: inputs['images_j']\n })\n\n tensors = [inputs['images_i'], inputs['images_j'],\n fake_B_temp, fake_A_temp, cyc_A_temp, cyc_B_temp]\n\n for name, tensor in zip(names, tensors):\n image_name = name + str(epoch) + \"_\" + str(i) + \".jpg\"\n imsave(os.path.join(self._images_dir, image_name),\n ((tensor[0] + 1) * 127.5).astype(np.uint8)\n )\n v_html.write(\n \"<img src=\\\"\" +\n os.path.join('imgs', image_name) + \"\\\">\"\n )\n v_html.write(\"<br>\")", "def _dump_image(self):\n if not self._current_id == len(self._img_ids):\n warnings.warn(\n 'Recorded {} out of {} validation images, incomplete results'.format(\n self._current_id, len(self._img_ids)))\n try:\n for im_name, im in self._panoptic_images.items():\n cv2.imwrite(osp.join(self._save_imgpath, im_name), im)\n except IOError as e:\n raise RuntimeError(\"Unable to dump images, ignored. 
What(): {}\".format(str(e)))", "def save_em_jpgs(self, output_dir, tag):\n for i in range(self.N_itr):\n self.plot_EM_estimate(i)\n plt.savefig(os.path.join(\n output_dir,\n 'em_est_{}_{:03}.jpg'.format(tag, i)), dpi=50)", "def save(images, output):\n for image, frame in images:\n image.save(output(frame))", "def save_images(self, sess, epoch):\n if not os.path.exists(self._images_dir):\n os.makedirs(self._images_dir)\n\n if not os.path.exists(os.path.join(self._images_dir, 'imgs')):\n os.makedirs(os.path.join(self._images_dir, 'imgs'))\n \n names = ['inputB_', 'fakeB_depth_' , 'cycB_']\n\n with open(os.path.join(\n self._output_dir, 'epoch_' + str(epoch) + '.html'), 'w') as v_html:\n for i in range(0, self._num_imgs_to_save):\n print(\"Saving image {}/{}\".format(i, self._num_imgs_to_save))\n x1_t, name1 = self.dataset.next_batch()\n count = 0\n fake_A_temp, cyc_B_temp = sess.run([\n self.fake_images_a,\n self.cycle_images_b], \n feed_dict={self.input_b: x1_t})\n \n fakedepth = fake_A_temp[:,:,:,-1]\n tensors = [x1_t, fakedepth, cyc_B_temp]\n\n for name, tensor in zip(names, tensors):\n #print(name)\n # if name == 'inputB_' or name == 'fakeB_depth_':\n # image_name = name1[count] + '_' + name + str(epoch) + \"_\" + str(i) + \".jpg\"\n # imsave(os.path.join(self._images_dir, 'imgs', image_name), ((tensor[0] + 1) * 127.5).astype(np.uint8))\n # else:\n image_name = name + str(epoch) + \"_\" + str(i) + \".jpg\"\n imsave(os.path.join(self._images_dir, image_name), ((tensor[0] + 1) * 127.5).astype(np.uint8))\n v_html.write(\n \"<img src=\\\"\" +\n os.path.join('imgs', image_name) + \"\\\">\"\n )\n v_html.write(\"<br>\")\n count += 1" ]
[ "0.64021105", "0.6391139", "0.6146052", "0.6066712", "0.6050973", "0.60094845", "0.5998496", "0.59843314", "0.58776593", "0.58635044", "0.5851753", "0.5802809", "0.57339936", "0.5727365", "0.57036406", "0.56863064", "0.56863064", "0.56473136", "0.56447446", "0.56397223", "0.56353503", "0.56052744", "0.55972314", "0.55790496", "0.55683386", "0.5555781", "0.5514508", "0.55143374", "0.5500255", "0.549071" ]
0.6513759
0
Routine for determining the index mu such that t_mu <= x < t_mu+1. If x is larger than t[1], then the last index is returned, for convenience sake. If x is smaller than t[0], then the first index is returned, for convenience sake.
def index(x, t):
    if x < t[0]:
        return 0
    for i in range(len(t) - 1):
        if t[i] <= x < t[i + 1]:
            return i
    return len(t) - 2
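A minimal sketch of how the routine above could be exercised; the knot vector t and the sample values below are illustrative only, not taken from any source here:

t = [0, 0, 1, 2, 3, 3]
assert index(-1.0, t) == 0           # x below t[0]: first index, by convention
assert index(1.5, t) == 2            # t[2] <= 1.5 < t[3]
assert index(5.0, t) == len(t) - 2   # x beyond the last knot: last interval index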
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index", "def _get_indx(self, t):\n t = np.array(t)\n a = (t[:, np.newaxis] <= self._data['stop']) & (t[:, np.newaxis] >=\n self._data['start'])\n return np.array([np.where(row)[0][0] for row in a])", "def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1", "def find_gt_index(a, x):\n i = bisect_right(a, x)\n if i < len(a):\n return i\n raise ValueError", "def find(self, mu):\n for k, muk in enumerate(self.mu_db):\n if self.norm(muk - mu) == 0.0:\n ind = k+self.offset\n return ind, self.which[k]\n return None, None", "def _get_x_of_t(self, arr):\n\n t_max = self._get_max_t()\n arr = list(arr)\n\n if arr[-1][0] < t_max:\n arr.append([t_max, arr[-1][1]])\n\n arr = np.array(arr)\n return arr[:, 1], arr[:, 0]", "def fn(arr, x):\n lo, hi = 0, len(arr)\n while lo < hi: \n mid = lo + hi >> 1\n if arr[mid] < x: lo = mid+1\n else: hi = mid\n return lo", "def find(self, mu):\n for k, muk in enumerate(self.mu_db):\n if self.norm(muk - mu) == 0.0:\n return k+self.offset\n return None", "def mu(self, x):\n x = utils.to_float_if_int(x)\n utils.verify_is_numeric(x)\n\n return (\n 0\n if (x < self.__l or x > self.__r)\n else (x - self.__l) / (self.__n - self.__l)\n if (self.__l <= x <= self.__n)\n else (self.__r - x) / (self.__r - self.__n)\n )", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def tauPoint(numDeps, tauRos, tau):\r\n\r\n #int index;\r\n\r\n help = [0.0 for i in range(numDeps)]\r\n\r\n for i in range(0, numDeps):\r\n\r\n help[i] = tauRos[0][i] - tau\r\n help[i] = abs(help[i])\r\n\r\n \r\n index = 0\r\n min = help[index]\r\n\r\n for i in range(1, numDeps):\r\n\r\n if (help[i] < min): \r\n min = help[i]\r\n index = i\r\n \r\n\r\n return index", "def bisect(self, t):\n l = 0\n h = len(self.lst)\n while l < h:\n m = (l+h)/2\n if self.lst[m] < t:\n l = m+1\n else:\n h = m\n\n return l", "def findsubintervals (t ,x):\n k, m = len(t), len(x)\n if k<2:\n return zeros(m,1)\n else:\n j = concatenate([t, x]).argsort()\n i = nonzero(j >= k)\n arr = arange(0,m)\n arr = i - arr - 1\n arr = arr[0]\n return arr", "def _get_x_of_t(self,arr,normed=True):\n t_max = self._get_max_t()\n arr = list(arr)\n\n if arr[-1][0]<t_max:\n arr.append([t_max,arr[-1][1]])\n\n arr = np.array(arr)\n if normed:\n return arr[:,1], arr[:,0]\n else:\n return arr[:,1]*self.G.number_of_nodes(), arr[:,0]", "def rect_xi(t, T=1):\n return (t>=-T/2) & (t <= T/2), 0", "def testing_index(self, dt_plus=None, dt_minus=0.):\n assert dt_minus >= 0., \"dt_minus must be positive\"\n bottom = self.cutoff_t + dt_minus\n if dt_plus:\n assert dt_plus >= 0., \"dt_plus must be positive\"\n # ind = (self.t > bottom) & (self.t <= (self.cutoff_t + dt_plus))\n ind = (self.t >= bottom) & (self.t < (self.cutoff_t + dt_plus))\n else:\n # ind = self.t > bottom\n ind = self.t >= bottom\n\n return np.where(ind)[0]", "def find_lt(a,x):\n i = bisect_left(a,x)\n if i:\n return a[i-1]\n raise ValueError", "def get_xrange_indices(self, lower, upper) -> Tuple[int, int]:\n lower_index = np.argmax(self.x >= lower)\n upper_index = np.argmax(self.x >= upper)\n 
return int(lower_index), int(upper_index)", "def get_index_under_point(self, event):\r\n xy = np.asarray(list(zip(self.xs, self.ys)))\r\n xyt = self.line.get_transform().transform(xy)\r\n xt, yt = xyt[:, 0], xyt[:, 1]\r\n d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)\r\n pt_idx = np.argmin(d)\r\n if d[pt_idx] >= self.max_pixels_from_vertex:\r\n pt_idx = None\r\n return pt_idx", "def index(a, x):\n i = bisect_left(a, x)\n if i != len(a) and a[i] == x:\n return i\n raise ValueError", "def get_nearest_index(self, x_value: float) -> int:\n return int(np.argmax(self.x >= x_value))", "def item_t(data_alt, item, min_t):\n for t in data_alt[item]:\n if t >= min_t:\n return t\n return None", "def find_bmu(t, net, m):\n #inicializa o index\n bmu_index = np.array([0,0])\n #inicia distancia minima para um numero bem grande\n min_dist = np.iinfo(np.int).max\n #anda pela matriz de pesos e procura menor distancia do vetor t\n for x in range(net.shape[0]):\n for y in range(net.shape[1]):\n #pesos atuais que estou considerando\n w = net[x, y, :].reshape(m,1) #transforma matriz em vetor 3D\n #calcula distancia euclidiana ao quadrado (evita tirar raiz)\n sq_dist = np.sum((w - t) ** 2) #soma as diferencas ao quadrado de cada valor do vetor\n if sq_dist < min_dist: #se distancia eh menor salva valor e index\n min_dist = sq_dist\n bmu_index = np.array([x,y])\n\n #depois de percorrer a matriz tenho a menor distancia e o index do vetor BMU\n #pega vetor dentro do net\n bmu = net[bmu_index[0], bmu_index[1], :].reshape(m,1)\n #retorna o bmu e o indice\n return (bmu, bmu_index)", "def find_min_index(A: List[int], x: int) -> int:\n min_index = -1\n start = 0\n end = len(A)-1\n\n while start <= end:\n mid = start + (end-start)//2\n\n if A[mid] == x:\n min_index = mid\n end = mid - 1\n elif x < A[mid]:\n end = mid - 1\n else:\n start = mid + 1\n\n return min_index", "def ttost_ind(self, low, upp, usevar=\"pooled\"):\n tt1 = self.ttest_ind(alternative=\"larger\", usevar=usevar, value=low)\n tt2 = self.ttest_ind(alternative=\"smaller\", usevar=usevar, value=upp)\n # TODO: remove tuple return, use same as for function tost_ind\n return np.maximum(tt1[1], tt2[1]), (tt1, tt2)", "def findFirstElementGreaterThan(self, array, index):\n l, r = 0, len(array) - 1\n ans = -1;\n while (l <= r):\n mid = l + (r - l) // 2;\n # Move to right side if target is greater\n if (array[mid] <= index):\n l = mid + 1;\n # Move left side.\n else:\n ans = mid;\n r = mid - 1;\n return ans;", "def Ni_find(t):\r\n return ep(t) - 1", "def find_x_for_T(self, T_0=1.e9):\n\n # our strategy here assumes that the hot ash is in the early\n # part of the profile. We then find the index of the first\n # point where T drops below T_0\n idx = np.where(self.T < T_0)[0][0]\n\n T1 = self.T[idx-1]\n x1 = self.x[idx-1]\n\n T2 = self.T[idx]\n x2 = self.x[idx]\n\n slope = (x2 - x1)/(T2 - T1)\n\n return x1 + slope*(T_0 - T1)", "def find_le(a, x):\n i = bisect_right(a, x)\n if i:\n return a[i - 1]\n raise ValueError" ]
[ "0.6467503", "0.6289578", "0.62218434", "0.60289025", "0.59865326", "0.59564394", "0.5892032", "0.5744281", "0.57170856", "0.5653306", "0.5653306", "0.5596471", "0.5575372", "0.5501344", "0.5484174", "0.5461618", "0.54523337", "0.5445007", "0.5395855", "0.5392234", "0.5381911", "0.5361144", "0.53307205", "0.53269905", "0.5318269", "0.53052884", "0.52886015", "0.52718526", "0.52506554", "0.5250037" ]
0.7450273
0
Routine for computing the polynomial curve q of degree p that interpolates the points c.
def algorithm_1_1(p, c, t, x):
    q = np.array(c, dtype=np.float64)
    for k in range(1, p + 1):
        for j in range(0, p - k + 1):
            q[j] = (t[j + k] - x) / (t[j + k] - t[j]) * q[j] + (x - t[j]) / (
                t[j + k] - t[j]) * q[j + 1]
    return q[0]
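A minimal usage sketch, assuming numpy is available and the routine above is in scope; the points below are illustrative and lie on y = x**2, so the degree-2 interpolant evaluated at x = 1.5 should return 2.25:

import numpy as np

c = [0.0, 1.0, 4.0]   # values c[j] to interpolate
t = [0.0, 1.0, 2.0]   # parameters t[j] at which the values are attained
val = algorithm_1_1(2, c, t, 1.5)
print(val)            # 2.25, since the data lies on y = x**2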
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def algorithm_1_2(p, c, x):\n\n q = np.array(c, dtype=np.float64)\n\n for k in range(1, p + 1):\n for j in range(0, p - k + 1):\n q[j] = (1 - x) * q[j] + x * q[j + 1]\n return q[0]", "def cubic_interpol(X_P, Y_P):\r\n y_derivs = derivatives( X_P, Y_P ).flatten() # flatten as FB_sub returns 2d array\r\n \r\n for j in np.arange( X_P.shape[0] - 1 ): # for every x[i] and x[i+1] pair\r\n plot_points = np.linspace( X_P[j], X_P[j+1], 20) # points to plot in the interval\r\n params = [ X_P[j], X_P[j+1], Y_P[j], Y_P[j+1],\r\n y_derivs[j], y_derivs[j+1]]\r\n f_points = f(plot_points, params)\r\n plt.plot(plot_points, f_points, 'b-', ms = .5, label = 'Cubic'if j==0 else \"\") # only label one plot\r", "def coef_approximation(p, order):\n\n\t# maintain parity of order +1\n\tn = 50 + order +1\n\tr = 1\n\txs = np.linspace(-r, r, num=n)\n\tys = p(xs)\n\n\t# [TODO]: fix coeffients method\n\t# replace with 'c = coeffients(xs, ys)'\n\tdegree = n \n\tc = np.polyfit(xs,ys,degree)\n\n\treturn c", "def solve_i():\r\n x = np.array([ -2.1, -1.45, -1.3, -0.2, 0.1, 0.15, 0.8, 1.1, 1.5, 2.8, 3.8 ])\r\n y = np.array([0.012155, 0.122151, 0.184520, 0.960789, 0.990050, 0.977751,\r\n 0.527292, 0.298197, 0.105399, 3.936690E-4, 5.355348E-7])\r\n # find and plot both interpolations and the oiginal points\r\n plt.figure(1)\r\n cubic_interpol(x,y)\r\n lin_interpol(x,y)\r\n plt.plot(x, y, 'rx', ms = 10, label = 'Points')\r\n # plot settings\r\n plt.title('Cubic & Linear Interpolation Given Points')\r\n plt.xlabel('x',fontsize = 14)\r\n plt.ylabel('y',fontsize = 14)\r\n plt.legend()", "def analytical_integral_rppd(p, q, r, a, b, c):\n if p < 0:\n return 0.0\n elif q < 0:\n return 0.0\n elif r < 0.0:\n return 0.0\n else:\n return (\n a ** (p + 1)\n * b ** (q + 1)\n * c ** (r + 1)\n * ((-1) ** p + 1)\n * ((-1) ** q + 1)\n * ((-1) ** r + 1)\n / ((p + 1) * (q + 1) * (r + 1))\n )", "def cubicSpline(x,y,x_int):\n\n #region \"learn\" the coefficients of the cubic polynomials that interpolate intervals in x.\n # amount of intervals/splines\n n = len(x)-1\n\n # a_i = y_i\n a = y[:-1]\n\n # h_i = x_{i+1} - x_i for i in 0..n-1\n h = x[1:]-x[:-1]\n\n # 2 * h_i + h_{i+1}\n diagA = 2*(h[1:]+h[:-1])\n \n # h_1..h_n-2\n hInA = h[1:-1]\n\n A = np.eye(n-1)*diagA\n # distribute h_1..h_n-2 above and underneath the diagonal\n A += np.diag(hInA,1)\n A += np.diag(hInA,-1)\n\n # construct RHS\n z = 3/h[1:] * (y[2:] - y[1:-1]) - 3/h[:-1] * (y[1:-1] - y[:-2])\n\n # c_0 = c_{n} = 0\n c = np.zeros(n+1)\n\n c[1:-1] = np.linalg.solve(A,z)\n \n b = (y[1:]-y[:-1])/h - h/3*(c[1:] + 2*c[:-1])\n\n d = 1/(3*h)*(c[1:]-c[:-1])\n #endregion\n\n #region interpolate all points in x_int\n y_int = x_int.copy()\n # for all intervals\n for i in range(len(x)-1):\n # find points to interpolate within given interval\n idx = np.where(np.logical_and(x[i]<= x_int,x_int < x[i+1]))[0]\n xx = x_int[idx]\n yy = np.polyval(np.array([d[i],c[i],b[i],a[i]]), xx-x[i])\n y_int[idx] = yy\n print(f'interpolating in interval [{x[i]},{x[i+1]}[')\n print(xx)\n print(yy)\n print('\\n')\n\n # edgecase where x_int contains exactly last interval border\n #find indicies if x_int contains dupes\n idx = np.where(x_int == x[len(x)-1])[0] \n # interpolate with last interval polynomial\n i = len(a)-1\n y_int[idx] = np.polyval(np.array([d[i],c[i],b[i],a[i]]), x_int[idx]-x[i])\n #endregion\n return y_int", "def lin_interpol(x_p, y_p):\r\n f = np.zeros([ x_p.shape[0] - 1 , 4 ]) # Coefficents and interval array\r\n \r\n for i in range( x_p.shape[0] - 1 ): # for every x[i], x[i+1] pair\r\n \r\n x_coeff = 
(y_p[i+1] - y_p[i]) / (x_p[i+1] - x_p[i])\r\n const = (x_p[i+1]*y_p[i] - x_p[i]*y_p[i+1] ) / (x_p[i+1] - x_p[i])\r\n \r\n # save the x coefficent, constant and the interval for this line\r\n f[i,:] = x_coeff, const, x_p[i], x_p[i+1]\r\n \r\n for a, b, start, end in f: # for every line fitted\r\n line_x = np.linspace( start, end, 3) # points to plot in x_range\r\n line_y = line_x * a + b # find the fitted line value at these points\r\n plt.plot(line_x,line_y,'k--', lw = 1, label = 'Linear' if a==f[0][0] else \"\") # only label one plot\r", "def polynomialInterpolation(self,s):\n #print(s)\n #s[i]=xi,s[j]=xj\n return Polynomial.createFromInterpolation(s,range(len(s)))\n #return Polynomial(s,T)", "def qspline_params(self):\n b = np.zeros(self.n-1)\n c = np.zeros(self.n-1)\n dx = np.zeros(self.n-1)\n p = np.zeros(self.n-1)\n\n # Calculate x-interval and slope\n for j in range(self.n-1):\n dx[j] = self.x[j+1] - self.x[j]\n p[j] = (self.y[j+1] - self.y[j]) / dx[j]\n \n # Find c forward-recursively\n list = range(self.n-2)\n for i in list:\n c[i+1] = (p[i+1] - p[i] - c[i] * dx[i]) / dx[i+1]\n \n # Find c backward-recursively from 1/2c_n-1\n c[-1] = c[-1] / 2\n for i in list[::-1]:\n c[i] = (p[i+1] - p[i] - c[i+1] * dx[i+1]) / dx[i]\n\n # Find b\n for i in range(self.n-1):\n b[i] = p[i] - c[i] * dx[i]\n return b, c", "def f(t,x,p,q):\n return p[1] + q[0]*x", "def coeffients(x, y):\n\n # ensure floating point datatypes\n x.astype(float)\n y.astype(float)\n\n # degree of interpolating polynomial\n n = len(x)\n\n # intitilize list of coeffients for interpolating polynomial to y values\n c = y.tolist()\n\n # compute coeffients\n for j in range(1, n):\n for i in range(n-1, j-1, -1):\n c[i] = float(c[i]-c[i-1])/float(x[i]-x[i-j])\n\n # return an array of polynomial coefficient, note: reverse order for np.polyval function\n return np.array(c[::-1])", "def cspline_params(self):\n b = np.zeros(self.n)\n c = np.zeros(self.n-1)\n d = np.zeros(self.n-1)\n B = np.zeros(self.n)\n Q = np.ones(self.n-1)\n D = 2 * np.ones(self.n)\n dx = np.zeros(self.n-1)\n p = np.zeros(self.n-1)\n\n # Calculate x-interval and slope\n for j in range(self.n-1):\n dx[j] = self.x[j+1] - self.x[j]\n p[j] = (self.y[j+1] - self.y[j]) / dx[j]\n\n # Fill B\n B[0] = 3 * p[0]\n for i in range(self.n-2):\n B[i+1] = 3 * (p[i] + p[i+1] * dx[i] / dx[i+1])\n B[-1] = 3 * p[-2]\n \n # Fill D\n for i in range(self.n-2):\n D[i+1] = 2 * dx[i] / dx[i+1] + 2\n\n # Fill Q\n for i in range(self.n-2):\n Q[i+1] = dx[i] / dx[i+1]\n\n # Gauss elimination\n for i in range(1, self.n):\n D[i] = D[i] - Q[i-1] / D[i-1]\n B[i] = B[i] - B[i-1] / D[i-1]\n\n # Back-substitution\n b[-1] = B[-1] / D[-1]\n list = range(self.n-1)\n for i in list[::-1]:\n b[i] = (B[i] - Q[i] * b[i+1]) / D[i]\n\n # Calculate c and d\n for i in range(self.n-1):\n c[i] = (3 * p[i] - 2 * b[i] - b[i+1]) / dx[i]\n d[i] = (b[i] + b[i+1] - 2 * p[i]) / dx[i]\n c[-1] = -3 * d[-1] * dx[-1]\n\n return b, c, d", "def exppoweval(self, p, x, y=None, C=None, sumsq=False, weights=None):\n if C is None:\n cx = 1.0\n else:\n cx = C[0]\n yd = p[0] + p[1] * (1.0 - numpy.exp(-x / p[2])) ** cx\n if y is None:\n return yd\n else:\n if sumsq is True:\n return numpy.sum((y - yd) ** 2)\n else:\n return y - yd", "def nice_cubic_polynomial(p):\n tmp = \"\"\n if p[\"a\"] == 1:\n tmp += \" x^3\"\n elif p[\"a\"] != 0:\n tmp += \"%.2fx^3\" % p[\"a\"]\n if p[\"b\"] == 1:\n tmp += \"\\t+ x^2\"\n elif p[\"b\"] != 0:\n tmp += \"\\t+ %.2fx^2\" % p[\"b\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"c\"] == 1:\n tmp += \"\\t+ x\"\n 
elif p[\"c\"] != 0:\n tmp += \"\\t+ %.2fx\" % p[\"c\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"d\"] != 0:\n tmp += \"\\t+ %.2f\" % p[\"d\"]\n return tmp", "def fourth_poly(a, b, c, d, e):\n return lambda z: a*z**4 + b*z**3 + c*z**2 + d*z + e", "def cubic_spline_interpolation(q_, m = 100):\n n = q_.shape[0]\n dof = q_.shape[1]\n\n q_ = np.transpose(q_)\n\n m = m + (m % (n-1))\n k = int(m / (n-1))\n timesteps = [np.linspace(0, 1, num = k, endpoint = False) for _ in range(n-2)]\n timesteps.append(np.linspace(0, 1, num = k))\n\n # Generate A matrix\n A = np.zeros((dof, n, n))\n # A[:, 0, 0] = 2\n # A[:, 0, 1] = 1\n # A[:, n-1, n-2] = 1\n # A[:, n-1, n-1] = 2\n A[:, 0, 0] = 1\n A[:, n-1, n-1] = 1\n for i in range(1, n-1):\n A[:, i, i - 1] = 1\n A[:, i, i] = 4\n A[:, i, i + 1] = 1\n\n # Generate b matrix\n y = np.zeros((dof, n))\n # y[:, 0] = 3 * (q_[:, 1] - q_[:, 0])\n # y[:, n-1] = 3 * (q_[:, n - 1] - q_[:, n - 2])\n y[:, 0] = 0\n y[:, n-1] = 0\n for i in range(1, n-1):\n y[:, i] = 3 * (q_[:, i + 1] - q_[:, i - 1])\n\n # Solve D\n D = np.linalg.solve(A, y)\n\n # Calculate coefficients\n a = np.copy(q_[:, :n-1])\n b = np.copy(D[:, :n-1])\n c = np.zeros((dof, n-1))\n d = np.zeros((dof, n-1))\n for i in range(0, n-1):\n c[:, i] = 3 * (q_[:, i + 1] - q_[:, i]) - 2 * D[:, i] - D[:, i + 1]\n d[:, i] = 2 * (q_[:, i] - q_[:, i + 1]) + D[:, i] + D[:, i + 1]\n\n \n # Calculate Trajectories\n q = np.zeros((dof, m))\n qd = np.zeros((dof, m))\n qdd = np.zeros((dof, m))\n\n for j in range(n - 1):\n for i in range(len(timesteps[j])):\n t = timesteps[j][i]\n t_2 = t * t\n t_3 = t * t * t\n\n q[:, i + j * k] = a[:, j] + b[:, j] * t + c[:, j] * t_2 + d[:, j] * t_3\n qd[:, i + j * k] = b[:, j] + 2 * c[:, j] * t + 3 * d[:, j] * t_2\n qdd[:, i + j * k] = 2 * c[:, j] + 6 * d[:, j] * t\n\n return q, qd, qdd", "def findCurvePoints(self, x, y, c):\n\t\tyCurve = []\n\t\tfor xi in x:\n\t\t\tyi = self.polynomialFunct(c, xi)\n\t\t\t\n\t\t\tyCurve.append( yi )\n\t\t\n\t\treturn np.asarray(yCurve)", "def poly(x, y, pd) :\n # Maximum polynomial degree allowed is 7.\n maxD = 7\n if pd > maxD :\n exit(\"Please choose a reasonable polynomial degree (0 <= pd <= \" + maxD + \").\")\n \n # Make the polynomial matrix one degree at a time.\n p = np.zeros((len(x), int((pd+1)*(pd+2)/2)), float)\n count = 0\n numP = 0\n for i in range(pd + 1) :\n for j in range(numP + 1) :\n if (j == 0) and (numP == 0) :\n p[:,count] = 1\n elif (j == 0) :\n p[:,count] = x**(numP-j)\n elif (numP-j == 0) :\n p[:,count] = y**j\n else :\n p[:,count] = x**(numP-j) * y**j\n count += 1\n numP += 1\n \n return p", "def compCoeff_CGP(i, A, c, N):\n Ap = np.copy(A)\n out = c[i, 0] * np.eye(N)\n j = 1\n while j <= i:\n # compute A to the power p\n if j > 1:\n Ap = Ap.dot(A)\n\n # add to the polynome\n out += c[i, j] * Ap\n j += 1\n\n return out", "def polymul(c,P):\n return [c*x for x in P]", "def clfqp(self,x,p):\n alp = self.alp_opt\n nu = self.nu_opt\n dt = self.dt\n n = self.n\n I = np.identity(n)\n M = self.ncm(x,p)\n nu = np.size(self.h_or_g(x,p),1)\n u = cp.Variable((nu,1))\n e = np.reshape(x,(n,1))\n fx = np.reshape(self.dynamicsf(x,p),(n,1))\n gx = self.h_or_g(x,p)\n dMdt = (nu*I-M)/dt\n constraints = [2*e.T@(fx+gx@u)+e.T@dMdt@e <= -2*alp*e.T@M@e]\n prob = cp.Problem(cp.Minimize(cp.sum_squares(u)),constraints)\n prob.solve()\n u = u.value\n u = np.ravel(u)\n return u", "def polyval(p, x):\r\n val = 0\r\n ii = len(p) - 1\r\n for i in range(len(p) - 1):\r\n val += p[i] * (x ** ii)\r\n ii -= 1\r\n return val + p[-1]", "def intrpf(xi,x,y):\n\n # calculate yi = 
p(xi) using Lagrange polynomial \n yi = ((xi-x[1])*(xi-x[2])/((x[0]-x[1])*(x[0]-x[2]))) * y[0]\\\n +((xi-x[0])*(xi-x[2])/((x[1]-x[0])*(x[1]-x[2]))) * y[1]\\\n +((xi-x[0])*(xi-x[1])/((x[2]-x[0])*(x[2]-x[1]))) * y[2]\n return yi", "def project_curve(q):\n n,T = q.shape\n if n==2:\n dt = 0.35\n if n==3:\n dt = 0.2\n epsilon = 1e-6\n\n iter = 1\n res = ones(n)\n J = zeros((n,n))\n\n s = linspace(0,1,T)\n\n qnew = q.copy()\n qnew = qnew / sqrt(innerprod_q2(qnew,qnew))\n\n qnorm = zeros(T)\n G = zeros(n)\n C = zeros(300)\n while (norm(res) > epsilon):\n if iter > 300:\n break\n\n # Jacobian\n for i in range(0,n):\n for j in range(0,n):\n J[i,j] = 3 * trapz(qnew[i,:]*qnew[j,:],s)\n \n J += eye(n)\n\n for i in range(0,T):\n qnorm[i] = norm(qnew[:,i])\n \n # Compute the residue\n for i in range(0,n):\n G[i] = trapz(qnew[i,:]*qnorm,s)\n \n res = -G\n\n if (norm(res) < epsilon):\n break\n\n x = solve(J,res)\n C[iter] = norm(res)\n\n delG = Basis_Normal_A(qnew)\n temp = zeros((n,T))\n for i in range(0,n):\n temp += x[i]*delG[i]*dt\n \n qnew += temp\n iter += 1\n \n qnew = qnew/sqrt(innerprod_q2(qnew,qnew))\n\n return qnew", "def _poly_func(x, a, b, c, d, e):\n return a * x ** 6 + b * x ** 5 + c * x ** 4 + d * x ** 3 + e * x ** 2", "def quaterion_product(q, p):\n q0 = q[3]\n p0 = p[3]\n\n return [q0*p[0:3] + p0*q[0:3] + mtimes(skew(q[0:3]), p[0:3]), q0*p0 - mtimes(q[0:3].T, p[0:3])]", "def zzX_quo_const(f, c):\n if poly_univariate_p(f):\n return zzx_quo_const(f, c)\n else:\n return [ zzX_quo_const(coeff, c) for coeff in f ]", "def expevalprime(self, p, x, y=None, C=None, sumsq=False, weights=None):\n ydp = p[1] * numpy.exp(-x / p[2]) / (p[2] * p[2])\n yd = p[0] + p[1] * numpy.exp(-x / p[2])\n if y is None:\n return (yd, ydp)\n else:\n if sumsq is True:\n return numpy.sum((y - yd) ** 2)\n else:\n return y - yd", "def curvecontrol(p1,p2, u_or_d):\r\n## four possibile orders:\r\n## A p1 lower and to left of p2\r\n## B p1 lower and to right of p2\r\n## C p1 higher and to left of p2\r\n## D p1 higher and to right of p2\r\n## B and C are reverse of each other\r\n## A and D are reverse of each other\r\n## so only 2 types of pairs really\r\n## each has a curve up or curve down possibility\r\n## start by converting D to A, and C to B\r\n e1 = 0.0001\r\n e2 = 0.9\r\n e1c = 1 - e1\r\n e2c = 0.5\r\n cp1 = []\r\n cp2 = []\r\n if p2[1] < p1[1]:\r\n resort = True\r\n ptemp = p2\r\n p2 = p1\r\n p1 = ptemp\r\n else:\r\n resort = False\r\n if p1[0] < p2[0]: ## type A\r\n if u_or_d: ## curve up\r\n cp1.append( ((p2[0]-p1[0]) * e1) + p1[0])\r\n cp1.append( ((p2[1]-p1[1]) * e2) + p1[1])\r\n cp2.append( ((p2[0]-p1[0]) * e2c) + p1[0])\r\n cp2.append( ((p2[1]-p1[1]) * e1c) + p1[1])\r\n else:\r\n cp1.append( ((p2[0]-p1[0]) * e2) + p1[0])\r\n cp1.append( ((p2[1]-p1[1]) * e1) + p1[1])\r\n cp2.append( ((p2[0]-p1[0]) * e1c) + p1[0])\r\n cp2.append( ((p2[1]-p1[1]) * e2c) + p1[1])\r\n else: ## type B\r\n if u_or_d: ## curve up\r\n cp1.append( p1[0]-((p1[0]-p2[0]) * e1))\r\n cp1.append( ((p2[1]-p1[1]) * e2) + p1[1])\r\n cp2.append( p1[0] - ((p1[0]-p2[0]) * e2c))\r\n cp2.append( ((p2[1]-p1[1]) * e1c) + p1[1])\r\n else:\r\n cp1.append( p1[0]-((p1[0]-p2[0]) * e2))\r\n cp1.append( ((p2[1]-p1[1]) * e1) + p1[1])\r\n cp2.append( p1[0]-((p1[0]-p2[0]) * e1c))\r\n cp2.append( ((p2[1]-p1[1]) * e2c) + p1[1])\r\n if resort:\r\n ptemp = cp2\r\n cp2 = cp1\r\n cp1 = ptemp\r\n return cp1,cp2", "def algorithm_2_20(p, t, c, x):\n\n eps = 1e-14\n mu = index(x, t)\n c0 = np.array(c[mu - p:mu + 1], dtype=np.float64)\n c0 = c0[::-1]\n\n for k in range(p, 
0, -1):\n for i, j in enumerate(range(mu, mu - k, -1)):\n denominator = float(t[j + k] - t[j])\n\n if abs(denominator) < eps:\n c0[i] = 0.0\n continue\n\n c0[i] = (t[j + k] - x) / denominator * c0[i + 1] + (\n x - t[j]) / denominator * c0[i]\n return c0[0]" ]
[ "0.7042587", "0.6690117", "0.65926915", "0.65250105", "0.6340643", "0.6208526", "0.6072516", "0.6010707", "0.59581405", "0.58640903", "0.5849673", "0.5834013", "0.58218116", "0.5818799", "0.5810743", "0.58106756", "0.58082443", "0.5802335", "0.57814616", "0.5777239", "0.5776059", "0.5750022", "0.5749307", "0.57333475", "0.5703115", "0.5688111", "0.5682544", "0.56630504", "0.5662124", "0.5659652" ]
0.67014515
1
Routine for computing the Bezier curve q of degree p defined by the points c.
def algorithm_1_2(p, c, x): q = np.array(c, dtype=np.float64) for k in range(1, p + 1): for j in range(0, p - k + 1): q[j] = (1 - x) * q[j] + x * q[j + 1] return q[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qCurveTo(self, *points: Tuple[float, float]) -> None:\n raise NotImplementedError", "def qspline_params(self):\n b = np.zeros(self.n-1)\n c = np.zeros(self.n-1)\n dx = np.zeros(self.n-1)\n p = np.zeros(self.n-1)\n\n # Calculate x-interval and slope\n for j in range(self.n-1):\n dx[j] = self.x[j+1] - self.x[j]\n p[j] = (self.y[j+1] - self.y[j]) / dx[j]\n \n # Find c forward-recursively\n list = range(self.n-2)\n for i in list:\n c[i+1] = (p[i+1] - p[i] - c[i] * dx[i]) / dx[i+1]\n \n # Find c backward-recursively from 1/2c_n-1\n c[-1] = c[-1] / 2\n for i in list[::-1]:\n c[i] = (p[i+1] - p[i] - c[i+1] * dx[i+1]) / dx[i]\n\n # Find b\n for i in range(self.n-1):\n b[i] = p[i] - c[i] * dx[i]\n return b, c", "def algorithm_1_1(p, c, t, x):\n\n q = np.array(c, dtype=np.float64)\n\n for k in range(1, p + 1):\n for j in range(0, p - k + 1):\n q[j] = (t[j + k] - x) / (t[j + k] - t[j]) * q[j] + (x - t[j]) / (\n t[j + k] - t[j]) * q[j + 1]\n return q[0]", "def qbezier_bounds((x0, y0), (x1, y1), (x2, y2)):\n\n\t# cubic Bezier reprsented in polynomial base\n\t# f(t) = A*t^2 + B*t + C\n\tAx = x0 - 2*x1 + x2\n\tBx = -2*x0 + 2*x1\n\tCx = x0\n\n\tAy = y0 - 2*y1 + y2\n\tBy = -2*y0 + 2*y1\n\tCy = y0\n\n\t# find extremas:\n\t#\t1) x(0) = x0\n\t#\t2) x(1) = x2\n\t#\t3) f(t_e), where f'(t_e)=0 and t_e in (0,1)\n\tx = [x0,x2]\n\tif abs(Ax) > 1e-10:\n\t\tt = -Bx/(2*Ax)\n\t\tif 0.0 < t < 1.0:\n\t\t\tt2 = t*t\n\t\t\tx.append(Ax*t2 + Bx*t + Cx)\n\n\ty = [y0,y2]\n\tif abs(Ay) > 1e-10:\n\t\tt = -By/(2*Ay)\n\t\tif 0.0 < t < 1.0:\n\t\t\tt2 = t*t\n\t\t\ty.append(Ay*t2 + By*t + Cy)\n\t\n\treturn x, y", "def DispCurve(c,x,B,a,N,K):\n return B+np.square(a*(x/(1+x+(K*((c-N)/55.5)))));", "def C2Q(self, C):\n\n return self.euler2Q(self.C2euler(C))", "def project_curve(q):\n n,T = q.shape\n if n==2:\n dt = 0.35\n if n==3:\n dt = 0.2\n epsilon = 1e-6\n\n iter = 1\n res = ones(n)\n J = zeros((n,n))\n\n s = linspace(0,1,T)\n\n qnew = q.copy()\n qnew = qnew / sqrt(innerprod_q2(qnew,qnew))\n\n qnorm = zeros(T)\n G = zeros(n)\n C = zeros(300)\n while (norm(res) > epsilon):\n if iter > 300:\n break\n\n # Jacobian\n for i in range(0,n):\n for j in range(0,n):\n J[i,j] = 3 * trapz(qnew[i,:]*qnew[j,:],s)\n \n J += eye(n)\n\n for i in range(0,T):\n qnorm[i] = norm(qnew[:,i])\n \n # Compute the residue\n for i in range(0,n):\n G[i] = trapz(qnew[i,:]*qnorm,s)\n \n res = -G\n\n if (norm(res) < epsilon):\n break\n\n x = solve(J,res)\n C[iter] = norm(res)\n\n delG = Basis_Normal_A(qnew)\n temp = zeros((n,T))\n for i in range(0,n):\n temp += x[i]*delG[i]*dt\n \n qnew += temp\n iter += 1\n \n qnew = qnew/sqrt(innerprod_q2(qnew,qnew))\n\n return qnew", "def Q2C(self, q):\n\n #q = q.squeeze();\n C = np.empty((3,3));\n\tC[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);\n\tC[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));\n\tC[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));\n\n\tC[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));\n\tC[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);\n\tC[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));\n\n\tC[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));\n\tC[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));\n\tC[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);\n\n return C", "def cspline_params(self):\n b = np.zeros(self.n)\n c = np.zeros(self.n-1)\n d = np.zeros(self.n-1)\n B = np.zeros(self.n)\n Q = np.ones(self.n-1)\n D = 2 * np.ones(self.n)\n dx = np.zeros(self.n-1)\n p = np.zeros(self.n-1)\n\n # Calculate x-interval and slope\n for j in range(self.n-1):\n dx[j] = self.x[j+1] - self.x[j]\n p[j] = 
(self.y[j+1] - self.y[j]) / dx[j]\n\n # Fill B\n B[0] = 3 * p[0]\n for i in range(self.n-2):\n B[i+1] = 3 * (p[i] + p[i+1] * dx[i] / dx[i+1])\n B[-1] = 3 * p[-2]\n \n # Fill D\n for i in range(self.n-2):\n D[i+1] = 2 * dx[i] / dx[i+1] + 2\n\n # Fill Q\n for i in range(self.n-2):\n Q[i+1] = dx[i] / dx[i+1]\n\n # Gauss elimination\n for i in range(1, self.n):\n D[i] = D[i] - Q[i-1] / D[i-1]\n B[i] = B[i] - B[i-1] / D[i-1]\n\n # Back-substitution\n b[-1] = B[-1] / D[-1]\n list = range(self.n-1)\n for i in list[::-1]:\n b[i] = (B[i] - Q[i] * b[i+1]) / D[i]\n\n # Calculate c and d\n for i in range(self.n-1):\n c[i] = (3 * p[i] - 2 * b[i] - b[i+1]) / dx[i]\n d[i] = (b[i] + b[i+1] - 2 * p[i]) / dx[i]\n c[-1] = -3 * d[-1] * dx[-1]\n\n return b, c, d", "def curve_bezier(numbers, p_current, relative = False):\n if len(numbers) < 2 or len(numbers) > 3:\n return None\n\n pp = [ Point(numbers[i], numbers[i+1]) \\\n for i in range(0, len(numbers), 2) ]\n if relative:\n pp = [ p + p_current for p in pp]\n\n p_list = [ p_current ]\n p_list.append(pp)\n\n return Bezier(p_list)", "def chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq= (2.0/len(cphase)) * np.sum((1.0 - np.cos(cphase-cphase_samples))/(sigma_cphase**2))\n return chisq", "def Qc_fit(x, a, b, c, d, e, f, g, h, i, k):\n x1 = x[0] # I\n x2 = x[1] # dT\n m = (i * x1 ** 4 + a * x1 ** 3 + b * x1 ** 2 + c * x1 + d)\n b = (k * x1 ** 4 + e * x1 ** 3 + f * x1 ** 2 + g * x1 + h)\n return m * x2 + b", "def Qc(I, dT, a, b, c, d, e, f, g, h, i, k):\n x1 = I # I\n x2 = dT # dT\n m = (i * x1 ** 4 + a * x1 ** 3 + b * x1 ** 2 + c * x1 + d)\n b = (k * x1 ** 4 + e * x1 ** 3 + f * x1 ** 2 + g * x1 + h)\n return m * x2 + b", "def bezier_curve(points, nTimes):\n\n nPoints = len(points)\n xPoints = np.array([p[0] for p in points])\n yPoints = np.array([p[1] for p in points])\n\n t = np.linspace(0.0, 1.0, nTimes)\n\n polynomial_array = np.array([ bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints) ])\n\n\n xvals = np.dot(xPoints, polynomial_array)\n yvals = np.dot(yPoints, polynomial_array)\n\n return xvals, yvals", "def cubic_spline_interpolation(q_, m = 100):\n n = q_.shape[0]\n dof = q_.shape[1]\n\n q_ = np.transpose(q_)\n\n m = m + (m % (n-1))\n k = int(m / (n-1))\n timesteps = [np.linspace(0, 1, num = k, endpoint = False) for _ in range(n-2)]\n timesteps.append(np.linspace(0, 1, num = k))\n\n # Generate A matrix\n A = np.zeros((dof, n, n))\n # A[:, 0, 0] = 2\n # A[:, 0, 1] = 1\n # A[:, n-1, n-2] = 1\n # A[:, n-1, n-1] = 2\n A[:, 0, 0] = 1\n A[:, n-1, n-1] = 1\n for i in range(1, n-1):\n A[:, i, i - 1] = 1\n A[:, i, i] = 4\n A[:, i, i + 1] = 1\n\n # Generate b matrix\n y = np.zeros((dof, n))\n # y[:, 0] = 3 * (q_[:, 1] - q_[:, 0])\n # y[:, n-1] = 3 * (q_[:, n - 1] - q_[:, n - 2])\n y[:, 0] = 0\n y[:, n-1] = 0\n for i in range(1, n-1):\n y[:, i] = 3 * (q_[:, i + 1] - q_[:, i - 1])\n\n # Solve D\n D = np.linalg.solve(A, y)\n\n # Calculate coefficients\n a = np.copy(q_[:, :n-1])\n b = np.copy(D[:, :n-1])\n c = np.zeros((dof, n-1))\n d = np.zeros((dof, n-1))\n for i in range(0, n-1):\n c[:, i] = 3 * (q_[:, i + 1] - q_[:, i]) - 2 * D[:, i] - D[:, i + 1]\n d[:, i] = 2 * (q_[:, i] - q_[:, i + 1]) + D[:, i] + D[:, i + 1]\n\n \n # Calculate Trajectories\n q = np.zeros((dof, m))\n qd = np.zeros((dof, m))\n qdd = np.zeros((dof, m))\n\n for j in range(n - 1):\n for i in range(len(timesteps[j])):\n t = timesteps[j][i]\n t_2 = t * t\n t_3 = t * t * t\n\n q[:, i + j * k] = a[:, j] + b[:, j] * t + c[:, j] * t_2 + d[:, j] 
* t_3\n qd[:, i + j * k] = b[:, j] + 2 * c[:, j] * t + 3 * d[:, j] * t_2\n qdd[:, i + j * k] = 2 * c[:, j] + 6 * d[:, j] * t\n\n return q, qd, qdd", "def clfqp(self,x,p):\n alp = self.alp_opt\n nu = self.nu_opt\n dt = self.dt\n n = self.n\n I = np.identity(n)\n M = self.ncm(x,p)\n nu = np.size(self.h_or_g(x,p),1)\n u = cp.Variable((nu,1))\n e = np.reshape(x,(n,1))\n fx = np.reshape(self.dynamicsf(x,p),(n,1))\n gx = self.h_or_g(x,p)\n dMdt = (nu*I-M)/dt\n constraints = [2*e.T@(fx+gx@u)+e.T@dMdt@e <= -2*alp*e.T@M@e]\n prob = cp.Problem(cp.Minimize(cp.sum_squares(u)),constraints)\n prob.solve()\n u = u.value\n u = np.ravel(u)\n return u", "def cubic_trajectory_planning(q0, qf, qd0, qdf, m = 100):\n n = q0.shape[0]\n\n # Polynomial Parameters\n a0 = np.copy(q0)\n a1 = np.copy(qd0) \n a2 = 3 * (qf - q0) - 2 * qd0 - qdf\n a3 = -2 * (qf - q0) + qd0 + qdf\n\n timesteps = np.linspace(0, 1, num = m)\n\n q = np.zeros((n, m))\n qd = np.zeros((n, m))\n qdd = np.zeros((n, m))\n\n for i in range(len(timesteps)):\n t = timesteps[i]\n t_2 = t * t\n t_3 = t * t * t\n\n q[:, i] = (a0) + (a1 * t) + (a2 * t_2) + (a3 * t_3)\n qd[:, i] = (a1) + (2 * a2 * t) + (3 * a3 * t_2)\n qdd[:, i] = (2 * a2) + (6 * a3 * t)\n\n return q, qd, qdd", "def curvecontrol(p1,p2, u_or_d):\r\n## four possibile orders:\r\n## A p1 lower and to left of p2\r\n## B p1 lower and to right of p2\r\n## C p1 higher and to left of p2\r\n## D p1 higher and to right of p2\r\n## B and C are reverse of each other\r\n## A and D are reverse of each other\r\n## so only 2 types of pairs really\r\n## each has a curve up or curve down possibility\r\n## start by converting D to A, and C to B\r\n e1 = 0.0001\r\n e2 = 0.9\r\n e1c = 1 - e1\r\n e2c = 0.5\r\n cp1 = []\r\n cp2 = []\r\n if p2[1] < p1[1]:\r\n resort = True\r\n ptemp = p2\r\n p2 = p1\r\n p1 = ptemp\r\n else:\r\n resort = False\r\n if p1[0] < p2[0]: ## type A\r\n if u_or_d: ## curve up\r\n cp1.append( ((p2[0]-p1[0]) * e1) + p1[0])\r\n cp1.append( ((p2[1]-p1[1]) * e2) + p1[1])\r\n cp2.append( ((p2[0]-p1[0]) * e2c) + p1[0])\r\n cp2.append( ((p2[1]-p1[1]) * e1c) + p1[1])\r\n else:\r\n cp1.append( ((p2[0]-p1[0]) * e2) + p1[0])\r\n cp1.append( ((p2[1]-p1[1]) * e1) + p1[1])\r\n cp2.append( ((p2[0]-p1[0]) * e1c) + p1[0])\r\n cp2.append( ((p2[1]-p1[1]) * e2c) + p1[1])\r\n else: ## type B\r\n if u_or_d: ## curve up\r\n cp1.append( p1[0]-((p1[0]-p2[0]) * e1))\r\n cp1.append( ((p2[1]-p1[1]) * e2) + p1[1])\r\n cp2.append( p1[0] - ((p1[0]-p2[0]) * e2c))\r\n cp2.append( ((p2[1]-p1[1]) * e1c) + p1[1])\r\n else:\r\n cp1.append( p1[0]-((p1[0]-p2[0]) * e2))\r\n cp1.append( ((p2[1]-p1[1]) * e1) + p1[1])\r\n cp2.append( p1[0]-((p1[0]-p2[0]) * e1c))\r\n cp2.append( ((p2[1]-p1[1]) * e2c) + p1[1])\r\n if resort:\r\n ptemp = cp2\r\n cp2 = cp1\r\n cp1 = ptemp\r\n return cp1,cp2", "def algorithm_4_10(p, tau, t, c):\n\n m = len(t) - (p + 1)\n n = len(tau) - (p + 1)\n c = np.array(c, dtype=np.float64)\n t = np.array(t, dtype=np.float64)\n tau = np.array(tau, dtype=np.float64)\n b = np.zeros(m)\n\n for i in range(m):\n mu = index(t[i], tau)\n if p == 0:\n b[i] = c[mu]\n else:\n C = c[mu - p:mu + 1]\n for j in range(0, p):\n k = p - j\n tau1 = tau[mu - k + 1:mu + 1]\n tau2 = tau[mu + 1:mu + k + 1]\n omega = np.divide(\n (t[i + k] - tau1), (tau2 - tau1),\n out=np.zeros_like(tau1),\n where=((tau2 - tau1) != 0))\n C = (1 - omega) * C[:-1] + omega * C[1:]\n b[i] = C\n return b", "def C(self, q, dq):\n # check for function in dictionary\n if self._C is None:\n self._C = self._calc_C()\n parameters = tuple(q) + tuple(dq)\n return 
np.array(self._C(*parameters), dtype='float32')", "def pchisq(x, df):\n \n if df % 2 == 0:\n dchi = 0.5 * math.exp(-0.5 * x)\n f = 1.0 - 2.0 * dchi\n for i in range(4, df + 1, 2):\n dchi *= x / (i - 2)\n f -= 2.0 * dchi\n \n else:\n f = 2.0 * pnorm(math.sqrt(x), 0.0, 1.0) - 1.0\n dchi = math.exp(-0.5 * x) / math.sqrt(2.0 * math.pi * x)\n for i in range(3, df + 1, 2):\n dchi *= x / (i - 2)\n f -= 2.0 * dchi\n \n return f", "def solve_quadratic_equation(a,b,c):\r\n delta = b**2 - 4 * a * c\r\n if delta < 0:\r\n raise ValueError(\"No real solution...that shouldn't happen!\")\r\n return (-b + math.sqrt(delta)) / (2*a)", "def createCubicBezier(self):\n return _libsbml.Curve_createCubicBezier(self)", "def ea_from_q(p, q):\n return p * q / (0.622 + 0.378 * q)", "def q_from_ea(ea, p):\n return 0.622 * ea / (p - 0.378 * ea)", "def optimalFraction(p,c):\r\n if p > 1/c:\r\n opt = (p*c-1)/(c-1)\r\n else:\r\n opt = 0\r\n return opt", "def add(self, P, Q):\n if not (isinstance(P, list) and isinstance(Q, list)):\n raise ValueError(\"point P (resp. Q) must be [px, py] (resp. [qx, qy])\")\n #if not (self.whetherOn(P) and self.whetherOn(Q)):\n # raise ValueError(\"either points must not be point on curve.\")\n\n if (P == self.infpoint) and (Q != self.infpoint):\n return Q\n elif (P != self.infpoint) and (Q == self.infpoint):\n return P\n elif (P == self.infpoint) and (Q == self.infpoint):\n return self.infpoint\n\n if self.ch == 0:\n # FIXME\n if P[0] == Q[0]:\n if P[1]+Q[1]+self.a1*Q[0]+self.a3 == 0:\n return self.infpoint\n else:\n s = (3*P[0]**2+2*self.a2*P[0]+self.a4-self.a1*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n t = (-P[0]**3+self.a4*P[0]+2*self.a6-self.a3*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n else:\n s = (Q[1]-P[1])/(Q[0]-P[0])\n t = (P[1]*Q[0]-Q[1]*P[0])/(Q[0]-P[0])\n x3 = s**2+self.a1*s-self.a2-P[0]-Q[0]\n y3 = -(s+self.a1)*x3-t-self.a3\n R = [x3, y3]\n return R\n else:\n if not (P[0] - Q[0]):\n # FIXME: the condition is P[0] == Q[0] intuitively,\n # but sometimes there are int vs FiniteFieldElement\n # comparisons ...\n if not (P[1]+Q[1]+self.a1*Q[0]+self.a3):\n return self.infpoint\n else:\n s = (3*P[0]**2+2*self.a2*P[0]+self.a4-self.a1*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n t = (-P[0]**3+self.a4*P[0]+2*self.a6-self.a3*P[1])/(2*P[1]+self.a1*P[0]+self.a3)\n else:\n s = (Q[1] - P[1]*self.basefield.one) / (Q[0] - P[0])\n t = (P[1]*Q[0] - Q[1]*P[0]*self.basefield.one)/ (Q[0] - P[0])\n x3 = s**2+self.a1*s-self.a2-P[0]-Q[0]\n y3 = -(s+self.a1)*x3-t-self.a3\n R = [x3, y3]\n return R", "def a_q(self, phi, ci, tl):\n\t return (self.j(phi, tl)*(ci - self.gamma(tl)))/(4.*(ci + 2.*self.gamma(tl)))", "def quadratic(self, a, b, c):\n if a == 0 and b == 0:\n return [\"inf\"]\n elif a == 0 and b != 0:\n # form b*y + c = 0\n return [-c / b]\n elif (b ** 2 - 4 * a * c) > 0:\n # standard quadradic formula\n return [\n (-b + (b ** 2 - 4.0 * a * c) ** 0.5) / (2.0 * a),\n (-b - (b ** 2 - 4.0 * a * c) ** 0.5) / (2.0 * a),\n ]\n elif (b ** 2 - 4 * a * c) == 0:\n return [-b / (2.0 * a)]\n else:\n # only interested in real solutions, so toss out imaginary ones\n runLog.warning(\"warning no intercepts\")\n return [None]", "def solve_quadratic(a, b, c):\n assert a != 0\n d = b * b - 4 * a * c\n if d < 0:\n return []\n elif d == 0:\n return -b / (2 * a)\n else:\n return [\n (-b - math.sqrt(d)) / (2 * a),\n (-b + math.sqrt(d)) / (2 * a)\n ]" ]
[ "0.6601559", "0.64687204", "0.6417432", "0.6127228", "0.609416", "0.594592", "0.59145063", "0.5885624", "0.5851079", "0.58369726", "0.5807348", "0.58004904", "0.5773123", "0.5691614", "0.56837523", "0.5677718", "0.5676558", "0.5676219", "0.5674574", "0.56641537", "0.56488764", "0.56367624", "0.5622623", "0.56120396", "0.5603083", "0.5601434", "0.55939656", "0.5591838", "0.5589074", "0.558359" ]
0.65151274
1
last_week=datetime.date.today()timedelta(days=7) import pdb;pdb.set_trace() fans_list = ["wwwttshow", "ttshowpet", "draw.fans", "TTShowMusic", "GoodNews.FANS"] fans_pages = FansPage.objects.filter(fans_type=fans_type, date__gte=last_week, date__lte=datetime.date.today()).order_by("date") start = fans_pages[0] last = fans_pages[len(fans_pages) 1] talk_about_is = (last.talk_about_is start.talk_about_is) / (start.talk_about_is + 0.0) 100 talk_about_is = (last.talk_about_is start.talk_about_is) total_like_count = (last.total_like_count start.total_like_count) / (start.total_like_count + 0.0) 100 total_like_count = (last.total_like_count start.total_like_count) total_fans = (last.total_fans start.total_fans) / (start.total_fans + 0.0) 100 total_fans = (last.total_fans start.total_fans)
def week_report_handle(fans_type): #import pdb;pdb.set_trace() last_day = datetime.date.today()-timedelta(days=datetime.datetime.today().weekday() + 1) today = datetime.date.today() fans_pages = FansPage.objects.filter(fans_type=fans_type, date__gte=last_day, date__lte=today).order_by("date") start = fans_pages[0] last = fans_pages[len(fans_pages) - 1] #talk_about_is = (last.talk_about_is - start.talk_about_is) / (start.talk_about_is + 0.0) * 100 talk_about_is = (last.talk_about_is - start.talk_about_is) #total_like_count = (last.total_like_count - start.total_like_count) / (start.total_like_count + 0.0) * 100 total_like_count = (last.total_like_count - start.total_like_count) #total_fans = (last.total_fans - start.total_fans) / (start.total_fans + 0.0) * 100 total_fans = (last.total_fans - start.total_fans) return {"talk_about_is":talk_about_is, "total_like_count":total_like_count, "total_fans":total_fans, "start":start.date, "last":last.date}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def month_report_handle(fans_type):\n\tstart = datetime.date.today() - timedelta(days=datetime.date.today().day - 1)\n\ttoday = datetime.date.today()\n\t#import pdb;pdb.set_trace()\n\t#fans_list = [\"wwwttshow\", \"ttshowpet\", \"draw.fans\", \"TTShowMusic\", \"GoodNews.FANS\"]\n\tfans_pages = FansPage.objects.filter(fans_type=fans_type, date__gte=start, date__lte=today).order_by(\"date\")\n\n\tstart = fans_pages[0]\n\tlast = fans_pages[len(fans_pages) - 1]\n\n\ttalk_about_is = (last.talk_about_is - start.talk_about_is)\n\ttotal_like_count = (last.total_like_count - start.total_like_count)\n\ttotal_fans = (last.total_fans - start.total_fans)\n\treturn {\"talk_about_is\":talk_about_is, \"total_like_count\":total_like_count, \"total_fans\":total_fans, \"start\":start.date, \"last\":last.date}", "def find_winner(request):\n r = {}\n\n test = False\n if request.POST.get('test','0') == '1':\n test = True \n\n if request.POST.get('code','000') == 'ch00seW199Er':\n # check the number of people who transacted today\n d = date.today() #-timedelta(30)\n win_begin = datetime(year=d.year, month=d.month, day=d.day,\n hour=0, minute=0, second=0)\n win_end = datetime(year=d.year, month=d.month, day=d.day,\n hour=23, minute=59, second=59)\n\n # check the number of people who logged in today\n logged_in = Event.objects.filter(action=Event.LOGIN,timestamp__gt=win_begin, timestamp__lt=win_end).values('user').distinct()\n logger.debug(\"People that used: %s\"%str(logged_in))\n \n # check the number of people who saw the feed today\n feed_viewed = FeedEvent.objects.filter(action=FeedEvent.FEED,timestamp__gt=win_begin, timestamp__lt=win_end).values('user').distinct()\n logger.debug(\"People that used the feed: %s\"%str(feed_viewed))\n\n # check the number of people who have reviewed\n reviewed = Receipt.objects.filter(last_update__lt=win_end, last_update__gt=win_begin).values_list(\"txn__user\", flat=True).distinct()\n logger.debug(\"People that reviewed: %s\"%str(reviewed))\n\n # exclude previous winners\n prev_winners = Winner.objects.all().values('user__id')\n #.exclude(id__in=prev_winners)\n\n r_start = Q(techcashtransaction__receipt__last_update__gt=win_begin)\n r_end = Q(techcashtransaction__receipt__last_update__lt=win_end)\n t_start = Q(techcashtransaction__timestamp__gt=win_begin)\n t_end = Q(techcashtransaction__timestamp__lt=win_end)\n f_viewed = Q(id__in=feed_viewed)\n\n users_today = OTNUser.objects.filter(f_viewed | (r_start & r_end) | (t_start & t_end)).order_by('id').distinct()\n logger.debug(\"People that made txns (%d): %s\"%(users_today.count(), str(users_today)))\n\n # randomly select\n winner_id=-1\n if users_today.count() == 0:\n return JSONHttpResponse({'result':'0'})\n elif users_today.count() == 1:\n winner_id = users_today[0].id\n winner = users_today[0]\n else:\n # exclude Kwan, John McDonald, Dawei Shen, Alter Yod\n exclude_list=[-1, 2, 3, 5]\n while winner_id in exclude_list:\n draw = randint(0,users_today.count()-1)\n winner = users_today[draw] \n winner_id = winner.id\n \n if not test:\n # save to DB\n win_prize = Winner(user=winner, prize=\"$5 TechCASH\")\n win_prize.save()\n \n # if called the day after\n win_prize.timestamp = d \n win_prize.save()\n\n # email mitcard to credit from DIGRECPT1 to MIT ID\n msg = \"%s is today's OTN/MealTime winner!\\n\\nPlease transfer $5 from DIGRECPT1 to %s.\\n\\n-kwan\"%(winner.name, winner.mit_id)\n send_mail('OTN Winner', msg, '[email protected]', ['[email protected]'], fail_silently=False) \n\n # email winner\n msg = \"You are today's 
MealTime winner!\\n\\nYou will receive $5 credit in your TechCASH account.\\n\\n-kwan\"\n send_mail('OTN Winner', msg, '[email protected]', [winner.my_email, '[email protected]'], fail_silently=False) \n\n r['result'] = {'name':winner.name, 'id':\"%d\"%winner.id}\n else:\n r['result'] = '-1'\n\n return JSONHttpResponse(r)", "def create_analysis():\n \n date_now = datetime.now()\n for analysis in Analysis.objects.filter(activated=True):\n\t\n\tif analysis.last_report == None or analysis.last_report <= date_now - timedelta( seconds=PERIOD_CHOICES[analysis.interval]):\n\t \n\t if analysis.last_report != None and analysis.interval == 'n':\n\t\tcontinue\n\t \n\t results = []\n\t for report in analysis.queries.filter(activated=True):\n\t\t\n\t\tif analysis.date_from != None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to, run_date__gte=analyses.date_from).order_by('run_date') \n\t\telif analysis.date_from == None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to).order_by('run_date')\n\t\telif analysis.date_from != None and analysis.date_to == None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__gte=analyses.date_from).order_by('run_date')\n\t\telse:\n\t\t report_results = ReportResult.objects.filter(report=report).order_by('run_date')\n\t\t\n\t\t# create output from mongo output\n\t\toutput_result = OutputResult(report=report.title)\n\t\toutput_result.date_array = []\n\t\toutput_result.output_array = []\n\t\tprint \"\\n KOLIK: \"+ str(output_result.output_array)\n\t\tfor result in report_results:\n\t\t output_result.date_array.append(result.run_date)\n\t\t #print result.output\n\t\t #print \"\\nouttest: \"+str(output_result.output_array)\n\t\t mongo_output = OutputMongo(result.output)\n\t\t output_result.output_array.append(mongo_output.getoutput())\n\n\t\tprint \"out: \",output_result.output_array\n\t\tresults.append(output_result) \n\n\n\t #print results[0].output_array\n\t #print \"\\n\\n\"\n\t #print results[1].output_array\n\t # process outputs\n\t if not process_output_reports(results, analysis, date_now):\n\t\tprint \"Error in execute analysis: %s\" % (analysis.title)\n\t\tcontinue\n\t \n\t if analysis.interval != 'n':\n\t\tif analysis.date_to != None:\n\t\t analysis.date_to = analysis.date_to + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\tif analysis.date_from != None:\n\t\t analysis.date_from = analysis.date_from + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\t \n return True", "def get(self, request, *args, **kwargs):\n review_type = request.GET.get(\"type\", \"blood_sample\")\n page = int(request.GET.get('page', 1))\n # try:\n # day = request.GET.get(\n # 'day', datetime.datetime.today().strftime('%B %d, %Y'))\n # except:\n # day = request.GET.get(\n # 'day', datetime.datetime.today().strftime('%b. 
%d, %Y'))\n table = request.GET.get('table', 'False')\n day, days = UploadView.get_dayformated_and_days(self, request)\n\n if review_type == \"blood_sample\":\n\n blood_samples_imported = BloodSampleImport.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n\n # Checking latest uploaded sample file is reviewed or not\n if blood_samples_imported.count() > 0:\n sample_import_latest = blood_samples_imported.last()\n if not sample_import_latest.Reviewed:\n sample_import_latest.Reviewed = True\n sample_import_latest.save()\n\n if request.GET.get('firstOpen', 'False') == \"True\":\n if not day < datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0):\n day = BloodSample.objects.all().order_by('-CreatedAt').first().CreatedAt\n days = [(day - datetime.timedelta(days=x))\n for x in range(4)]\n days.reverse()\n # import ipdb\n # ipdb.set_trace()\n query_results = BloodSample.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0))).order_by('CreatedAt', 'CohortId', 'Barcode')\n if query_results.count() == 0 and request.GET.get('firstOpen', 'False') == \"True\":\n day = BloodSample.objects.all().order_by('-CreatedAt').first().CreatedAt\n days = [(day - datetime.timedelta(days=x))\n for x in range(4)]\n days.reverse()\n query_results = BloodSample.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0))).order_by('CreatedAt', 'CohortId', 'Barcode')\n # query_results = BloodSample.objects.filter(\n # ImportId__in=blood_samples_imported.values_list('id', flat=True)[::1]).order_by('id')\n\n paginator = Paginator(query_results, settings.ITEMS_PER_PAGE)\n\n if table == \"True\":\n try:\n results = paginator.page(page)\n except PageNotAnInteger:\n results = paginator.page(1)\n except EmptyPage:\n results = paginator.page(paginator.num_pages)\n return render(request, self.blood_sample_review_table_template, {\n \"objects\": results.object_list,\n \"current_page\": page,\n \"class\": 'reviewBloodDay',\n \"total_pages\": paginator.num_pages\n })\n\n shownextday = datetime.datetime.today().strftime(\n '%d%b%y') in [i.strftime('%d%b%y') for i in days]\n return render(request, self.blood_sample_review_template, {\n \"current_page\": page,\n \"total_pages\": paginator.num_pages,\n \"days\": days,\n \"active\": day,\n \"shownextday\": shownextday,\n \"class\": 'reviewBloodDay',\n })\n\n if review_type == \"manifest\":\n manifest_imported = ManifestImports.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n\n query_results = ManifestRecords.objects.filter(\n ImportId__in=manifest_imported.values_list('id', flat=True)[::1]).order_by('id')\n\n paginator = Paginator(query_results, settings.ITEMS_PER_PAGE)\n\n if table == \"True\":\n try:\n results = paginator.page(page)\n except PageNotAnInteger:\n results = paginator.page(1)\n except EmptyPage:\n results = paginator.page(paginator.num_pages)\n\n cursor.execute('''\n SELECT * FROM \"blood_sample_bloodsample\"\n INNER JOIN \"blood_sample_manifestrecords\" ON ( \"blood_sample_bloodsample\".\"CohortId\" = \"blood_sample_manifestrecords\".\"CohortId\" )\n ''')\n\n # row = cursor.fetchall()\n columns = [col[0] for col in cursor.description]\n data = [\n 
dict(zip(columns, row))\n for row in cursor.fetchall()\n ]\n # import ipdb\n # ipdb.set_trace()\n # print(row)\n\n # BloodSample.objects.filter(\n # CohortId__in=results.object_list.values_list('CohortId', flat=True)[::1])\n\n return render(request, self.manifest_review_table_template, {\n \"objects\": results.object_list,\n # \"blood_sample_objects\": BloodSample.objects.filter(\n # CohortId__in=results.object_list.values_list('CohortId', flat=True)[::1]),\n \"current_page\": page,\n \"total_pages\": paginator.num_pages\n })\n\n shownextday = datetime.datetime.today().strftime(\n '%d%b%y') in [i.strftime('%d%b%y') for i in days]\n return render(request, self.manifest_review_template, {\n \"current_page\": page,\n \"total_pages\": paginator.num_pages,\n \"days\": days,\n \"active\": day,\n \"shownextday\": shownextday,\n \"class\": 'reviewManifestDay',\n })", "def designTest(request):\n\n MAX_NEWS = 10\n start_id = '0'\n end_id = string.atoi(start_id) + 10\n\n news_count = New.objects.count() # Pocet vsech zaznamu novinek\n news_list = New.objects.all().order_by(\"-date\")[start_id:end_id] # Sort by date ... and only part of list\n # misto vsech zaznamu ziskat jen ty v intervalu start - stop -> API\n\n # Vypocet prvniho ID z predchozi skupiny novinek (jedna skupina = MAX_NEWS) \n start_id_num = string.atoi(start_id)\n if (start_id_num + MAX_NEWS) < news_count:\n preview_start_id = start_id_num + MAX_NEWS\n else:\n preview_start_id = start_id_num\n\n # Vypocet prvniho ID z nasledujici skupiny novinek (jedna skupina = MAX_NEWS) \n next_start_id = start_id_num - MAX_NEWS # prvni ID nasledujicich novinek\n if next_start_id < 0:\n next_start_id = 0;\n\n pictureOfWeek = PhotoOfWeek.objects.last()\n context = {'news_list': news_list, 'news_count': news_count, 'pictureOfWeek': pictureOfWeek, 'start_id': start_id,\n 'preview_start_id': preview_start_id, 'next_start_id': next_start_id}\n return render(request, 'designTest/news_design_test.html', context)", "def calculate(request):\n inicio = MeasureDataView.objects.latest('utimestamp').utimestamp\n final = inicio - timedelta(hours=1) -timedelta(minutes=1)\n\n\n '''Agregar hallar el maximo y el minimo de las doce horas '''\n minmax_inicio= inicio\n minmax_final = inicio - timedelta(hours=12)\n\n c_max = MeasureDataView.objects.all().filter(utimestamp__range=[final,inicio]).aggregate(Max('data'))['data__max']\n c_min = MeasureDataView.objects.all().filter(utimestamp__range=[final,inicio]).aggregate(Min('data'))['data__min']\n\n\n weight_prima = c_min/c_max\n\n if weight_prima > 0.5:\n weight = weight_prima\n else:\n weight = 0.5\n\n cList = []\n ci = []\n cTags = []\n for i in range(12):\n cTags.append(inicio)\n cTags.append(final)\n cList.append(MeasureDataView.objects.all().filter(utimestamp__range=[final,inicio]))\n ci.append( MeasureDataView.objects.all().filter(utimestamp__range=[final,inicio]).aggregate(Avg('data'))['data__avg'])\n inicio = final\n final = inicio - timedelta(hours=1) -timedelta(minutes=1)\n\n c = []\n for i in ci:\n if i is None :\n c.append(0)\n else:\n c.append(i*1000*0.38)\n\n\n i=1\n sum_inf=0\n sum_sup=0\n\n for x in c:\n sum_sup += pow(weight,i-1)*x\n sum_inf += pow(weight,i-1)\n i=i+1\n\n y = sum_sup/sum_inf\n\n\n\n if y>=0.0 and y<=12.0 :\n clow=0\n chigh=12.0\n ilow=0\n ihigh=50\n message= 'Good'\n elif y>=12.1 and y<=35.4 :\n clow=12.1\n chigh=35.4\n ilow=51\n ihigh=100\n message= 'Moderate'\n elif y>=35.5 and y<=55.4 :\n clow=35.5\n chigh=55.4\n ilow=101\n ihigh=150\n message = 'Unhealthy for Sensitive Groups'\n elif 
y>=55.5 and y<=150.4 :\n clow=55.5\n chigh=150.4\n ilow=151\n ihigh=200\n message = 'Unhealthy'\n elif y>=0.0 and y<=12.0 :\n clow=150.5\n chigh=250.4\n ilow=201\n ihigh=300\n message = 'Very Unhealthy'\n elif y>=250.5 and y<=350.4 :\n clow=250.5\n chigh=350.4\n ilow=301\n ihigh=400\n message = 'Hazardous'\n elif y>=350.5 and y<=500.4 :\n clow=350.5\n chigh=500.4\n ilow=401\n ihigh=500\n message = 'Hazardous'\n else:\n x\n\n\n aqi = ((y-clow)/(chigh-clow))*(ihigh-ilow) + ilow\n\n\n\n template = loader.get_template('polls/nowcast.html')\n context = {\n\n 'c_raw_set' : cList,\n 'c_tags' : cTags,\n 'c_set' :c,\n 'c_max' : max(c),\n 'c_min' :min(c),\n 'w_p' : weight_prima,\n 'w' :weight,\n 'nowcast' : y,\n 'nowcast_message' : message,\n 'aqi' : aqi\n }\n\n\n\n return HttpResponse(template.render(context, request))", "def grabBlogPostAnalysisStarted(self): #$NON-NLS-1$\r", "def posts_info(self,soup,Urls_list,Likes,URLS,Date):\n \n while 1:\n time.sleep(0.2)\n post=soup.find_all('div',class_=\"by\") \n for i in post:\n l=i.find('span',id=re.compile(\"like_\"))\n Hr=i.find('a',href=re.compile(\"#footer_action_list\"))\n if Hr==None:\n Hr=i.find('a',href=re.compile(\"/story.php\"))\n \n \n d=i.find('abbr')\n \n if Hr!=None:\n Href=Hr['href']\n Href=Href.replace('https://m.facebook.com','')\n Href=Href.replace('https://mbasic.facebook.com','') \n Urls_list.append(Href)\n if d !=None:\n date=d.get_text()\n Date.append(date)\n else:\n Date.append('None')\n \n if l!=None: \n if l.get_text()!=None:\n likes=l.get_text()\n if likes==\"Like · React\":\n likes='0'\n else:\n likes=likes.replace('· Like · React','') \n likes=likes.replace(\"· Like\",'')\n likes=likes.replace(\"· Love\",'')\n likes=likes.replace(\"· Haha\",'')\n likes=likes.replace(\"· Care\",'')\n likes=likes.replace(\"· Wow\",'')\n likes=likes.replace(\"· Angry\",'')\n Likes.append(likes)\n else:\n Likes.append(\"0\")\n else:\n Likes.append(\"0\")\n \n \n more=self.more_page(soup)\n if more !=None:\n soup=self.get_page(more,session)\n \n else:\n break\n \n Urls_list,URLS=self.clean_url(Urls_list,URLS) \n \n return Urls_list,URLS,Likes,Date", "def get_queryset(self):\n day = self.request.query_params.get('day',None)\n tz = pytz.timezone('Europe/London') \n dt_now = datetime.now(tz=tz) - timedelta(minutes=10) \n day_name_today = dt_now.strftime(\"%A\")\n queryset = OnlineMeeting.objects.annotate(search=SearchVector('description','title'),)\n queryset = queryset.filter(published=True)\n\n search = self.request.query_params.get('search', None)\n\n if search is not None and len(search) > 0:\n queryset = queryset.filter(search=search)\n\n now = self.request.query_params.get('now',None)\n top = int(self.request.query_params.get('top',0))\n if day=='now':\n \n \n date_today = dt_now.date()\n time_now = dt_now.time()\n datetime_now = datetime.combine(date_today,time_now)\n \n tomorrow = dt_now + timedelta(days=1) \n day_name_tomorrow = tomorrow.strftime(\"%A\")\n \n meetings_today = OnlineMeeting.objects.filter(((Q(day=day_name_today) | Q(day='All')) & Q(time__gte=dt_now.time())))#.order_by('time')\n meetings_tomorrow = OnlineMeeting.objects.filter((Q(day=day_name_tomorrow) & Q(time__lte=dt_now.time())))#.order_by('time')\n \n\n all = meetings_today #| meetings_tomorrow\n if day_name_today == 'sunday':\n all_ordered = all.order_by('time')\n else:\n all_ordered = all.order_by('time')\n \n if top:\n all_ordered = all_ordered[:top]\n return all_ordered#.annotate(the_rank=rank_by_day)\n elif day in 
['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']:\n queryset = queryset.filter(Q(day=day) | Q(day='All') )\n \n \n \n \n \n return queryset.order_by('time')", "def tentative_schedule(request):\n\n\tshows_dict = {\n\t\t0: [],\n\t\t1: [],\n\t\t2: [],\n\t\t3: [],\n\t\t4: [],\n\t\t5: [],\n\t\t6: []\n\t}\n\n\tfor i in range(7):\n\t\tfor show in Show.objects.filter(day=i).order_by('time'):\n\t\t\t\tshow_time = show.time\n\t\t\t\tdj = str(show.dj)\n\t\t\t\tif show.co_dj and str(show.co_dj) != \"Unknown Dj\":\n\t\t\t\t\tdj += \" & \" + str(show.co_dj)\n\t\t\t\tshows_dict[i].append([dj, show_time.strftime('%I:%M %p')])\n\n\treturn render(request, 'tentative_schedule.html', {\n\t\t\t'shows_dict': shows_dict\n\t})", "def task_tracking(request):\n task_name = [] \n date_text = request.GET.get('date', datetime.today().strftime(date_format))\n try:\n date = datetime.strptime(date_text.strip('/'), date_format)\n except ValueError:\n return HttpResponseNotFound(pagenotfound_msg)\n# if date > (datetime.today()).date():\n# return HttpResponseNotFound(pagenotfound_msg)\n# elif date < (datetime.today()).date() + relativedelta(months=-1):\n# return HttpResponseForbidden(forbidden_msg)\n# selected_tasks1 = Task.objects.filter(id__in = cache.get(request.user.pk, []))\n selected_tasks = TaskTrackingNew.objects.filter(user = request.user)\n project_set = Project.objects.filter(Q(apex_body_owner=request.user, is_active = True) |\n Q(owner=request.user, is_active = True) |\n Q(team=request.user, is_active = True) | Q(requested_by = request.user, is_active = True)).distinct().exclude(cancel = True)\n project_id = request.GET.get('project')\n nonprojecttask = Task.objects.filter(project=None)\n for each_pjt in project_set:\n task = Task.objects.filter(project= each_pjt.id).filter(assigned_resources = request.user) \n sel_task = [] \n each_pjt.__dict__.update({'Task':task,'sel_task':sel_task})\n tasks = serializers.serialize('json', task, fields=('name','id'))\n json = simplejson.dumps(tasks)\n dtstart = datetime( 1900, 01, 01,0, 0, 0, 0 )\n dtend = datetime( 1900, 01, 01, 23, 0, 0, 0 )\n times = []\n times.append(dtstart.strftime(\"%H.%M\"))\n while dtstart <= dtend:\n dtstart = dtstart + timedelta(minutes=30)\n times.append(dtstart.strftime(\"%H.%M\"))\n if request.method == 'POST': \n dif = TaskTrackingNew.objects.filter(date = date,user=request.user)\n dif.delete()\n for element in range(1, 5):\n for each in project_set: \n task = request.POST.get(str(each.id)+'_task'+str(element))\n if task == '' or task == None :\n continue\n Timefrom = request.POST.get(str(each.id)+'_Timefrom' + str(element))\n if Timefrom == '' or Timefrom == None :\n continue\n Timeto = request.POST.get(str(each.id)+'_Timeto' + str(element)) \n if Timeto == '' or Timeto == None :\n continue\n start_dt = dt.datetime.strptime(Timefrom, '%H.%M')\n end_dt = dt.datetime.strptime(Timeto, '%H.%M')\n diff = (end_dt - start_dt)\n diff.seconds/60 \n date_text = request.POST.get('date','')\n date = datetime.strftime(datetime.strptime(str(date_text.strip('/')),'%m/%d/%Y'),'%Y-%m-%d')\n project_dict = ({'user_id':request.user.id,\n 'project_id' :each.id,\n 'task_id':task,\n 'time_from':Timefrom,\n 'time_to':Timeto,\n 'time_spent':diff,\n 'date':date,\n })\n time_save = TaskTrackingNew(**project_dict)\n time_save.save() \n task = request.POST.get('0_task' + str(element))\n if task == '' or task == None :\n continue\n Timefrom = request.POST.get('0_Timefrom' + str(element))\n if Timefrom == '' or Timefrom == None :\n continue\n Timeto 
= request.POST.get('0_Timeto' + str(element))\n if Timeto == '' or Timeto == None :\n continue\n start_dt = dt.datetime.strptime(Timefrom, '%H.%M')\n end_dt = dt.datetime.strptime(Timeto, '%H.%M')\n diff = (end_dt - start_dt)\n diff.seconds/60 \n date_text = request.POST.get('date','')\n date = datetime.strftime(datetime.strptime(str(date_text.strip('/')),'%m/%d/%Y'),'%Y-%m-%d') \n nonproject_dict = ({'user_id':request.user.id,\n 'project_id' :'0',\n 'task_id':task,\n 'time_from':Timefrom,\n 'time_to':Timeto,\n 'time_spent':diff,\n 'date':date,\n }) \n timesheet_save = TaskTrackingNew(**nonproject_dict)\n timesheet_save.save()\n return HttpResponseRedirect('/timesheetnew/edit/?date='+date_text)\n return render_to_response('timesheet2.html', {'projects' : project_set,'each_pjt' : each_pjt,'sel_non_pjt_tsk': [],'Task' : nonprojecttask,'times':times,'date_text': date_text }, context_instance = RequestContext(request))", "def create_reports():\n \n date_now = datetime.now()\n for report in Report.objects.filter(activated=True):\n\t\n\tif report.last_report == None or report.last_report <= date_now - timedelta( seconds=PERIOD_CHOICES[report.interval]):\n\t #if report is now so do not execute it times \n\t if report.last_report != None and report.interval == 'n':\n\t\tcontinue\n\t if report.date_to != None and report.date_to < date_now:\n\t\tcontinue\n\t \n\t # check if query is good\n\t check_ok, db_query = check_query(report)\n\t if not check_ok:\n\t\tcontinue\n\t \n\t # check if date patterns are in query\n\t date_pattern_from = string.find(db_query, \"${{d1}}\")\n\t date_pattern_to = string.find(db_query, \"${{d2}}\")\n\t if date_pattern_from != -1:\n\t\tdate_from = date_now - timedelta( seconds=PERIOD_CHOICES[report.interval])\n\t else:\n\t\tdate_from = None\n\t if date_pattern_to != -1:\n\t\tdate_to = date_now\n\t else:\n\t\tdate_to = None\n\n\t # excute reports for past periods\n\t if not execute_past_reports(report, db_query, date_from, date_to, date_now):\n\t\tcontinue\n\n\t # execute query for this time\n\t if date_from != None:\n\t\tdb_query = string.replace(db_query, \"${{d1}}\", \"new Date(%s,%s,%s)\" % (date_from.year, date_from.month - 1, date_from.day))\n\t if date_to != None:\n\t\tdb_query = string.replace(db_query, \"${{d2}}\", \"new Date(%s,%s,%s)\" % (date_to.year, date_to.month - 1, date_to.day))\n\n\t if not execute_query(db_query, report, date_now):\n\t\tprint \"error - unsupported query: report title: %s, id: \" % (report.title, report.id)\n\t\tcontinue\n\n return True", "def get_blogs(request):\n address = request.POST.get('address')\n\n results = {\n \"sub\": [],\n \"mine\": [],\n \"browse\": []\n }\n my_blogs = Blog.objects.filter(~Q(msg=\"\"), address_from=address).order_by('-time')\n for m in my_blogs:\n results['mine'].append({\n \"address_from\": m.address_from,\n \"block_index\": m.block_index,\n \"tx_id\": m.tx_id,\n \"msg\": m.msg,\n \"key\": m.key,\n \"time\": m.time\n })\n\n my_sub_ids = [s.address for s in Subscription.objects.all()]\n\n sub_blogs = Blog.objects.filter(~Q(msg=\"\"), address_from__in=my_sub_ids).order_by(\"-time\")\n for m in sub_blogs:\n results['sub'].append({\n \"address_from\": m.address_from,\n \"block_index\": m.block_index,\n \"tx_id\": m.tx_id,\n \"msg\": m.msg,\n \"key\": m.key,\n \"time\": m.time\n })\n\n browsable_blogs = {}\n browse_blogs_db = Blog.objects.filter(~Q(address_from__in=my_sub_ids)).order_by('-time')\n for m in browse_blogs_db:\n if m.address_from not in browsable_blogs:\n browsable_blogs[m.address_from] = {\n 
\"address_from\": m.address_from,\n \"latest_post_time\": m.time,\n \"total_posts\": 1\n }\n else:\n browsable_blogs[m.address_from]['total_posts'] += 1\n\n results['browse'] = sorted(browsable_blogs.values(), key=lambda k: k['latest_post_time'])\n\n return HttpResponse(json.dumps({\n \"status\": \"success\",\n \"data\": results\n }, default=helpers.json_custom_parser), content_type='application/json')", "def get(self, request, *args, **kwargs):\n p_period_str = kwargs['p_period']\n l_best_media = None\n liked_photos = None\n p_period_verbose = None\n\n try:\n p_period = int(p_period_str)\n except:\n raise\n\n if p_period == 0:\n p_period_verbose = _('today')\n if p_period == 1:\n p_period_verbose = _('1 day ago')\n if p_period == 2:\n p_period_verbose = str(p_period) + _(' days ago')\n\n\n date_from = datetime.today() - timedelta(days=p_period+1)\n date_from_to = datetime.today() - timedelta(days=p_period)\n\n # Common for all members views ===================================================\n l_categories = Category.objects.all()\n l_attributes = Attribute.objects.all()\n try:\n logged_member = Member.objects.get(django_user__username=request.user)\n show_describe_button = logged_member.is_editor(request)\n is_monthly_member = logged_member.is_monthly_member()\n is_yearly_member = logged_member.is_yearly_member()\n except ObjectDoesNotExist:\n logged_member = None\n except:\n raise HttpResponseNotFound\n\n\n # END Common for all members views ===============================================\n #l_squarefollowing_queryset = SquareFollowing.objects.all()\n l_squarefollowings_count = SquareFollowing.objects.filter(member_id2=logged_member).count()\n if l_squarefollowings_count >= MIN_SQUAREFOLLOWINGS:\n\n l_token = logged_member.get_member_token(request)\n instagram_session = InstagramSession(p_is_admin=False, p_token=l_token['access_token'])\n instagram_session.init_instagram_API()\n #l_squarefollowings_count = SquareFollowing.objects.filter(member_id2=logged_member).count()\n if l_squarefollowings_count >= MIN_SQUAREFOLLOWINGS:\n l_smart_feed_helper = SmartFeedHelper(\n p_feed_owner_instagram_id=logged_member.instagram_user_id,\n p_instagram_session=instagram_session,\n p_batch_size=SMART_FEED_BATCH_SIZE,\n p_min_id=None,\n p_date_from=date_from,\n p_date_to=date_from_to\n )\n l_best_media = l_smart_feed_helper.find_best_media(\n p_media_to_return=SMART_FEED_BATCH_SIZE,\n p_starting_media_id=None,\n p_logged_member=logged_member,\n p_max_days=30\n )\n\n liked_photos = []\n for x_media in l_best_media:\n my_likes = MyLikes(request.user.username, x_media.id, instagram_session )\n has_user_liked_media, no_of_likes = my_likes.has_user_liked_media()\n if has_user_liked_media:\n liked_photos.extend([x_media.id])\n\n\n # Limit calculation --------------------------------------------------------------\n logged_member.refresh_api_limits(request)\n x_ratelimit_remaining, x_ratelimit = logged_member.get_api_limits()\n\n x_ratelimit_used = x_ratelimit - x_ratelimit_remaining\n if x_ratelimit != 0:\n x_limit_pct = (x_ratelimit_used / x_ratelimit) * 100\n else:\n x_limit_pct = 100\n # END Limit calculation ----------------------------------------------------------\n\n return render(request,\n self.template_name,\n dict(\n best_media=l_best_media,\n liked_photos=liked_photos,\n period_verbose=p_period_verbose,\n period_number=p_period,\n squarefollowings_count=l_squarefollowings_count,\n new_friends_interaction=0,\n\n is_monthly_member=is_monthly_member,\n is_yearly_member=is_yearly_member,\n 
logged_member=logged_member,\n x_ratelimit_remaining=x_ratelimit_remaining,\n x_ratelimit=x_ratelimit,\n x_limit_pct=x_limit_pct,\n categories=l_categories,\n attributes=l_attributes,\n )\n )", "def get(self, request, *args, **kwargs):\n day, days = self.get_dayformated_and_days(request)\n shownextday = datetime.datetime.today().strftime(\n '%d%b%y') in [i.strftime('%d%b%y') for i in days]\n\n blood_samples_loaded = BloodSample.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n\n blood_samples_imported = BloodSampleImport.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n blood_samples_imported_cnt = BloodSample.objects.filter(\n ImportId__in=blood_samples_imported.values_list('id', flat=True)[::1]).count()\n try:\n reviewed = blood_samples_imported.last().Reviewed\n except:\n reviewed = False\n\n manifest_imported = ManifestImports.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n no_of_files_uploaded = manifest_imported.count()\n manifest_imported_cnt = ManifestRecords.objects.filter(\n ImportId__in=manifest_imported.values_list('id', flat=True)[::1]).count()\n\n manifest_loaded = ManifestRecords.objects.filter(\n CollectionDateTime__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n\n receipt_imported = ReceiptImports.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n receipt_imported_cnt = ReceiptRecords.objects.filter(\n ImportId__in=receipt_imported.values_list('id', flat=True)[::1]).count()\n\n receipt_loaded = ReceiptRecords.objects.filter(\n DateTimeTaken__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n\n processed_imported = ProcessedImports.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n processed_imported_cnt = ProcessedReport.objects.filter(\n ImportId__in=processed_imported.values_list('id', flat=True)[::1]).count()\n\n return render(request, self.template_name, {\n \"days\": days,\n \"blood_samples_cnt\": blood_samples_loaded.count(),\n \"blood_samples_imported\": blood_samples_imported_cnt,\n 'manifest_imported': manifest_imported_cnt,\n 'manifest_loaded_count': manifest_loaded.count(),\n 'receipt_imported': receipt_imported_cnt,\n \"receipt_loaded_cnt\": receipt_loaded.count(),\n 'processed_imported': processed_imported_cnt,\n 'active': day,\n 'shownextday': shownextday,\n 'reviewed': reviewed,\n 'class': 'uploadDay',\n 'no_of_files_uploaded': no_of_files_uploaded,\n })", "def trackRentRequest(self):\n\t\t#start_date = timezone.now().date()\n\t\tstart_dat=datetime.today()\n\t\tstart_date = start_dat - timedelta( hours=start_dat.time().hour,minutes=start_dat.time().minute,seconds=start_dat.time().second ) \n\t\tend_date=start_dat\n\t\tans=None\n\t\t#print start_dat.time().hour\n\t\tprint end_date\n\t\tans=Rents.objects.filter(date_of_issue__range=(start_date,end_date))\n\t\tlst=[]\n\t\tfor b in 
ans:\n\t\t\towneradd=b.owner_id.address\n\t\t\tuseradd=b.userid.address\n\t\t\tusername=b.userid.email\n\t\t\townername=b.owner_id.email\n\t\t\tuserphone=b.userid.contact_no\n\t\t\townerphone=b.owner_id.contact_no\n\t\t\tbookname=b.bookid.title\n\t\t\tstatus=b.paymentid.ispending\n\t\t\tbook=b.__dict__\n\t\t\tbook['owneradd']=owneradd\n\t\t\tbook['useradd']=useradd\n\t\t\tbook['username']=username\n\t\t\tbook['ownername']=ownername\n\t\t\tbook['userphone']=userphone\n\t\t\tbook['ownerphone']=ownerphone\n\t\t\tbook['name']=bookname\n\t\t\tif status==True:\n\t\t\t\tbook['status']=\"Pending\"\n\t\t\telse:\n\t\t\t\tbook['status']=\"Delivered\"\n\t\t\tlst.append(book)\n\t\t#print ans\n\t\tif ans is None:\n\t\t\tprint \"not found\"\n\t\telse:\n\t\t\tprint \"found\"\n\t\treturn lst", "def this_week(request):\n if request.method == 'GET':\n movies = Movie.objects.filter(date_of_release__gte=(datetime.date.today()), date_of_release__lte=(datetime.date.today()+timedelta(days=7)))\n movies = movies.order_by('budget')[:5]\n serializer = MovieSerializer(movies, many=True)\n return_obj = serializer.data\n return Response(return_obj)", "def update_weekly_leaderboard(request):\n \n \"\"\"NOTE:Always execute this after executing Overall leader board\"\"\"\n \n try:\n user = UserProfile.objects.order_by('-prev_week_score')\n count = UserProfile.objects.order_by('-prev_week_score').count()\n if count > 10:\n count = 10\n entries = WeeklyLeaderboard.objects.all().count()\n if entries > 0:\n week = WeeklyLeaderboard.objects.all()\n i = 0\n while i < count:\n w = week[i]\n w.username = user[i].user\n w.rank = OverallLeaderboard.objects.get(username = user[i].user)\n w.points_earned_this_week = user[i].prev_week_score\n w.save()\n i += 1\n else:\n i = 0\n while i < count:\n w = WeeklyLeaderboard()\n w.username = user[i].user\n w.rank = OverallLeaderboard.objects.get(username = user[i].user)\n w.points_earned_this_week = user[i].prev_week_score\n w.save()\n i += 1\n userpro = UserProfile.objects.all()\n count = UserProfile.objects.all().count()\n i = 0\n while i < count:\n u = userpro[i]\n u.prev_week_score = 0\n u.save()\n i += 1\n \n data = {'msg':''}\n messages.success(request, \"Weekly Leaderboard updated successfully.\")\n return render_to_response('my_admin_tools/menu/background_task.html',data,context_instance=RequestContext(request))\n except:\n msg = traceback.format_exc()\n data = {'msg':msg}\n messages.error(request, \"Update Weekly Leaderboard failed.\")\n return render_to_response('my_admin_tools/menu/background_task.html',data,context_instance=RequestContext(request))", "def GetAlert(diagnostic_cases, diagnostic, week,year):\n\n diag_cases = diagnostic_cases.filter(diagnostic=diagnostic)\n average = 0\n standard_deviation = 0\n cases = 0\n #number of years\n n_years = 0\n year_var = 0\n f = []\n year_ob = Year.objects.filter(year__lt=year)\n weeks = Week.objects.filter(year__in=year_ob,week=week.week).order_by('year')\n for w in weeks:\n\n\n if year_var != w.year.year:\n n_years += 1\n year_var = w.year.year\n\n\n pac = diag_cases.filter(week=w)\n x = 0\n for p in pac:\n\n cases += p.cases\n x = p.cases\n f.append(x)\n\n if cases != 0:\n\n average = cases / n_years\n\n #calculation of standar deviation\n if len(f) != 1:\n suma2 = 0\n for cases in f:\n suma2 += (cases-average)**2\n standard_deviation = math.sqrt(suma2 / len(f))\n cases = 0\n dia = diag_cases.filter(week=week)\n\n for d in dia:\n cases += d.cases\n\n #array of class dots for draw the chart\n\n lower_rank = 0\n top_rank = 0\n if 
n_years != 0:\n lower_rank = average - (1.96 * standard_deviation / math.sqrt(n_years))\n top_rank = average + (1.96 * standard_deviation / math.sqrt(n_years))\n\n dots = DotsGraphicAverage(average,week.week, lower_rank, top_rank,cases)\n\n return dots", "def report_missing_auto(request):\n\n usecases = UseCase.objects.all()\n json_resp = {}\n languages = ['english', 'English']\n for el in usecases:\n use = el.name\n batches = []\n batches.append('all')\n if use.lower() in ['colon','lung','uterine cervix']:\n report = Report.objects.filter(name=el,language__in=languages).exclude(institute = 'PUBMED')\n count_rep = report.count()\n for rp in report:\n if rp.batch not in batches:\n batches.append(rp.batch)\n # print(el)\n # print(count_rep)\n\n if count_rep > 0:\n json_resp[use] = {}\n for batch in batches:\n batch = str(batch)\n json_resp[use][batch] = {}\n if batch == 'all':\n json_resp[use][batch]['tot'] = count_rep\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and r.language in %s;\",\n [str(use),'Robot_user','labels','PUBMED',tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n else:\n report_count = Report.objects.filter(name=el,batch = batch,language__in=languages).exclude(institute = 'PUBMED').count()\n json_resp[use][batch]['tot'] = report_count\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND r.language = g.language WHERE r.name = %s AND g.username = %s AND gt_type=%s and institute != %s and batch = %s and r.language in %s;\",\n [str(use), 'Robot_user', 'labels', 'PUBMED',batch, tuple(languages)]) # We could consider any of the gt_type\n groundTruths = cursor.fetchone()[0]\n if groundTruths is None:\n json_resp[use][batch]['annotated'] = 0\n else:\n json_resp[use][batch]['annotated'] = groundTruths\n # print(json_resp)\n return JsonResponse(json_resp)", "def search_geoloc_range(request):\n\n distance = float(request.POST['distance'])\n\n latlng = (request.POST['latlng']).replace(\"(\",'').replace(\")\",'').split(', ')\n latitude = float(latlng[0])\n longitude = float(latlng[1])\n print distance\n print latitude\n print longitude\n\n # count range of nowa latlng\n radius_lat = (distance/(69.172)) #count latitude range\n min_lat = latitude - radius_lat\n max_lat = latitude + radius_lat\n print min_lat\n print max_lat\n\n radius_lng = (math.fabs(distance/(math.cos(longitude) * 69.172))) #count longitude range\n min_lng = longitude - radius_lng\n max_lng = longitude + radius_lng\n print min_lng\n print max_lng\n\n # if sys.version_info < (2, 7):\n # min_lat = decimal.Decimal(str(min_lat))\n # max_lat = decimal.Decimal(str(max_lat))\n # min_lng = decimal.Decimal(str(min_lng))\n # max_lng = decimal.Decimal(str(max_lng))\n\n # query db to match the range of dentist work place in db\n total = WorkPlace.objects.filter(latitude__gte=min_lat, latitude__lte=max_lat,\n longitude__gte=min_lng, longitude__lte=max_lng).count()\n\n result = []\n\n # step for how many lines separate per page. then count nowa page's start line no. 
and end line no.\n if 'page' in request.POST:\n page = request.POST['page']\n else:\n page = 1\n\n step = 10\n end = step * int(page)\n start = step * (int(page)-1)\n is_end = False\n\n if (end - total) < step:\n is_end = False\n WorkPlaceDict = WorkPlace.objects.filter(latitude__gte=min_lat, latitude__lte=max_lat,\n longitude__gte=min_lng, longitude__lte=max_lng).order_by('id')[start:end]\n\n for i in WorkPlaceDict:\n\n dentist_profile = i.dentistid\n did = dentist_profile.user.user.id\n\n latitude = str(i.latitude)\n longitude = str(i.longitude)\n latlng = \"(\"+latitude+\", \"+longitude+\")\"\n\n counts = _relation_counts(request,did,request.user.id)\n\n i_wrap = {\n \"clinic\": i.clinic_name,\n \"work_location\": i.location,\n \"latlng\": latlng,\n \"business_hour\": str(i.business_hour),\n \"dentistid\": did,\n \"dentistname\": _show_obj_name(did),\n \"summary\": dentist_profile.user.summary,\n \"avatar\": settings.MEDIA_URL + str(dentist_profile.user.imagesmall),\n \"patient_count\": counts[\"patient_count\"],\n \"follower_count\": counts[\"follower_count\"],\n \"status\": counts[\"status\"],\n \"is_end\": is_end\n }\n\n result.append(i_wrap)\n\n else:\n is_end = True\n i_wrap = {\n \"is_end\": is_end\n }\n\n result.append(i_wrap)\n\n template_var = {\n \"searchresult\": result\n }\n\n return JsonResponse(template_var)", "def tipresults(request):\n #Basic counts and definitions\n recordedtips = SportsTippingScoreModel.objects.all()\n\n try:\n results = SportsTippingResultsModel.objects.get(name=\"result\")\n\n if results:\n if results.result1 == \"NOT_COMPLETE\":\n num_games1 = 0\n else:\n num_games1 = 1\n if results.result2 == \"NOT_COMPLETE\":\n num_games2 = 0\n else:\n num_games2 = 1\n if results.result3 == \"NOT_COMPLETE\":\n num_games3 = 0\n else:\n num_games3 = 1\n if results.result4 == \"NOT_COMPLETE\":\n num_games4 = 0\n else:\n num_games4 = 1\n if results.result5 == \"NOT_COMPLETE\":\n num_games5 = 0\n else:\n num_games5 = 1\n if results.result6 == \"NOT_COMPLETE\":\n num_games6 = 0\n else:\n num_games6 = 1\n if results.result7 == \"NOT_COMPLETE\":\n num_games7 = 0\n else:\n num_games7 = 1\n if results.result8 == \"NOT_COMPLETE\":\n num_games8 = 0\n else:\n num_games8 = 1\n if results.result9 == \"NOT_COMPLETE\":\n num_games9 = 0\n else:\n num_games9 = 1\n if results.result10 == \"NOT_COMPLETE\":\n num_games10 = 0\n else:\n num_games10 = 1\n\n num_games = num_games1 +num_games2 +num_games3 +num_games4 +num_games5 +num_games6 +num_games7 +num_games8 +num_games9 +num_games10\n\n else:\n num_games = 0\n\n except:\n num_games = 0\n\n context = {\n 'recordedtips': recordedtips,\n 'num_games': num_games,\n }\n\n return render(request, 'tipResults.html', context=context)", "def find_tips():\n while True:\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n id = business_object['business_id']\n tip_object = tip_col.find({\"business_id\": id}).limit(10)\n print(f\"{business_object['name']} tips are: \")\n for tip in tip_object:\n print(tip[\"text\"])", "def schedule():\n db = get_db()\n all_table = get_all_table()\n # # Get all team name\n # all_team_name = db.session.query(all_table[\"all_team_basic\"].c.nameEn).order_by(text('nameEn asc')).all()\n # # Get all country name\n # all_country_name = db.session.query(all_table[\"players_basic\"].c.countryEn).distinct().order_by(text('countryEn asc')).all()\n\n # # Add paginate with on the button: ( strange! can't use onclick attri? 
)\n # page = request.args.get('page', 1, type=int)\n # player_name_fc = request.args.get('player_', 'All Players', type=str)\n # player_team = request.args.get('team_', 'All Teams', type=str)\n # player_position = request.args.get('position_', \"All Positions\", type=str)\n # player_country = request.args.get('country_', 'All Countries', type=str)\n\n # player_full_name = request.form.get('playerfull_')\n # if player_full_name == None:\n # player_full_name = \"\"\n # # filter in name \n # if player_name_fc == \"All Players\": \n # posts = db.session.query(all_table[\"players_basic\"]).order_by(text('lastNameEn asc'))\n # else :\n # posts = db.session.query(all_table[\"players_basic\"]).filter(all_table[\"players_basic\"].c.lastNameEn.like(\"{}%\".format(player_name_fc))).order_by(text('lastNameEn asc'))\n\n # # filter in search box\n # if player_full_name != \"\":\n # posts = db.session.query(all_table[\"players_basic\"]).filter(all_table[\"players_basic\"].c.code.like(\"%{}%\".format(player_full_name))).order_by(text('lastNameEn asc'))\n\n # print(player_full_name)\n # # filter in team \n # if player_team != \"All Teams\": \n # team_id = db.session.query(all_table[\"all_team_basic\"]).filter_by(nameEn = player_team).all()\n # # if the answer is an empty set!\n # print(team_id[0])\n # cur_team_id = team_id[0][-6] \n # if len(team_id) != 0:\n # posts = posts.filter_by(teamId = cur_team_id)\n\n # # filter in position \n # if player_position != \"All Positions\": \n # posts = posts.filter(all_table[\"players_basic\"].c.position.like(\"%{}%\".format(player_position)))\n\n # # filter in country\n # if player_country != \"All Countries\":\n # posts = posts.filter_by(countryEn = player_country)\n \n # # player list in every page\n # posts_paged = posts.paginate(page, current_app.config['POSTS_PER_PAGE'], False)\n\n # still contain all the filter info \n # next_url = url_for('blog.index', page=posts_paged.next_num,\n # team_ = player_team, \n # player_ = player_name_fc, \n # position_ = player_position,\n # country_ = player_country,\n # playerfull_ = player_full_name) \\\n # if posts_paged.has_next else None\n\n # prev_url = url_for('blog.index', page=posts_paged.prev_num,\n # team_ = player_team, \n # player_ = player_name_fc, \n # position_ = player_position,\n # country_ = player_country,\n # playerfull_ = player_full_name) \\\n # if posts_paged.has_prev else None\n\n # # count current items and total pages\n # total_player_num = posts.count() \n # total_pages = math.ceil(total_player_num * 1.0 / current_app.config['POSTS_PER_PAGE'])\n\n return render_template('games/Home-Games.html')\n\n # , # all_player_brief\n # posts=posts_paged.items, \n # prev_url = prev_url, \n # next_url = next_url,\n # page = page,\n # player_name_fc = player_name_fc,\n # player_full_name = player_full_name,\n # player_team = player_team,\n # player_position = player_position,\n # player_country = player_country,\n # total_player_num = total_player_num,\n # total_pages = total_pages,\n # all_team_name = all_team_name,\n # all_country_name = all_country_name", "def get_queryset(self):\n #.1 below code was showing future poll/questions\n #.1 return Question.objects.order_by('-pub_date')[:5]\n\n #re-defining\n \"\"\"\n Return the last five published questions (not including those set to be\n published in the future).\n \"\"\" \n #imported timezone\n \n return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]", "def json_frapp(request):\n from pv.settings import MEDIA_URL\n\n if 
request.GET.get('date') == None:\n start = datetime.combine(date.today(), time(0, 0))\n else:\n start = datetime.combine( datetime.strptime(request.GET.get('date'), '%Y-%m-%d').date(), time(0, 0))\n\n end = datetime.combine(start, time(23, 59))\n\n timeslots = TimeSlot.objects.filter(start__gte=start,start__lte=end).select_related('show').order_by('start')\n\n\n '''Generate categories object for output'''\n\n categories = Category.objects.all()\n categories_output = []\n\n for c in categories:\n c_entry = {\n 'id': c.id,\n 'color': c.color.replace('#', '').upper(),\n 'namedisplay': c.category,\n 'description': c.description\n }\n\n categories_output.append(c_entry)\n\n # Get all series for timeslots\n series = set()\n for ts in timeslots:\n series.add(ts.show)\n\n\n '''Generate series object for output'''\n\n series_output = []\n\n for s in series:\n metainfos = []\n metainfos.append({ 'key': 'ProduzentIn', 'value': ', '.join(ts.show.hosts.values_list('name', flat=True)) })\n metainfos.append({ 'key': 'E-Mail', 'value': ', '.join(ts.show.hosts.values_list('email', flat=True)) })\n\n image = '' if s.image.name == None or s.image.name == '' else str(get_current_site(request)) + MEDIA_URL + s.image.name\n url = '' if s.website == None or s.website == '' else s.website\n\n # Get active schedules for the given date\n # But include upcoming single timeslots (with rrule_id=1)\n schedules = Schedule.objects.filter( Q(show=s.id,is_repetition=False) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n schedules_repetition = Schedule.objects.filter( Q(show=s.id,is_repetition=True) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n broadcastinfos = ''\n\n if not schedules.exists():\n continue\n\n for schedule in schedules:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n if schedules_repetition.exists():\n broadcastinfos = broadcastinfos + 'Wiederholung jeweils:'\n for schedule in schedules_repetition:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n s_entry = {\n 'id': s.id,\n 'categoryid': s.category.values_list('id', flat=True)[0],\n 'color': s.category.values_list('color', flat=True)[0].replace('#', '').upper(),\n 'namedisplay': s.name,\n 'description': s.description,\n 'url': url,\n 'image': image,\n 'broadcastinfos': broadcastinfos,\n 'metainfos': metainfos\n }\n\n series_output.append(s_entry)\n\n\n '''Generate shows object for output'''\n\n shows_output = []\n\n for ts in timeslots:\n\n is_repetition = ' ' + _('REP') if ts.schedule.is_repetition is 1 else ''\n namedisplay = ts.show.name + is_repetition\n description = ts.show.description\n url = str(get_current_site(request)) + '/shows/' + ts.show.slug\n urlmp3 = ''\n\n # If there's a note to the timeslot use its title, description and url\n try:\n note = Note.objects.get(timeslot=ts.id)\n namedisplay = note.title + is_repetition\n description = note.content\n url = str(get_current_site(request)) + '/notes/' + note.slug\n urlmp3 = note.audio_url\n except ObjectDoesNotExist:\n pass\n\n ts_entry = {\n 'id': ts.id,\n 'seriesid': ts.show.id,\n 'datetimestart': ts.start.strftime('%d.%m.%Y %H:%M:%S'),\n 'datetimeend': ts.end.strftime('%d.%m.%Y %H:%M:%S'),\n 'namedisplay': namedisplay,\n 'description': description,\n 'url': url,\n 'urlmp3': urlmp3,\n }\n\n shows_output.append(ts_entry)\n\n output = {}\n output['categories'] = categories_output\n output['series'] = 
series_output\n output['shows'] = shows_output\n\n return HttpResponse(json.dumps(output, ensure_ascii=False).encode('utf8'),\n content_type=\"application/json; charset=utf-8\")", "def show_weeks_tasks(self):\n for day in [datetime.today() + timedelta(days=i) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.deadline == day.strftime('%Y-%m-%d')).\\\n order_by(self.Table.deadline).all()\n print(f'{day.strftime(\"%A\")} {day.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()", "def return_weekly_figure():\n today = datetime.datetime.now()\n\n while 1:\n try:\n today_str = str(today.day) + \"/\" + \"{:02d}\".format(today.month) + \"/\" + str(today.year)\n match = covid_table.find(date=today_str)\n match.next()\n running_total = 0\n for i in range(7):\n running_total += return_daily_figure(today)\n today = today - datetime.timedelta(days=1)\n average_dose_per_day = round(running_total/7)\n return running_total, average_dose_per_day \n except:\n today = today - datetime.timedelta(days=1)", "def show_last_watched_by_date(self, alias):\n date_to = datetime.date.today()\n if alias == 'day':\n date_from = date_to + datetime.timedelta(days=-1)\n elif alias == 'week':\n date_from = date_to + datetime.timedelta(days=-7)\n elif alias == 'month':\n prev_month = date_to.replace(day=1) + datetime.timedelta(days=-1)\n date_from = date_to + datetime.timedelta(days=-prev_month.day)\n else:\n print('Unknown alias - {0}'.format(alias))\n sys.exit(1)\n\n self.load_shows()\n print()\n print('Watched from {0} to {1}'.format(\n date_from.strftime('%Y-%m-%d'),\n date_to.strftime('%Y-%m-%d')\n ))\n print()\n re_c = re.compile(r'(\\d{1,2})\\.(\\d{1,2})\\.(\\d{4})')\n count = 0\n for show_id in self.shows_data:\n next_show = self.shows_data[show_id]\n if next_show['watchedEpisodes'] <= 0:\n continue\n watched = self.load_watched(next_show['showId'])\n epis = None\n last_map = {}\n for epi_id in watched:\n next_episode = watched[epi_id]\n re_m = re_c.match(next_episode['watchDate'])\n if not re_m:\n print('Warning: unknown date format - {0}'.format(\n next_episode['watchDate']))\n continue\n dtv = [int(s) for s in re_m.group(3, 2, 1)]\n epi_date = datetime.date(dtv[0], dtv[1], dtv[2])\n if date_from <= epi_date <= date_to:\n if not epis:\n epis = self.load_episodes(show_id)\n count += 1\n if epi_id not in epis['episodes']:\n print('Episode not found: {0}'.format(epi_id))\n logging.debug('Episodes:')\n logging.debug(epis)\n continue\n\n episode = epis['episodes'][epi_id]\n date_key = epi_date.toordinal() * 1000\\\n + episode['seasonNumber'] * 10\\\n + episode['episodeNumber']\n last_map[date_key] = episode\n\n for date_key in sorted(last_map.keys()):\n episode = last_map[date_key]\n print('{0} s{1:02d}e{2:02d} \"{3}\" at {4}'.format(\n tr_out(epis['title']),\n episode['seasonNumber'], episode['episodeNumber'],\n tr_out(episode['title']),\n watched[str(episode['id'])]['watchDate']\n ))\n print()\n print('Total count: {0}'.format(count))\n print()", "def run_allowance():\n # get current day of the week\n # get list of allowances that are weekly and match current day of the week\n # create transactions for each of them\n weekday = datetime.date.today().isoweekday()\n allowance_list = Allowance.query.filter_by(\n period='weekly',\n period_day=weekday,\n is_active=True\n )\n for allowance in allowance_list:\n #TODO check that allowance not already added\n trx = Transaction(\n allowance.kid.id,\n 
datetime.date.today(),\n allowance.amount,\n 'Weekly Allowance',\n description='Automatic'\n )\n trx.allowance_id = allowance.id\n trx.save(allowance.kid.account)\n\n # get current day of the month\n # if last day of the month, then include all days from last day of the month\n # to 31\n # get list of allowances that are monthly and match current day of the month\n # create transactions for each of them\n day = int(datetime.date.today().strftime(\"%d\"))\n month = int(datetime.date.today().strftime(\"%m\"))\n year = int(datetime.date.today().strftime(\"%Y\"))\n max_day = calendar.monthrange(year, month)[1]\n allowance_list = Allowance.query.filter_by(\n period='Monthly',\n )\n if day == max_day:\n allowance_list = allowance_list.filter_by(period_day >= max_day)\n else:\n allowance_list = allowance_list.filter_by(period_day = day)\n for allowance in allowance_list:\n trx = Transaction(\n allowance.kid.id,\n datetime.date.today(),\n allowance.amount,\n 'Monthly Allowance',\n description='Automatic',\n )\n trx.allowance_id = allowance.id\n trx.save(allowance.kid.account)\n\n return True" ]
[ "0.6388377", "0.5429181", "0.5272578", "0.52141297", "0.51466364", "0.51412165", "0.5139298", "0.51343983", "0.50914544", "0.50841016", "0.500031", "0.49782017", "0.49781772", "0.49575546", "0.49335107", "0.49260718", "0.49128708", "0.48942697", "0.48925683", "0.48861113", "0.48634434", "0.48599097", "0.48561648", "0.48532322", "0.48341477", "0.48148787", "0.47862342", "0.47832346", "0.47822624", "0.47687185" ]
0.7329774
0
last_week=datetime.date.today()-timedelta(days=30) import pdb;pdb.set_trace() fans_list = ["wwwttshow", "ttshowpet", "draw.fans", "TTShowMusic", "GoodNews.FANS"] fans_pages = FansPage.objects.filter(fans_type=fans_type, date__gte=last_week, date__lte=datetime.date.today()).order_by("date") start = fans_pages[0] last = fans_pages[len(fans_pages) - 1] talk_about_is = (last.talk_about_is - start.talk_about_is) total_like_count = (last.total_like_count - start.total_like_count) total_fans = (last.total_fans - start.total_fans)
def month_report_handle(fans_type): start = datetime.date.today() - timedelta(days=datetime.date.today().day - 1) today = datetime.date.today() #import pdb;pdb.set_trace() #fans_list = ["wwwttshow", "ttshowpet", "draw.fans", "TTShowMusic", "GoodNews.FANS"] fans_pages = FansPage.objects.filter(fans_type=fans_type, date__gte=start, date__lte=today).order_by("date") start = fans_pages[0] last = fans_pages[len(fans_pages) - 1] talk_about_is = (last.talk_about_is - start.talk_about_is) total_like_count = (last.total_like_count - start.total_like_count) total_fans = (last.total_fans - start.total_fans) return {"talk_about_is":talk_about_is, "total_like_count":total_like_count, "total_fans":total_fans, "start":start.date, "last":last.date}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def week_report_handle(fans_type):\n\t#import pdb;pdb.set_trace()\n\tlast_day = datetime.date.today()-timedelta(days=datetime.datetime.today().weekday() + 1)\n\ttoday = datetime.date.today()\n\n\tfans_pages = FansPage.objects.filter(fans_type=fans_type, date__gte=last_day, date__lte=today).order_by(\"date\")\n\n\tstart = fans_pages[0]\n\tlast = fans_pages[len(fans_pages) - 1]\n\n\t#talk_about_is = (last.talk_about_is - start.talk_about_is) / (start.talk_about_is + 0.0) * 100\n\ttalk_about_is = (last.talk_about_is - start.talk_about_is)\n\t#total_like_count = (last.total_like_count - start.total_like_count) / (start.total_like_count + 0.0) * 100\n\ttotal_like_count = (last.total_like_count - start.total_like_count)\n\t#total_fans = (last.total_fans - start.total_fans) / (start.total_fans + 0.0) * 100\n\ttotal_fans = (last.total_fans - start.total_fans)\n\treturn {\"talk_about_is\":talk_about_is, \"total_like_count\":total_like_count, \"total_fans\":total_fans, \"start\":start.date, \"last\":last.date}", "def grabBlogPostAnalysisStarted(self): #$NON-NLS-1$\r", "def get_queryset(self):\n day = self.request.query_params.get('day',None)\n tz = pytz.timezone('Europe/London') \n dt_now = datetime.now(tz=tz) - timedelta(minutes=10) \n day_name_today = dt_now.strftime(\"%A\")\n queryset = OnlineMeeting.objects.annotate(search=SearchVector('description','title'),)\n queryset = queryset.filter(published=True)\n\n search = self.request.query_params.get('search', None)\n\n if search is not None and len(search) > 0:\n queryset = queryset.filter(search=search)\n\n now = self.request.query_params.get('now',None)\n top = int(self.request.query_params.get('top',0))\n if day=='now':\n \n \n date_today = dt_now.date()\n time_now = dt_now.time()\n datetime_now = datetime.combine(date_today,time_now)\n \n tomorrow = dt_now + timedelta(days=1) \n day_name_tomorrow = tomorrow.strftime(\"%A\")\n \n meetings_today = OnlineMeeting.objects.filter(((Q(day=day_name_today) | Q(day='All')) & Q(time__gte=dt_now.time())))#.order_by('time')\n meetings_tomorrow = OnlineMeeting.objects.filter((Q(day=day_name_tomorrow) & Q(time__lte=dt_now.time())))#.order_by('time')\n \n\n all = meetings_today #| meetings_tomorrow\n if day_name_today == 'sunday':\n all_ordered = all.order_by('time')\n else:\n all_ordered = all.order_by('time')\n \n if top:\n all_ordered = all_ordered[:top]\n return all_ordered#.annotate(the_rank=rank_by_day)\n elif day in ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']:\n queryset = queryset.filter(Q(day=day) | Q(day='All') )\n \n \n \n \n \n return queryset.order_by('time')", "def get_queryset(self):\n #.1 below code was showing future poll/questions\n #.1 return Question.objects.order_by('-pub_date')[:5]\n\n #re-defining\n \"\"\"\n Return the last five published questions (not including those set to be\n published in the future).\n \"\"\" \n #imported timezone\n \n return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]", "def find_winner(request):\n r = {}\n\n test = False\n if request.POST.get('test','0') == '1':\n test = True \n\n if request.POST.get('code','000') == 'ch00seW199Er':\n # check the number of people who transacted today\n d = date.today() #-timedelta(30)\n win_begin = datetime(year=d.year, month=d.month, day=d.day,\n hour=0, minute=0, second=0)\n win_end = datetime(year=d.year, month=d.month, day=d.day,\n hour=23, minute=59, second=59)\n\n # check the number of people who logged in today\n logged_in = 
Event.objects.filter(action=Event.LOGIN,timestamp__gt=win_begin, timestamp__lt=win_end).values('user').distinct()\n logger.debug(\"People that used: %s\"%str(logged_in))\n \n # check the number of people who saw the feed today\n feed_viewed = FeedEvent.objects.filter(action=FeedEvent.FEED,timestamp__gt=win_begin, timestamp__lt=win_end).values('user').distinct()\n logger.debug(\"People that used the feed: %s\"%str(feed_viewed))\n\n # check the number of people who have reviewed\n reviewed = Receipt.objects.filter(last_update__lt=win_end, last_update__gt=win_begin).values_list(\"txn__user\", flat=True).distinct()\n logger.debug(\"People that reviewed: %s\"%str(reviewed))\n\n # exclude previous winners\n prev_winners = Winner.objects.all().values('user__id')\n #.exclude(id__in=prev_winners)\n\n r_start = Q(techcashtransaction__receipt__last_update__gt=win_begin)\n r_end = Q(techcashtransaction__receipt__last_update__lt=win_end)\n t_start = Q(techcashtransaction__timestamp__gt=win_begin)\n t_end = Q(techcashtransaction__timestamp__lt=win_end)\n f_viewed = Q(id__in=feed_viewed)\n\n users_today = OTNUser.objects.filter(f_viewed | (r_start & r_end) | (t_start & t_end)).order_by('id').distinct()\n logger.debug(\"People that made txns (%d): %s\"%(users_today.count(), str(users_today)))\n\n # randomly select\n winner_id=-1\n if users_today.count() == 0:\n return JSONHttpResponse({'result':'0'})\n elif users_today.count() == 1:\n winner_id = users_today[0].id\n winner = users_today[0]\n else:\n # exclude Kwan, John McDonald, Dawei Shen, Alter Yod\n exclude_list=[-1, 2, 3, 5]\n while winner_id in exclude_list:\n draw = randint(0,users_today.count()-1)\n winner = users_today[draw] \n winner_id = winner.id\n \n if not test:\n # save to DB\n win_prize = Winner(user=winner, prize=\"$5 TechCASH\")\n win_prize.save()\n \n # if called the day after\n win_prize.timestamp = d \n win_prize.save()\n\n # email mitcard to credit from DIGRECPT1 to MIT ID\n msg = \"%s is today's OTN/MealTime winner!\\n\\nPlease transfer $5 from DIGRECPT1 to %s.\\n\\n-kwan\"%(winner.name, winner.mit_id)\n send_mail('OTN Winner', msg, '[email protected]', ['[email protected]'], fail_silently=False) \n\n # email winner\n msg = \"You are today's MealTime winner!\\n\\nYou will receive $5 credit in your TechCASH account.\\n\\n-kwan\"\n send_mail('OTN Winner', msg, '[email protected]', [winner.my_email, '[email protected]'], fail_silently=False) \n\n r['result'] = {'name':winner.name, 'id':\"%d\"%winner.id}\n else:\n r['result'] = '-1'\n\n return JSONHttpResponse(r)", "def create_analysis():\n \n date_now = datetime.now()\n for analysis in Analysis.objects.filter(activated=True):\n\t\n\tif analysis.last_report == None or analysis.last_report <= date_now - timedelta( seconds=PERIOD_CHOICES[analysis.interval]):\n\t \n\t if analysis.last_report != None and analysis.interval == 'n':\n\t\tcontinue\n\t \n\t results = []\n\t for report in analysis.queries.filter(activated=True):\n\t\t\n\t\tif analysis.date_from != None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to, run_date__gte=analyses.date_from).order_by('run_date') \n\t\telif analysis.date_from == None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to).order_by('run_date')\n\t\telif analysis.date_from != None and analysis.date_to == None:\n\t\t report_results = ReportResult.objects.filter(report=report, 
run_date__gte=analyses.date_from).order_by('run_date')\n\t\telse:\n\t\t report_results = ReportResult.objects.filter(report=report).order_by('run_date')\n\t\t\n\t\t# create output from mongo output\n\t\toutput_result = OutputResult(report=report.title)\n\t\toutput_result.date_array = []\n\t\toutput_result.output_array = []\n\t\tprint \"\\n KOLIK: \"+ str(output_result.output_array)\n\t\tfor result in report_results:\n\t\t output_result.date_array.append(result.run_date)\n\t\t #print result.output\n\t\t #print \"\\nouttest: \"+str(output_result.output_array)\n\t\t mongo_output = OutputMongo(result.output)\n\t\t output_result.output_array.append(mongo_output.getoutput())\n\n\t\tprint \"out: \",output_result.output_array\n\t\tresults.append(output_result) \n\n\n\t #print results[0].output_array\n\t #print \"\\n\\n\"\n\t #print results[1].output_array\n\t # process outputs\n\t if not process_output_reports(results, analysis, date_now):\n\t\tprint \"Error in execute analysis: %s\" % (analysis.title)\n\t\tcontinue\n\t \n\t if analysis.interval != 'n':\n\t\tif analysis.date_to != None:\n\t\t analysis.date_to = analysis.date_to + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\tif analysis.date_from != None:\n\t\t analysis.date_from = analysis.date_from + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\t \n return True", "def get(self, request, *args, **kwargs):\n review_type = request.GET.get(\"type\", \"blood_sample\")\n page = int(request.GET.get('page', 1))\n # try:\n # day = request.GET.get(\n # 'day', datetime.datetime.today().strftime('%B %d, %Y'))\n # except:\n # day = request.GET.get(\n # 'day', datetime.datetime.today().strftime('%b. %d, %Y'))\n table = request.GET.get('table', 'False')\n day, days = UploadView.get_dayformated_and_days(self, request)\n\n if review_type == \"blood_sample\":\n\n blood_samples_imported = BloodSampleImport.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n\n # Checking latest uploaded sample file is reviewed or not\n if blood_samples_imported.count() > 0:\n sample_import_latest = blood_samples_imported.last()\n if not sample_import_latest.Reviewed:\n sample_import_latest.Reviewed = True\n sample_import_latest.save()\n\n if request.GET.get('firstOpen', 'False') == \"True\":\n if not day < datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0):\n day = BloodSample.objects.all().order_by('-CreatedAt').first().CreatedAt\n days = [(day - datetime.timedelta(days=x))\n for x in range(4)]\n days.reverse()\n # import ipdb\n # ipdb.set_trace()\n query_results = BloodSample.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0))).order_by('CreatedAt', 'CohortId', 'Barcode')\n if query_results.count() == 0 and request.GET.get('firstOpen', 'False') == \"True\":\n day = BloodSample.objects.all().order_by('-CreatedAt').first().CreatedAt\n days = [(day - datetime.timedelta(days=x))\n for x in range(4)]\n days.reverse()\n query_results = BloodSample.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0))).order_by('CreatedAt', 'CohortId', 'Barcode')\n # query_results = BloodSample.objects.filter(\n # ImportId__in=blood_samples_imported.values_list('id', flat=True)[::1]).order_by('id')\n\n paginator = Paginator(query_results, 
settings.ITEMS_PER_PAGE)\n\n if table == \"True\":\n try:\n results = paginator.page(page)\n except PageNotAnInteger:\n results = paginator.page(1)\n except EmptyPage:\n results = paginator.page(paginator.num_pages)\n return render(request, self.blood_sample_review_table_template, {\n \"objects\": results.object_list,\n \"current_page\": page,\n \"class\": 'reviewBloodDay',\n \"total_pages\": paginator.num_pages\n })\n\n shownextday = datetime.datetime.today().strftime(\n '%d%b%y') in [i.strftime('%d%b%y') for i in days]\n return render(request, self.blood_sample_review_template, {\n \"current_page\": page,\n \"total_pages\": paginator.num_pages,\n \"days\": days,\n \"active\": day,\n \"shownextday\": shownextday,\n \"class\": 'reviewBloodDay',\n })\n\n if review_type == \"manifest\":\n manifest_imported = ManifestImports.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n\n query_results = ManifestRecords.objects.filter(\n ImportId__in=manifest_imported.values_list('id', flat=True)[::1]).order_by('id')\n\n paginator = Paginator(query_results, settings.ITEMS_PER_PAGE)\n\n if table == \"True\":\n try:\n results = paginator.page(page)\n except PageNotAnInteger:\n results = paginator.page(1)\n except EmptyPage:\n results = paginator.page(paginator.num_pages)\n\n cursor.execute('''\n SELECT * FROM \"blood_sample_bloodsample\"\n INNER JOIN \"blood_sample_manifestrecords\" ON ( \"blood_sample_bloodsample\".\"CohortId\" = \"blood_sample_manifestrecords\".\"CohortId\" )\n ''')\n\n # row = cursor.fetchall()\n columns = [col[0] for col in cursor.description]\n data = [\n dict(zip(columns, row))\n for row in cursor.fetchall()\n ]\n # import ipdb\n # ipdb.set_trace()\n # print(row)\n\n # BloodSample.objects.filter(\n # CohortId__in=results.object_list.values_list('CohortId', flat=True)[::1])\n\n return render(request, self.manifest_review_table_template, {\n \"objects\": results.object_list,\n # \"blood_sample_objects\": BloodSample.objects.filter(\n # CohortId__in=results.object_list.values_list('CohortId', flat=True)[::1]),\n \"current_page\": page,\n \"total_pages\": paginator.num_pages\n })\n\n shownextday = datetime.datetime.today().strftime(\n '%d%b%y') in [i.strftime('%d%b%y') for i in days]\n return render(request, self.manifest_review_template, {\n \"current_page\": page,\n \"total_pages\": paginator.num_pages,\n \"days\": days,\n \"active\": day,\n \"shownextday\": shownextday,\n \"class\": 'reviewManifestDay',\n })", "def tentative_schedule(request):\n\n\tshows_dict = {\n\t\t0: [],\n\t\t1: [],\n\t\t2: [],\n\t\t3: [],\n\t\t4: [],\n\t\t5: [],\n\t\t6: []\n\t}\n\n\tfor i in range(7):\n\t\tfor show in Show.objects.filter(day=i).order_by('time'):\n\t\t\t\tshow_time = show.time\n\t\t\t\tdj = str(show.dj)\n\t\t\t\tif show.co_dj and str(show.co_dj) != \"Unknown Dj\":\n\t\t\t\t\tdj += \" & \" + str(show.co_dj)\n\t\t\t\tshows_dict[i].append([dj, show_time.strftime('%I:%M %p')])\n\n\treturn render(request, 'tentative_schedule.html', {\n\t\t\t'shows_dict': shows_dict\n\t})", "def show_weeks_tasks(self):\n for day in [datetime.today() + timedelta(days=i) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.deadline == day.strftime('%Y-%m-%d')).\\\n order_by(self.Table.deadline).all()\n print(f'{day.strftime(\"%A\")} {day.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. 
{task.task}')\n else:\n print('Nothing to do!')\n print()", "def show_last_watched_by_date(self, alias):\n date_to = datetime.date.today()\n if alias == 'day':\n date_from = date_to + datetime.timedelta(days=-1)\n elif alias == 'week':\n date_from = date_to + datetime.timedelta(days=-7)\n elif alias == 'month':\n prev_month = date_to.replace(day=1) + datetime.timedelta(days=-1)\n date_from = date_to + datetime.timedelta(days=-prev_month.day)\n else:\n print('Unknown alias - {0}'.format(alias))\n sys.exit(1)\n\n self.load_shows()\n print()\n print('Watched from {0} to {1}'.format(\n date_from.strftime('%Y-%m-%d'),\n date_to.strftime('%Y-%m-%d')\n ))\n print()\n re_c = re.compile(r'(\\d{1,2})\\.(\\d{1,2})\\.(\\d{4})')\n count = 0\n for show_id in self.shows_data:\n next_show = self.shows_data[show_id]\n if next_show['watchedEpisodes'] <= 0:\n continue\n watched = self.load_watched(next_show['showId'])\n epis = None\n last_map = {}\n for epi_id in watched:\n next_episode = watched[epi_id]\n re_m = re_c.match(next_episode['watchDate'])\n if not re_m:\n print('Warning: unknown date format - {0}'.format(\n next_episode['watchDate']))\n continue\n dtv = [int(s) for s in re_m.group(3, 2, 1)]\n epi_date = datetime.date(dtv[0], dtv[1], dtv[2])\n if date_from <= epi_date <= date_to:\n if not epis:\n epis = self.load_episodes(show_id)\n count += 1\n if epi_id not in epis['episodes']:\n print('Episode not found: {0}'.format(epi_id))\n logging.debug('Episodes:')\n logging.debug(epis)\n continue\n\n episode = epis['episodes'][epi_id]\n date_key = epi_date.toordinal() * 1000\\\n + episode['seasonNumber'] * 10\\\n + episode['episodeNumber']\n last_map[date_key] = episode\n\n for date_key in sorted(last_map.keys()):\n episode = last_map[date_key]\n print('{0} s{1:02d}e{2:02d} \"{3}\" at {4}'.format(\n tr_out(epis['title']),\n episode['seasonNumber'], episode['episodeNumber'],\n tr_out(episode['title']),\n watched[str(episode['id'])]['watchDate']\n ))\n print()\n print('Total count: {0}'.format(count))\n print()", "def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]", "def this_week(request):\n if request.method == 'GET':\n movies = Movie.objects.filter(date_of_release__gte=(datetime.date.today()), date_of_release__lte=(datetime.date.today()+timedelta(days=7)))\n movies = movies.order_by('budget')[:5]\n serializer = MovieSerializer(movies, many=True)\n return_obj = serializer.data\n return Response(return_obj)", "def designTest(request):\n\n MAX_NEWS = 10\n start_id = '0'\n end_id = string.atoi(start_id) + 10\n\n news_count = New.objects.count() # Pocet vsech zaznamu novinek\n news_list = New.objects.all().order_by(\"-date\")[start_id:end_id] # Sort by date ... 
and only part of list\n # misto vsech zaznamu ziskat jen ty v intervalu start - stop -> API\n\n # Vypocet prvniho ID z predchozi skupiny novinek (jedna skupina = MAX_NEWS) \n start_id_num = string.atoi(start_id)\n if (start_id_num + MAX_NEWS) < news_count:\n preview_start_id = start_id_num + MAX_NEWS\n else:\n preview_start_id = start_id_num\n\n # Vypocet prvniho ID z nasledujici skupiny novinek (jedna skupina = MAX_NEWS) \n next_start_id = start_id_num - MAX_NEWS # prvni ID nasledujicich novinek\n if next_start_id < 0:\n next_start_id = 0;\n\n pictureOfWeek = PhotoOfWeek.objects.last()\n context = {'news_list': news_list, 'news_count': news_count, 'pictureOfWeek': pictureOfWeek, 'start_id': start_id,\n 'preview_start_id': preview_start_id, 'next_start_id': next_start_id}\n return render(request, 'designTest/news_design_test.html', context)", "def touragenda(request):\n active_events = TourAgendaModel.objects.order_by('number')\n friday_events = TourAgendaModel.objects.all().filter(day='FRIDAY')\n saturday_events = TourAgendaModel.objects.all().filter(day='SATURDAY')\n sunday_events = TourAgendaModel.objects.all().filter(day='SUNDAY')\n\n context = {\n 'active_events': active_events,\n 'friday_events': friday_events,\n 'saturday_events': saturday_events,\n 'sunday_events': sunday_events,\n }\n\n return render(request, 'tourAgenda.html', context=context)", "def trackRentRequest(self):\n\t\t#start_date = timezone.now().date()\n\t\tstart_dat=datetime.today()\n\t\tstart_date = start_dat - timedelta( hours=start_dat.time().hour,minutes=start_dat.time().minute,seconds=start_dat.time().second ) \n\t\tend_date=start_dat\n\t\tans=None\n\t\t#print start_dat.time().hour\n\t\tprint end_date\n\t\tans=Rents.objects.filter(date_of_issue__range=(start_date,end_date))\n\t\tlst=[]\n\t\tfor b in ans:\n\t\t\towneradd=b.owner_id.address\n\t\t\tuseradd=b.userid.address\n\t\t\tusername=b.userid.email\n\t\t\townername=b.owner_id.email\n\t\t\tuserphone=b.userid.contact_no\n\t\t\townerphone=b.owner_id.contact_no\n\t\t\tbookname=b.bookid.title\n\t\t\tstatus=b.paymentid.ispending\n\t\t\tbook=b.__dict__\n\t\t\tbook['owneradd']=owneradd\n\t\t\tbook['useradd']=useradd\n\t\t\tbook['username']=username\n\t\t\tbook['ownername']=ownername\n\t\t\tbook['userphone']=userphone\n\t\t\tbook['ownerphone']=ownerphone\n\t\t\tbook['name']=bookname\n\t\t\tif status==True:\n\t\t\t\tbook['status']=\"Pending\"\n\t\t\telse:\n\t\t\t\tbook['status']=\"Delivered\"\n\t\t\tlst.append(book)\n\t\t#print ans\n\t\tif ans is None:\n\t\t\tprint \"not found\"\n\t\telse:\n\t\t\tprint \"found\"\n\t\treturn lst", "def newsList(request):\n\n news_count = New.objects.count() # Pocet vsech zaznamu novinek\n news_list = New.objects.all().order_by(\"date\") # Sort by date ... 
and only part of list\n # misto vsech zaznamu ziskat jen ty v intervalu start - stop -> API\n\n pictureOfWeek = PhotoOfWeek.objects.last()\n context = {'news_list': news_list, 'news_count': news_count, 'pictureOfWeek': pictureOfWeek}\n return render(request, 'news/newsList.html', context)", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now()) #แสดงคำถามและตัวเลือกของคำถาม", "def home(request: Request):\n latest_question_list = request.dbsession.query(Question).order_by(Question.published_at.desc()).all()\n return locals()", "def task_tracking(request):\n task_name = [] \n date_text = request.GET.get('date', datetime.today().strftime(date_format))\n try:\n date = datetime.strptime(date_text.strip('/'), date_format)\n except ValueError:\n return HttpResponseNotFound(pagenotfound_msg)\n# if date > (datetime.today()).date():\n# return HttpResponseNotFound(pagenotfound_msg)\n# elif date < (datetime.today()).date() + relativedelta(months=-1):\n# return HttpResponseForbidden(forbidden_msg)\n# selected_tasks1 = Task.objects.filter(id__in = cache.get(request.user.pk, []))\n selected_tasks = TaskTrackingNew.objects.filter(user = request.user)\n project_set = Project.objects.filter(Q(apex_body_owner=request.user, is_active = True) |\n Q(owner=request.user, is_active = True) |\n Q(team=request.user, is_active = True) | Q(requested_by = request.user, is_active = True)).distinct().exclude(cancel = True)\n project_id = request.GET.get('project')\n nonprojecttask = Task.objects.filter(project=None)\n for each_pjt in project_set:\n task = Task.objects.filter(project= each_pjt.id).filter(assigned_resources = request.user) \n sel_task = [] \n each_pjt.__dict__.update({'Task':task,'sel_task':sel_task})\n tasks = serializers.serialize('json', task, fields=('name','id'))\n json = simplejson.dumps(tasks)\n dtstart = datetime( 1900, 01, 01,0, 0, 0, 0 )\n dtend = datetime( 1900, 01, 01, 23, 0, 0, 0 )\n times = []\n times.append(dtstart.strftime(\"%H.%M\"))\n while dtstart <= dtend:\n dtstart = dtstart + timedelta(minutes=30)\n times.append(dtstart.strftime(\"%H.%M\"))\n if request.method == 'POST': \n dif = TaskTrackingNew.objects.filter(date = date,user=request.user)\n dif.delete()\n for element in range(1, 5):\n for each in project_set: \n task = request.POST.get(str(each.id)+'_task'+str(element))\n if task == '' or task == None :\n continue\n Timefrom = request.POST.get(str(each.id)+'_Timefrom' + str(element))\n if Timefrom == '' or Timefrom == None :\n continue\n Timeto = request.POST.get(str(each.id)+'_Timeto' + str(element)) \n if Timeto == '' or Timeto == None :\n continue\n start_dt = dt.datetime.strptime(Timefrom, '%H.%M')\n end_dt = dt.datetime.strptime(Timeto, '%H.%M')\n diff = (end_dt - start_dt)\n diff.seconds/60 \n date_text = request.POST.get('date','')\n date = datetime.strftime(datetime.strptime(str(date_text.strip('/')),'%m/%d/%Y'),'%Y-%m-%d')\n project_dict = ({'user_id':request.user.id,\n 'project_id' :each.id,\n 'task_id':task,\n 'time_from':Timefrom,\n 'time_to':Timeto,\n 'time_spent':diff,\n 'date':date,\n })\n time_save = TaskTrackingNew(**project_dict)\n time_save.save() \n task = request.POST.get('0_task' + str(element))\n if task == '' or task == None :\n continue\n Timefrom = request.POST.get('0_Timefrom' + str(element))\n if Timefrom == '' or Timefrom == None :\n continue\n Timeto = request.POST.get('0_Timeto' + str(element))\n if Timeto == '' or Timeto == None :\n continue\n start_dt = dt.datetime.strptime(Timefrom, '%H.%M')\n end_dt = 
dt.datetime.strptime(Timeto, '%H.%M')\n diff = (end_dt - start_dt)\n diff.seconds/60 \n date_text = request.POST.get('date','')\n date = datetime.strftime(datetime.strptime(str(date_text.strip('/')),'%m/%d/%Y'),'%Y-%m-%d') \n nonproject_dict = ({'user_id':request.user.id,\n 'project_id' :'0',\n 'task_id':task,\n 'time_from':Timefrom,\n 'time_to':Timeto,\n 'time_spent':diff,\n 'date':date,\n }) \n timesheet_save = TaskTrackingNew(**nonproject_dict)\n timesheet_save.save()\n return HttpResponseRedirect('/timesheetnew/edit/?date='+date_text)\n return render_to_response('timesheet2.html', {'projects' : project_set,'each_pjt' : each_pjt,'sel_non_pjt_tsk': [],'Task' : nonprojecttask,'times':times,'date_text': date_text }, context_instance = RequestContext(request))", "def _get_date(self):\n for fax_in in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_in.date:\n from_dt = datetime.datetime.strptime(str(fax_in.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_in.fax_date = date_planned", "def create_reports():\n \n date_now = datetime.now()\n for report in Report.objects.filter(activated=True):\n\t\n\tif report.last_report == None or report.last_report <= date_now - timedelta( seconds=PERIOD_CHOICES[report.interval]):\n\t #if report is now so do not execute it times \n\t if report.last_report != None and report.interval == 'n':\n\t\tcontinue\n\t if report.date_to != None and report.date_to < date_now:\n\t\tcontinue\n\t \n\t # check if query is good\n\t check_ok, db_query = check_query(report)\n\t if not check_ok:\n\t\tcontinue\n\t \n\t # check if date patterns are in query\n\t date_pattern_from = string.find(db_query, \"${{d1}}\")\n\t date_pattern_to = string.find(db_query, \"${{d2}}\")\n\t if date_pattern_from != -1:\n\t\tdate_from = date_now - timedelta( seconds=PERIOD_CHOICES[report.interval])\n\t else:\n\t\tdate_from = None\n\t if date_pattern_to != -1:\n\t\tdate_to = date_now\n\t else:\n\t\tdate_to = None\n\n\t # excute reports for past periods\n\t if not execute_past_reports(report, db_query, date_from, date_to, date_now):\n\t\tcontinue\n\n\t # execute query for this time\n\t if date_from != None:\n\t\tdb_query = string.replace(db_query, \"${{d1}}\", \"new Date(%s,%s,%s)\" % (date_from.year, date_from.month - 1, date_from.day))\n\t if date_to != None:\n\t\tdb_query = string.replace(db_query, \"${{d2}}\", \"new Date(%s,%s,%s)\" % (date_to.year, date_to.month - 1, date_to.day))\n\n\t if not execute_query(db_query, report, date_now):\n\t\tprint \"error - unsupported query: report title: %s, id: \" % (report.title, report.id)\n\t\tcontinue\n\n return True", "def find_tips():\n while True:\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n id = business_object['business_id']\n tip_object = tip_col.find({\"business_id\": id}).limit(10)\n print(f\"{business_object['name']} tips are: \")\n for tip in tip_object:\n print(tip[\"text\"])", "def get(self, request, *args, **kwargs):\n p_period_str = kwargs['p_period']\n l_best_media = None\n liked_photos = None\n p_period_verbose = None\n\n try:\n p_period = int(p_period_str)\n except:\n raise\n\n if p_period == 0:\n p_period_verbose = _('today')\n if p_period == 1:\n p_period_verbose = _('1 day ago')\n if p_period == 2:\n p_period_verbose = str(p_period) + _(' days ago')\n\n\n date_from = datetime.today() - timedelta(days=p_period+1)\n date_from_to 
= datetime.today() - timedelta(days=p_period)\n\n # Common for all members views ===================================================\n l_categories = Category.objects.all()\n l_attributes = Attribute.objects.all()\n try:\n logged_member = Member.objects.get(django_user__username=request.user)\n show_describe_button = logged_member.is_editor(request)\n is_monthly_member = logged_member.is_monthly_member()\n is_yearly_member = logged_member.is_yearly_member()\n except ObjectDoesNotExist:\n logged_member = None\n except:\n raise HttpResponseNotFound\n\n\n # END Common for all members views ===============================================\n #l_squarefollowing_queryset = SquareFollowing.objects.all()\n l_squarefollowings_count = SquareFollowing.objects.filter(member_id2=logged_member).count()\n if l_squarefollowings_count >= MIN_SQUAREFOLLOWINGS:\n\n l_token = logged_member.get_member_token(request)\n instagram_session = InstagramSession(p_is_admin=False, p_token=l_token['access_token'])\n instagram_session.init_instagram_API()\n #l_squarefollowings_count = SquareFollowing.objects.filter(member_id2=logged_member).count()\n if l_squarefollowings_count >= MIN_SQUAREFOLLOWINGS:\n l_smart_feed_helper = SmartFeedHelper(\n p_feed_owner_instagram_id=logged_member.instagram_user_id,\n p_instagram_session=instagram_session,\n p_batch_size=SMART_FEED_BATCH_SIZE,\n p_min_id=None,\n p_date_from=date_from,\n p_date_to=date_from_to\n )\n l_best_media = l_smart_feed_helper.find_best_media(\n p_media_to_return=SMART_FEED_BATCH_SIZE,\n p_starting_media_id=None,\n p_logged_member=logged_member,\n p_max_days=30\n )\n\n liked_photos = []\n for x_media in l_best_media:\n my_likes = MyLikes(request.user.username, x_media.id, instagram_session )\n has_user_liked_media, no_of_likes = my_likes.has_user_liked_media()\n if has_user_liked_media:\n liked_photos.extend([x_media.id])\n\n\n # Limit calculation --------------------------------------------------------------\n logged_member.refresh_api_limits(request)\n x_ratelimit_remaining, x_ratelimit = logged_member.get_api_limits()\n\n x_ratelimit_used = x_ratelimit - x_ratelimit_remaining\n if x_ratelimit != 0:\n x_limit_pct = (x_ratelimit_used / x_ratelimit) * 100\n else:\n x_limit_pct = 100\n # END Limit calculation ----------------------------------------------------------\n\n return render(request,\n self.template_name,\n dict(\n best_media=l_best_media,\n liked_photos=liked_photos,\n period_verbose=p_period_verbose,\n period_number=p_period,\n squarefollowings_count=l_squarefollowings_count,\n new_friends_interaction=0,\n\n is_monthly_member=is_monthly_member,\n is_yearly_member=is_yearly_member,\n logged_member=logged_member,\n x_ratelimit_remaining=x_ratelimit_remaining,\n x_ratelimit=x_ratelimit,\n x_limit_pct=x_limit_pct,\n categories=l_categories,\n attributes=l_attributes,\n )\n )", "def get(self, request, *args, **kwargs):\n day, days = self.get_dayformated_and_days(request)\n shownextday = datetime.datetime.today().strftime(\n '%d%b%y') in [i.strftime('%d%b%y') for i in days]\n\n blood_samples_loaded = BloodSample.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n\n blood_samples_imported = BloodSampleImport.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n blood_samples_imported_cnt = BloodSample.objects.filter(\n 
ImportId__in=blood_samples_imported.values_list('id', flat=True)[::1]).count()\n try:\n reviewed = blood_samples_imported.last().Reviewed\n except:\n reviewed = False\n\n manifest_imported = ManifestImports.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n no_of_files_uploaded = manifest_imported.count()\n manifest_imported_cnt = ManifestRecords.objects.filter(\n ImportId__in=manifest_imported.values_list('id', flat=True)[::1]).count()\n\n manifest_loaded = ManifestRecords.objects.filter(\n CollectionDateTime__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n\n receipt_imported = ReceiptImports.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n receipt_imported_cnt = ReceiptRecords.objects.filter(\n ImportId__in=receipt_imported.values_list('id', flat=True)[::1]).count()\n\n receipt_loaded = ReceiptRecords.objects.filter(\n DateTimeTaken__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n\n processed_imported = ProcessedImports.objects.filter(\n CreatedAt__range=(day.replace(hour=0, minute=0, second=0, microsecond=0), day.replace(\n hour=23, minute=59, second=59, microsecond=0)))\n processed_imported_cnt = ProcessedReport.objects.filter(\n ImportId__in=processed_imported.values_list('id', flat=True)[::1]).count()\n\n return render(request, self.template_name, {\n \"days\": days,\n \"blood_samples_cnt\": blood_samples_loaded.count(),\n \"blood_samples_imported\": blood_samples_imported_cnt,\n 'manifest_imported': manifest_imported_cnt,\n 'manifest_loaded_count': manifest_loaded.count(),\n 'receipt_imported': receipt_imported_cnt,\n \"receipt_loaded_cnt\": receipt_loaded.count(),\n 'processed_imported': processed_imported_cnt,\n 'active': day,\n 'shownextday': shownextday,\n 'reviewed': reviewed,\n 'class': 'uploadDay',\n 'no_of_files_uploaded': no_of_files_uploaded,\n })", "def get_blogs(request):\n address = request.POST.get('address')\n\n results = {\n \"sub\": [],\n \"mine\": [],\n \"browse\": []\n }\n my_blogs = Blog.objects.filter(~Q(msg=\"\"), address_from=address).order_by('-time')\n for m in my_blogs:\n results['mine'].append({\n \"address_from\": m.address_from,\n \"block_index\": m.block_index,\n \"tx_id\": m.tx_id,\n \"msg\": m.msg,\n \"key\": m.key,\n \"time\": m.time\n })\n\n my_sub_ids = [s.address for s in Subscription.objects.all()]\n\n sub_blogs = Blog.objects.filter(~Q(msg=\"\"), address_from__in=my_sub_ids).order_by(\"-time\")\n for m in sub_blogs:\n results['sub'].append({\n \"address_from\": m.address_from,\n \"block_index\": m.block_index,\n \"tx_id\": m.tx_id,\n \"msg\": m.msg,\n \"key\": m.key,\n \"time\": m.time\n })\n\n browsable_blogs = {}\n browse_blogs_db = Blog.objects.filter(~Q(address_from__in=my_sub_ids)).order_by('-time')\n for m in browse_blogs_db:\n if m.address_from not in browsable_blogs:\n browsable_blogs[m.address_from] = {\n \"address_from\": m.address_from,\n \"latest_post_time\": m.time,\n \"total_posts\": 1\n }\n else:\n browsable_blogs[m.address_from]['total_posts'] += 1\n\n results['browse'] = sorted(browsable_blogs.values(), key=lambda k: k['latest_post_time'])\n\n return HttpResponse(json.dumps({\n \"status\": \"success\",\n \"data\": results\n }, 
default=helpers.json_custom_parser), content_type='application/json')", "def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]", "def posts_info(self,soup,Urls_list,Likes,URLS,Date):\n \n while 1:\n time.sleep(0.2)\n post=soup.find_all('div',class_=\"by\") \n for i in post:\n l=i.find('span',id=re.compile(\"like_\"))\n Hr=i.find('a',href=re.compile(\"#footer_action_list\"))\n if Hr==None:\n Hr=i.find('a',href=re.compile(\"/story.php\"))\n \n \n d=i.find('abbr')\n \n if Hr!=None:\n Href=Hr['href']\n Href=Href.replace('https://m.facebook.com','')\n Href=Href.replace('https://mbasic.facebook.com','') \n Urls_list.append(Href)\n if d !=None:\n date=d.get_text()\n Date.append(date)\n else:\n Date.append('None')\n \n if l!=None: \n if l.get_text()!=None:\n likes=l.get_text()\n if likes==\"Like · React\":\n likes='0'\n else:\n likes=likes.replace('· Like · React','') \n likes=likes.replace(\"· Like\",'')\n likes=likes.replace(\"· Love\",'')\n likes=likes.replace(\"· Haha\",'')\n likes=likes.replace(\"· Care\",'')\n likes=likes.replace(\"· Wow\",'')\n likes=likes.replace(\"· Angry\",'')\n Likes.append(likes)\n else:\n Likes.append(\"0\")\n else:\n Likes.append(\"0\")\n \n \n more=self.more_page(soup)\n if more !=None:\n soup=self.get_page(more,session)\n \n else:\n break\n \n Urls_list,URLS=self.clean_url(Urls_list,URLS) \n \n return Urls_list,URLS,Likes,Date", "def get_queryset(self):\n\t\treturn Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]", "def get_queryset(self):\n\t\treturn Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]", "def get_queryset(self):\n\t\treturn EffortInstance.objects.order_by('-date_start')[:15]" ]
[ "0.7132085", "0.54012233", "0.5325321", "0.52955115", "0.5279257", "0.5191531", "0.5127942", "0.51076037", "0.5076983", "0.5066421", "0.5050685", "0.5035277", "0.5031984", "0.5012764", "0.50095993", "0.5009432", "0.49920762", "0.4978632", "0.49514946", "0.49369985", "0.49243346", "0.4885209", "0.48589903", "0.48373666", "0.48266935", "0.4817463", "0.48142654", "0.48093686", "0.48093686", "0.48078695" ]
0.6392885
1
Get a range of pages from the Solr index.
def getPageRange(base_url, node, page_range, page_size, from_date=None, to_date=None, delay=None): docs = None for p in page_range: print "Getting page %d" % (p) page_result = getPage(base_url, node, p, from_date=from_date, to_date=to_date) if docs is None: docs = page_result else: docs = docs.append(page_result) if delay is not None: time.sleep(delay) return docs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_page_range(self):\r\n return list(range(1, self.num_pages + 1))", "def __pages_range(self):\n return range(1, self.total_pages + 1)", "def index_range(page: int, page_size: int) -> tuple:\n start: int = (page - 1) * page_size\n end: int = page_size * page\n return (start, end)", "def _get_paginator_range(self, pages):\n range_start = pages.number - 5 if pages.number > 5 else 1\n if pages.number < (pages.paginator.num_pages - 4):\n range_end = pages.number + 4\n else:\n range_end = pages.paginator.num_pages\n return [i for i in range(range_start, range_end + 1)]", "def index_range(page: int, page_size: int) -> Tuple[int, int]:\n if page and page_size:\n start: int = (page - 1) * page_size\n end: int = start + page_size\n return (start, end)", "def page(self, pagenum):\r\n \r\n lower, upper = self.from_to(pagenum)\r\n return self.results[lower:upper]", "def index_range(page: int, page_size: int) -> Tuple[int, int]:\n return ((page-1) * page_size, page * page_size)", "def pages(self):\n # The page list comes in three sections. Given radius=3:\n # 0 1 2 ... n-2 n-1 n n+1 n+2 ... m-2 m-1 m\n # Alas, some caveats:\n # - These sections might overlap.\n # - The current page might not be integral.\n delta = self.radius - 1 # since the below two are off by one\n before_current = int(math.ceil(self.current_page - 1))\n after_current = int(math.floor(self.current_page + 1))\n pages = []\n\n # First through current\n if before_current - delta <= 1:\n pages.extend(range(0, before_current + 1))\n else:\n pages.append(None)\n pages.extend(range(\n before_current - delta, before_current + 1))\n\n # Current\n pages.append(self.current_page)\n\n # Current through end\n if self.last_page is None:\n # Don't know the last page. Show one more and ..., if appropriate\n if self.next_item and \\\n after_current * self.page_size <= self.maximum_skip:\n\n pages.append(after_current)\n pages.append(None)\n return pages\n\n if after_current + delta >= self.last_page - 1:\n pages.extend(range(\n after_current, self.last_page + 1))\n else:\n pages.extend(range(after_current, after_current + delta + 1))\n pages.append(None)\n\n return pages", "def index_range(page: int, page_size: int) -> Tuple[int, int]:\n return ((page_size*page) - page_size, page_size*page)", "def paginate_queryset(self, queryset, request, view=None):\n self.count = self.get_count(queryset)\n self.start_index = 0\n self.end_index = self.start_index + self.page_size - 1\n\n # TODO: this logic is repeated below...\n if self.end_index > self.count - 1:\n self.end_index = self.count - 1 if self.count else 0\n\n range_string = request.GET.get(self.range_query_param)\n\n if range_string:\n try:\n page_range = json.loads(range_string)\n except json.JSONDecodeError:\n return None\n\n if len(page_range) != 2:\n return None\n\n self.start_index, self.end_index = [pagination._positive_int(x) for x in page_range]\n\n if self.end_index > self.count - 1:\n self.end_index = self.count - 1 if self.count else 0\n\n if self.start_index > self.end_index:\n self.start_index = self.end_index\n\n return list(queryset[self.start_index:self.end_index + 1])", "def index_range(self, page: int, page_size: int) -> Tuple[int, int]:\n return ((page_size*page) - page_size, page_size*page)", "def get_page_list(self, offset=0, limit=50):\n return self._telegraph.method('getPageList', {\n 'offset': offset,\n 'limit': limit\n })", "def get_page(self, page: int = 1, page_size: int = 10) -> List[List]:\n assert isinstance(page, int) and page > 0\n assert isinstance(page_size, 
int) and page_size > 0\n self.dataset()\n index_tuple: Tuple = index_range(page, page_size)\n start_index: int = index_tuple[0]\n end_index: int = index_tuple[1]\n return self.__dataset[start_index:end_index]", "def get_paged_entries(start_page=0, pg_size=2):\n skip = pg_size * start_page\n start, end = 0 + skip, pg_size - 1 + skip\n while True:\n entries = r.zrevrange('entry_index', start, end)\n if not entries:\n break\n yield entries\n start, end = start + pg_size, end + pg_size", "def get_index_page(self, bucket_name, index_name, start_value, end_value,\n return_terms=False, max_results=None,\n continuation=None):\n # Get all matching results\n index = self._get_bucket(bucket_name)[\"indexes\"].get(index_name, [])\n if end_value is None:\n in_range = lambda v: v[0] == start_value\n else:\n in_range = lambda v: start_value <= v[0] <= end_value\n results = filter(in_range, index)\n\n # Drop all results we've returned previously, if any.\n if continuation is not None:\n continuation = tuple(json.loads(base64.b64decode(continuation)))\n while results and results[0] < continuation:\n results.pop(0)\n\n # If we're not paginated, we're done.\n if max_results is None:\n return self._return_index_page(\n bucket_name, index_name, start_value, end_value, return_terms,\n max_results, results, continuation=None)\n\n # Truncate the results and build the continuation token.\n continuation = None\n truncated_results = results[:max_results]\n if len(truncated_results) < len(results):\n continuation = base64.b64encode(json.dumps(results[max_results]))\n return self._return_index_page(\n bucket_name, index_name, start_value, end_value, return_terms,\n max_results, truncated_results, continuation=continuation)", "def get_pages(offset=None, limit=None):\n articles = list(pages)\n # assign section value if none was provided in the metas\n for article in articles:\n if not article.meta.get(\"section\"):\n article.meta[\"section\"] = article.path.split(\"/\")[0]\n\n # filter unpublished article\n if not app.debug:\n articles = [p for p in articles if p.meta.get(\"draft\") is not True]\n\n articles = sorted(articles, reverse=True, key=lambda p: p.meta[\"date\"])\n\n if offset and limit:\n return articles[offset:limit]\n elif limit:\n return articles[:limit]\n elif offset:\n return articles[offset:]\n else:\n return articles", "def index_pages():\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end", "def index_queryset(self):\r\n return Page.objects.all()", "def getPage(base_url, node, page=1, from_date=None, to_date=None, page_size=1000):\n\n\tidentifiers = []\n\tauthoritativeMNs = []\n\n\tparam_rows = page_size\n\tparam_start = (page - 1) * page_size\n\n\t# Construct query URL\n\tquery_url = base_url + \"/query/solr/?&q=formatType:METADATA+AND+-obsoletedBy:*\"\n\n\t# Extra query params\n\tif node is not None:\n\t\tnode_short_identifier = node.split(\":\")\n\t\tnode_short_identifier = node_short_identifier[len(node_short_identifier) - 1]\n\n\t\tquery_url = query_url + \"+AND+datasource:*\" + node_short_identifier\n\n\tif from_date is not None and to_date is None:\n\t\tquery_url += \"+AND+dateUploaded:[{}%20TO%20NOW]\".format(from_date)\n\n\tif from_date is None and to_date is not None:\n\t\tquery_url += \"+AND+dateUploaded:[1970-01-01T00:00:00.000Z%20TO%20{}]\".format(to_date)\n\n\tif from_date is not None and to_date is not None:\n\t\tquery_url += \"+AND+dateUploaded:[{}%20TO%20{}]\".format(from_date, 
to_date)\n\n\t# Fields and extra field params\n\tquery_url += \"&fl=identifier,authoritativeMN\"\n\n\tif from_date is not None or to_date is not None:\n\t\tquery_url += \",dateUploaded\"\n\n\t# Add rows and start parameter\n\tquery_url = query_url + \"&rows=\" + \\\n\t\tstr(param_rows) + \"&start=\" + str(param_start) + \"&wt=csv\"\n\n\tdocs = pandas.read_csv(query_url)\n\n\treturn docs", "def _get_pages(page_size, total_records):\r\n pages = total_records/page_size+bool(total_records%page_size)\r\n return range(1, pages+1)", "def range_query(self, start_key, end_key):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n\n print \"THIS IS FAKE\"\n pycastle_log.info(\"Doing range query from key \"+str(start_key)+\" to key \"+str(end_key))\n try:\n i = 0\n while i < 10:\n yield i\n i+=1\n if i % 5 == 0:\n pycastle_log.info(\"Getting next batch\")\n except GeneratorExit:\n pycastle_log.info(\"User requested stop of range query from key \"+str(start_key)+\" to key \"+str(end_key))", "def get_page(self, page: int = 1, page_size: int = 10) -> List[List]:\n assert isinstance(page, int) and page > 0\n assert isinstance(page_size, int) and page_size > 0\n\n range = index_range(page, page_size)\n self.dataset()\n return self.__dataset[range[0]: range[1]]", "def _query_range_get(self):\n return (self.query_start, self.query_end)", "def _get_pages(self,url,params,section):\n if self.verbose:\n print('Get Pages for {}'.format(url))\n print(params)\n page = 1\n maxPage = 1\n \n all_results = []\n this_batch = []\n while page <= maxPage: \n \n params['page']=page\n resp = self._get(url=url,params=params)\n maxPage = int(resp.headers.get('X-Total-Page-Count',0))\n try:\n results=resp.json()\n except:\n results=None\n if isinstance(results,(list,dict)):\n if 'errors' in results:\n print(results['errors'])\n return results\n \n this_batch = results[section]\n all_results.extend(this_batch)\n\n page+=1\n else:\n if self.verbose:\n print(\"PROBLEM\")\n return results\n\n return all_results", "def getRange(self, p_int): # real signature unknown; restored from __doc__\n pass", "def page_query(q):\n\toffset = 0\n\twhile True:\n\t\tr = False\n\t\tfor elem in q.limit(1000).offset(offset):\n\t\t r = True\n\t\t yield elem\n\t\toffset += 1000\n\t\tif not r:\n\t\t\tbreak", "def pages(worklist):\n pagination = SortKeyPagination(size=2)\n facets = Facets(\n self._default_library, None, None, order=Facets.ORDER_TITLE\n )\n pages = []\n while pagination:\n pages.append(worklist.works(\n self._db, facets, pagination, self.search\n ))\n pagination = pagination.next_page\n\n # The last page should always be empty -- that's how we\n # knew we'd reached the end.\n assert [] == pages[-1]\n\n # Return all the other pages for verification.\n return pages[:-1]", "def parallel_get_pages(args):\n n_requests, from_id, step, index_name, es = args\n all_sites_arr = []\n for _ in range(n_requests):\n waiting_response_time = 0\n for i in range(5):\n time.sleep(waiting_response_time)\n\n try:\n res = es.search(\n index=index_name,\n body={\n \"from\": from_id,\n \"query\": {\n \"match_all\": {}\n },\n \"size\": step,\n \"sort\": {\n \"site_id\": \"asc\"\n }\n },\n request_timeout=1000\n )\n print(\"Got %d Hits\" % len(res['hits']['hits']))\n\n for site in res['hits']['hits']:\n all_sites_arr.append({\n \"link\": site[\"_source\"][\"link\"],\n \"hyperlinks\": site[\"_source\"][\"hyperlinks\"]\n })\n\n break\n except TransportError as exc:\n print('index setup error', exc)\n\n waiting_response_time = math.exp(i + 1)\n\n 
from_id += step\n time.sleep(10)\n\n return all_sites_arr", "def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result", "def build_page(self):\n try:\n page_no = int(self.request.GET.get('page', 1))\n except (TypeError, ValueError):\n raise Http404(\"Not a valid number for page.\")\n\n if page_no < 1:\n raise Http404(\"Pages should be 1 or greater.\")\n\n start_offset = (page_no - 1) * self.results_per_page\n self.results[start_offset:start_offset + self.results_per_page]\n\n paginator = DiggPaginator(self.results, self.results_per_page)\n\n try:\n page = paginator.page(page_no)\n except InvalidPage:\n raise Http404(\"No such page!\")\n\n return (paginator, page)" ]
[ "0.72033846", "0.7023521", "0.6755472", "0.6510711", "0.6369775", "0.62843007", "0.6226141", "0.60758966", "0.6063477", "0.6036647", "0.59961975", "0.59557724", "0.5939197", "0.59337556", "0.59254843", "0.5922733", "0.590334", "0.58724624", "0.580953", "0.5803111", "0.57934695", "0.57743806", "0.57498217", "0.5726635", "0.56801987", "0.56487346", "0.56249815", "0.5618225", "0.5613617", "0.56067157" ]
0.73851967
0
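A minimal usage sketch for the getPageRange snippet above, assuming a DataONE-style coordinating node whose /query/solr endpoint returns CSV. The base URL, node identifier, page range, and date value are placeholders, and getPageRange (plus the getPage helper it calls) is assumed to be defined in the current module.

# Hypothetical call to getPageRange as defined in the document above.
base_url = "https://cn.dataone.org/cn/v2"   # assumed Solr-backed query endpoint
node = "urn:node:EXAMPLE"                   # assumed member node identifier
docs = getPageRange(base_url, node, range(1, 4), 1000,
                    from_date="2020-01-01T00:00:00.000Z", delay=1)
print(len(docs), "metadata records fetched across pages 1-3")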
Get the directory the script is being run from
def getScriptDirectory(): return os.path.dirname(os.path.realpath(__file__))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_script_directory():\n return os.path.dirname(__file__)", "def getScriptPath():\n\treturn os.path.dirname(os.path.realpath(sys.argv[0]))", "def get_current_directory():\n\treturn os.path.dirname(os.path.abspath(__file__))", "def _current_script_dir(self):\n if self._script_dir:\n return self._script_dir[-1]\n else:\n return None", "def GetScriptDirectory() -> str:\n if (hasattr(GetScriptDirectory, \"dir\")):\n return GetScriptDirectory.dir\n module_path: str = GetScriptFile()\n GetScriptDirectory.dir: str = os.path.dirname(module_path)\n return GetScriptDirectory.dir", "def get_base_dir(self):\n dir_of_this_file = os.path.dirname(os.path.abspath(__file__))\n return os.path.dirname(dir_of_this_file)", "def get_working_dir():\n working_dir = os.path.dirname(os.path.abspath(__file__))\n return working_dir", "def get_main_dir():\n return os.path.dirname(os.getcwd())", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def GetPackageDirectory():\n return os.path.dirname(__file__)", "def runner_path():\n git_base = os.popen('git rev-parse --show-toplevel').read().strip()\n return os.path.join(git_base, RUNNER_SCRIPT_BASENAME)", "def path_to_program_dir(self):\n\tpath = sys.argv[0]\n\n\tif not os.path.isdir(path):\n\t path = os.path.dirname(path)\n\n\tif not path: return '.'\n\n\treturn path", "def get_working_directory():\n return os.getcwd()", "def root_dir():\n return dirname(dirname(__file__))", "def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n return os.path.dirname(__file__)", "def this_folder():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n return os.path.dirname(__file__)", "def get_working_dir(self):\r\n return self.process.get_working_dir()", "def get_root_dir():\n return os.path.dirname(os.path.dirname(__file__))", "def directory_root():\n\timport os\n\treturn os.path.join(os.path.dirname(__file__), '../..')", "def program_dir():\n if (Win32() and (hasattr(sys, 'frozen') or imp.is_frozen('__main__'))):\n # running from exe generated by py2exe\n return os.path.dirname(sys.executable)\n else:\n return sys.path[0]\n # return os.path.dirname(os.path.abspath(sys.argv[0]))", "def get_qiime_scripts_dir():\r\n script_fp = which('print_qiime_config.py')\r\n\r\n if script_fp is None:\r\n raise ScriptsDirError(\"Could not find the directory containing QIIME \"\r\n \"scripts. QIIME scripts must be accessible via \"\r\n \"the PATH environment variable, and they must \"\r\n \"be executable. 
Please ensure that you have a \"\r\n \"valid QIIME installation (see the QIIME \"\r\n \"Installation Guide: \"\r\n \"http://qiime.org/install/install.html).\")\r\n\r\n return dirname(script_fp)", "def root_dir():\r\n return Path(__file__).parent.parent", "def root_dir():\n return os.path.dirname(os.path.realpath(__file__ + '/..'))", "def root_dir():\n return os.path.dirname(os.path.realpath(__file__ + '/..'))", "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "def scriptdir(follow_symlinks=True):\n if getattr(sys, 'frozen', False):\n path_ = path.abspath(scriptdir)\n else:\n path_ = getabsfile(scriptdir)\n\n if follow_symlinks:\n path_ = path.realpath(path_)\n\n return path.dirname(path_)", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "def get_root_directory() -> str:\n return \"{}/../\".format(get_cur_directory(__file__))" ]
[ "0.9032008", "0.85514134", "0.84030205", "0.8204221", "0.8180877", "0.79245794", "0.79154485", "0.7863617", "0.78479075", "0.7799003", "0.77155125", "0.77155006", "0.7712691", "0.7662973", "0.76583695", "0.7639003", "0.7633195", "0.7625982", "0.7607391", "0.7600444", "0.757811", "0.7538895", "0.75343823", "0.75343823", "0.7519481", "0.75046104", "0.75014496", "0.75014496", "0.7483665", "0.7463314" ]
0.8963206
1
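A quick, self-contained sketch of how the getScriptDirectory snippet above is typically used: building paths relative to the script file rather than the current working directory. The data file name is only an example.

import os

def getScriptDirectory():
    # Same one-liner as the snippet above: directory containing this file.
    return os.path.dirname(os.path.realpath(__file__))

# Build a path to a data file that sits next to the script, regardless of
# which directory the script was launched from.
data_path = os.path.join(getScriptDirectory(), "data", "input.csv")
print(data_path)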
Returns a list of the .pyx files in the given path. If abspath is true, the list holds the absolute path of each file; otherwise it holds only the file names.
def get_pyx_files(path, abspath=True): path = os.path.normpath(os.path.abspath(path)) to_compile = [] for name in os.listdir(path): if name.endswith(".pyx"): pyx = os.path.join(path,name) if os.path.isfile(pyx): to_compile.append(pyx if abspath else name) return to_compile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_of_files(path):\r\n files_list=[]\r\n path = os.path.abspath(path)\r\n\r\n #if the path is a file name, returns a list of a single file name\r\n if os.path.isfile(path):\r\n files_list.append(path)\r\n #if the path is a directory name, returns a list of all the file names anded with .asm\r\n else:\r\n for file in os.listdir(path):\r\n if file.endswith(\".asm\"):\r\n files_list.append(os.path.join(path, file))\r\n return files_list", "def find(cls, paths):\r\n pythons = []\r\n for path in paths:\r\n for fn in cls.expand_path(path):\r\n basefile = os.path.basename(fn)\r\n if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN):\r\n try:\r\n pythons.append(cls.from_binary(fn))\r\n except Exception as e:\r\n TRACER.log('Could not identify %s: %s' % (fn, e))\r\n continue\r\n return pythons", "def lista_arquivos_imagem(caminho):\n import os\n arquivos = os.listdir(caminho)\n arquivos_tmp = []\n for file_name in arquivos:\n arquivos_tmp.append(file_name)\n\n image_extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg', 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n # filtar somente os arquivos que contenham as extensões de imagens compatíves com opencv\n arquivos = ['%s%s.%s' % f for f in arquivos_tmp if f[2] in image_extensions]\n return arquivos", "def scandir(path_):\n return os.listdir", "def expand(self, path_list):\n path_list2 = []\n for path in path_list:\n if glob.has_magic(path):\n iterator = glob.iglob(path)\n path_list2.extend(iterator)\n else:\n path_list2.append(path)\n return path_list2", "def buildExecutablesList( path ):\n\n result = []\n for item in os.listdir( path ):\n candidate = path + item\n if not os.path.islink( candidate ):\n continue # Not a symlink at all\n if not os.path.exists( candidate ):\n logging.warning( \"Broken symlink detected: \" + candidate )\n continue # Broken link\n if not os.access( candidate, os.X_OK ):\n logging.warning( \"Symlink to a non-executable file: \" + candidate )\n continue # No permissions to execute\n\n result.append( candidate )\n return result", "def assemble_files():\r\n path = os.path.expanduser(sys.argv[1])\r\n if os.path.isdir(path):\r\n file_root = path + \"/\"\r\n for file in os.listdir(path):\r\n filename = os.path.splitext(file)\r\n if filename[1] == \".asm\":\r\n hack_file_name = file_root + filename[0] + \".hack\"\r\n assemble_file(file_root + file, hack_file_name)\r\n else:\r\n filename = os.path.splitext(path)\r\n hack_file_name = filename[0] + \".hack\"\r\n assemble_file(path, hack_file_name)", "def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]", "def _resolve_paths(paths):\n allowed_ext = tuple(MIMES.keys())\n\n resolved = []\n for path in paths:\n if os.path.isdir(path):\n resolved.extend(\n entry.path for entry in os.scandir(path)\n if entry.is_file() and entry.name.lower().endswith(allowed_ext)\n )\n elif os.path.isfile(path) and path.lower().endswith(allowed_ext):\n resolved.append(path)\n return resolved", "def _ClassifyPaths(self, paths):\n arch_paths = []\n obj_paths = []\n bc_paths = []\n for path in paths:\n if path.endswith('.a') or path.endswith('.rlib'):\n # .a files are typically system libraries containing .o files that are\n # ELF files (and never BC files).\n arch_paths.append(path)\n elif bcanalyzer.IsBitcodeFile(os.path.join(self._output_directory, path)):\n # Chromium build tools create BC files with .o extension. 
As a result,\n # IsBitcodeFile() is needed to distinguish BC files from ELF .o files.\n bc_paths.append(path)\n else:\n obj_paths.append(path)\n return _PathsByType(arch=arch_paths, obj=obj_paths, bc=bc_paths)", "def compile_files(root):\n files = [os.path.join(root, f) for f in os.listdir(root) if not f.startswith(\".\")]\n \n return files", "def FindCheckerFiles(path):\n if not path:\n Logger.fail(\"No source path provided\")\n elif os.path.isfile(path):\n return [ path ]\n elif os.path.isdir(path):\n foundFiles = []\n for root, dirs, files in os.walk(path):\n for file in files:\n extension = os.path.splitext(file)[1]\n if extension in [\".java\", \".smali\"]:\n foundFiles.append(os.path.join(root, file))\n return foundFiles\n else:\n Logger.fail(\"Source path \\\"\" + path + \"\\\" not found\")", "def get_c_files(path):\n clist = []\n for file in os.listdir(path):\n if file.endswith(\".cc\") or file.endswith(\".c\"):\n clist.append(\"%s/%s\" % (path, file))\n return clist", "def __return_movie_file_list(self, movie_path):\n movie_dir = movie_path.rsplit(\"/\",1)[0]\n movie_file_list =[]\n movie_extentionds = self.__movie_file_extensions(self.__file_extentions)\n for x in os.listdir(movie_dir):\n if x.rsplit(\".\",1)[-1]in movie_extentionds:\n movie_file_list.append(movie_dir+\"/\"+x)\t\t\n\t#USUNAC URL Z NAPISY24\n return movie_file_list", "def _fetch_all_images(self, path) -> List[str]:\n files_all = []\n\n for ext in self.exts:\n files_all.extend(glob.glob(join(path, ext)))\n\n return files_all", "def lsFiles(ruta = getcwd()):\r\n files = [arch.name for arch in scandir(ruta) if arch.is_file()]\r\n return files", "def in_filepath_list(class_paths: List[str]) -> List:\n registry, not_founds = build_registry(class_paths)\n builder = FilepathListBuilder()\n source = builder.build(registry)\n\n return [source, not_founds]", "def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out the 2D list to a 1D list \r\n return image_paths", "def find(self, path_list):\n import fnmatch\n path_list2 = []\n for pattern in path_list:\n for root, _, filenames in os.walk('.'):\n for filename in fnmatch.filter(filenames, pattern):\n path_list2.append(os.path.join(root, filename))\n return path_list2", "def test__build_paths():\n files1 = [\"file1\", \"file2\"]\n userdefined_path = classifier_module.DATA_PATH + classifier_module.USER_DIR\n\n expected_out_files1 = [operator.add(userdefined_path, file)\n for file in files1]\n out_files1 = classifier_module.Classifier._build_paths(files=files1,\n system_bitness=None)\n\n assert len(out_files1) == len(expected_out_files1)\n\n for file_num in range(len(out_files1)):\n assert out_files1[file_num] == expected_out_files1[file_num]", "def compile_dir(path):\r\n to_compile = get_pyx_files(path)\r\n print(\"De:\",path)\r\n if to_compile:\r\n print(\"Se compilaran:\", list(map(os.path.basename,to_compile)))\r\n Cythonize.main( ['-a', '-i'] + to_compile )\r\n else:\r\n print(\"Nada para compilar\")", "def _get_files(path, file, modality):\n p = Path(path)\n res = [p/o for o in file if not o.startswith('.') and is_mods(o, modality)]\n assert len(res)==len(modality) #TODO: Assert message\n return res", "def _path_files(self):\n\n if not 
os.path.exists(self.path):\n return None\n\n directory_content = os.listdir(self.path)\n files = []\n\n while len(directory_content) != 0:\n\n if not directory_content[0].startswith(self.path):\n directory_obj = os.path.join(self.path, directory_content[0])\n else:\n directory_obj = directory_content[0]\n\n if os.path.isfile(directory_obj):\n files.append(directory_obj)\n elif os.path.exists(directory_obj):\n temp_directory_content = os.listdir(directory_obj)\n for obj in temp_directory_content:\n directory_content.append(os.path.join(directory_obj, obj))\n directory_content.pop(0)\n\n return files", "def test_paths_to_plates():\n output = filelister_yoko.paths_to_plates(TEST_PATH_YOKO)\n prefix = os.path.abspath(TEST_PATH_YOKO)\n plate_names = [\"screen-name-batch1_20190213_095340/A000002-PC\"]\n make_own = [os.path.join(prefix, name) for name in plate_names]\n assert len(output) == len(plate_names)\n for ans in output:\n assert ans in make_own", "def path_to_bin_files(path):\r\n files_list=list_of_files(path)\r\n for file in files_list:\r\n asm_lines = parse_data(file)\r\n symbols_dict = init_symbols_dictionary()\r\n collect_symbols_and_ignore_coments(asm_lines, symbols_dict)\r\n bin_lines = translate_to_binary(asm_lines, symbols_dict)\r\n create_output(bin_lines, file)", "def walk_dir(path):\r\n\tassets = []\r\n\r\n\tfor file in os.listdir(path):\r\n\t\tif os.path.isdir(path + \"/\" + file):\r\n\t\t\tif not file.startswith(\".\"):\r\n\t\t\t\t# Ignore . dirs (e.g .svn)\r\n\t\t\t\tassets.extend(walk_dir(path + \"/\" + file))\r\n\t\telif file.endswith('.blend'):\r\n\t\t\tassets.append(path + \"/\" + file)\r\n\r\n\treturn assets", "def filter_paths(path):\n return [\"{}/{}\".format(path, f) for f in os.listdir(path) if\n f.endswith(FILE_EXTENSION_VM)]", "def test_paths_to_plates():\n output = filelister_ix.paths_to_plates(TEST_PATH_IX)\n prefix = os.path.abspath(TEST_PATH_IX)\n plate_names = [\"test-plate-1\", \"test-plate-2\",\n \"test-plate-3\", \"test-plate-4\"]\n make_own = [os.path.join(prefix, name) for name in plate_names]\n assert len(output) == len(plate_names)\n for ans in output:\n assert ans in make_own", "def obter_lista_arquivos(self):\n if os.path.exists(self.caminho):\n return [arq for arq in self.obter_lista_conteudo() \\\n if os.path.isfile(arq)]\n else:\n return []", "def buildListOfFiles(searchGlob):\n return [fpath for fpath in glob2.iglob(searchGlob) if os.path.isfile(fpath)]" ]
[ "0.63147795", "0.61795086", "0.616547", "0.6125532", "0.60973084", "0.6002462", "0.59990036", "0.59049416", "0.5877905", "0.5865475", "0.5860523", "0.58581746", "0.57794094", "0.5762498", "0.57599723", "0.5751733", "0.5741181", "0.5737314", "0.5728935", "0.5725836", "0.5716832", "0.57117045", "0.571037", "0.56809676", "0.56760645", "0.56719446", "0.5669529", "0.5660028", "0.5654362", "0.56516993" ]
0.6943939
0
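A short usage sketch for the get_pyx_files snippet above; the "src" directory is a placeholder, and the function is assumed to be in scope.

# Hypothetical call to get_pyx_files as defined in the document above.
pyx_abs = get_pyx_files("src")                    # absolute paths to each .pyx file
pyx_names = get_pyx_files("src", abspath=False)   # bare file names only
for name in pyx_names:
    print("found Cython source:", name)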
Function to cythonize all the .pyx files in the given path, in place.
def compile_dir(path): to_compile = get_pyx_files(path) print("De:",path) if to_compile: print("Se compilaran:", list(map(os.path.basename,to_compile))) Cythonize.main( ['-a', '-i'] + to_compile ) else: print("Nada para compilar")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compile_cutils():\r\n\r\n types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',\r\n 'int256', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\r\n 'float16', 'float32', 'float64', 'float80', 'float96', 'float128',\r\n 'float256']]\r\n\r\n complex_types = ['npy_' + t for t in ['complex32', 'complex64',\r\n 'complex128', 'complex160', 'complex192', 'complex512']]\r\n\r\n inplace_map_template = \"\"\"\r\n #if defined(%(typen)s)\r\n static void %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)\r\n {\r\n int index = mit->size;\r\n while (index--) {\r\n %(op)s\r\n\r\n PyArray_MapIterNext(mit);\r\n PyArray_ITER_NEXT(it);\r\n }\r\n }\r\n #endif\r\n \"\"\"\r\n\r\n floatadd = \"((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];\"\r\n complexadd = \"\"\"\r\n ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;\r\n ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;\r\n \"\"\"\r\n\r\n fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': floatadd % {'type': t}}\r\n for t in types] +\r\n [inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': complexadd % {'type': t}}\r\n for t in complex_types])\r\n\r\n fn_array = (\"static inplace_map_binop addition_funcs[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(type)s_inplace_add,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"\"\"NULL};\r\n \"\"\")\r\n\r\n type_number_array = (\"static int type_numbers[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(typen)s,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"-1000};\")\r\n\r\n code = (\"\"\"\r\n #include <Python.h>\r\n #include \"numpy/arrayobject.h\"\r\n\r\n extern \"C\"{\r\n static PyObject *\r\n run_cthunk(PyObject *self, PyObject *args)\r\n {\r\n PyObject *py_cthunk = NULL;\r\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\r\n return NULL;\r\n\r\n if (!PyCObject_Check(py_cthunk)) {\r\n PyErr_SetString(PyExc_ValueError,\r\n \"Argument to run_cthunk must be a PyCObject.\");\r\n return NULL;\r\n }\r\n void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\r\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\r\n void* it = PyCObject_GetDesc(py_cthunk);\r\n int failure = fn(it);\r\n\r\n return Py_BuildValue(\"i\", failure);\r\n }\r\n\r\n #if NPY_API_VERSION >= 0x00000008\r\n typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);\r\n \"\"\" + fns + fn_array + type_number_array +\r\n\r\n\"\"\"\r\nstatic int\r\nmap_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inplace)\r\n{\r\n PyArrayObject *arr = NULL;\r\n PyArrayIterObject *it;\r\n PyArray_Descr *descr;\r\n if (mit->ait == NULL) {\r\n return -1;\r\n }\r\n descr = PyArray_DESCR(mit->ait->ao);\r\n Py_INCREF(descr);\r\n arr = (PyArrayObject *)PyArray_FromAny(op, descr,\r\n 0, 0, NPY_ARRAY_FORCECAST, NULL);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n if ((mit->subspace != NULL) && (mit->consec)) {\r\n PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n }\r\n it = (PyArrayIterObject*)\r\n PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);\r\n if (it == NULL) {\r\n Py_DECREF(arr);\r\n return -1;\r\n }\r\n\r\n (*add_inplace)(mit, it);\r\n\r\n Py_DECREF(arr);\r\n 
Py_DECREF(it);\r\n return 0;\r\n}\r\n\r\n\r\nstatic PyObject *\r\ninplace_increment(PyObject *dummy, PyObject *args)\r\n{\r\n PyObject *arg_a = NULL, *index=NULL, *inc=NULL;\r\n PyArrayObject *a;\r\n inplace_map_binop add_inplace = NULL;\r\n int type_number = -1;\r\n int i =0;\r\n PyArrayMapIterObject * mit;\r\n\r\n if (!PyArg_ParseTuple(args, \"OOO\", &arg_a, &index,\r\n &inc)) {\r\n return NULL;\r\n }\r\n if (!PyArray_Check(arg_a)) {\r\n PyErr_SetString(PyExc_ValueError, \"needs an ndarray as first argument\");\r\n return NULL;\r\n }\r\n\r\n a = (PyArrayObject *) arg_a;\r\n\r\n if (PyArray_FailUnlessWriteable(a, \"input/output array\") < 0) {\r\n return NULL;\r\n }\r\n\r\n if (PyArray_NDIM(a) == 0) {\r\n PyErr_SetString(PyExc_IndexError, \"0-d arrays can't be indexed.\");\r\n return NULL;\r\n }\r\n type_number = PyArray_TYPE(a);\r\n\r\n\r\n\r\n while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){\r\n if (type_number == type_numbers[i]) {\r\n add_inplace = addition_funcs[i];\r\n break;\r\n }\r\n i++ ;\r\n }\r\n\r\n if (add_inplace == NULL) {\r\n PyErr_SetString(PyExc_TypeError, \"unsupported type for a\");\r\n return NULL;\r\n }\r\n mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);\r\n if (mit == NULL) {\r\n goto fail;\r\n }\r\n if (map_increment(mit, inc, add_inplace) != 0) {\r\n goto fail;\r\n }\r\n\r\n Py_DECREF(mit);\r\n\r\n Py_INCREF(Py_None);\r\n return Py_None;\r\n\r\nfail:\r\n Py_XDECREF(mit);\r\n\r\n return NULL;\r\n}\r\n #endif\r\n\r\n\r\n static PyMethodDef CutilsExtMethods[] = {\r\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\r\n \"Run a theano cthunk.\"},\r\n #if NPY_API_VERSION >= 0x00000008\r\n {\"inplace_increment\", inplace_increment,\r\n METH_VARARGS,\r\n \"increments a numpy array inplace at the passed indexes.\"},\r\n #endif\r\n {NULL, NULL, 0, NULL} /* Sentinel */\r\n };\"\"\")\r\n\r\n if PY3:\r\n # This is not the most efficient code, but it is written this way to\r\n # highlight the changes needed to make 2.x code compile under python 3.\r\n code = code.replace(\"<Python.h>\", '\"numpy/npy_3kcompat.h\"', 1)\r\n code = code.replace(\"PyCObject\", \"NpyCapsule\")\r\n code += \"\"\"\r\n static struct PyModuleDef moduledef = {\r\n PyModuleDef_HEAD_INIT,\r\n \"cutils_ext\",\r\n NULL,\r\n -1,\r\n CutilsExtMethods,\r\n };\r\n\r\n PyMODINIT_FUNC\r\n PyInit_cutils_ext(void) {\r\n import_array();\r\n return PyModule_Create(&moduledef);\r\n }\r\n }\r\n \"\"\"\r\n else:\r\n code += \"\"\"\r\n PyMODINIT_FUNC\r\n initcutils_ext(void)\r\n {\r\n import_array();\r\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\r\n }\r\n } //extern C\r\n \"\"\"\r\n\r\n loc = os.path.join(config.compiledir, 'cutils_ext')\r\n if not os.path.exists(loc):\r\n os.mkdir(loc)\r\n\r\n args = cmodule.GCC_compiler.compile_args()\r\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\r\n preargs=args)", "def run_cython(args):\n args = magic.arg_split(args, posix=True)\n filename = args.pop()\n if '--force' not in args:\n args.append('--force')\n ip = get_ipython()\n ip.extension_manager.load_extension('cython')\n with io.open(filename, 'r', encoding='utf-8') as f:\n ip.run_cell_magic('cython', ' '.join(args), f.read())", "def get_pyx_files(path, abspath=True):\r\n path = os.path.normpath(os.path.abspath(path))\r\n to_compile = []\r\n for name in os.listdir(path):\r\n if name.endswith(\".pyx\"):\r\n pyx = os.path.join(path,name)\r\n if os.path.isfile(pyx):\r\n to_compile.append(pyx if abspath else name)\r\n return to_compile", "def pyo():\n local('python 
-O -m compileall .')", "def pyo():\n local('python -O -m compileall .')", "def build_from_c_and_cpp_files(extensions):\n for extension in extensions:\n sources = []\n for sfile in extension.sources:\n path, ext = os.path.splitext(sfile)\n if ext in ('.pyx', '.py'):\n if extension.language == 'c++':\n ext = '.cpp'\n else:\n ext = '.c'\n sfile = path + ext\n sources.append(sfile)\n extension.sources = sources", "def cython(self, line, cell):\n from sage.misc.cython_c import cython_compile\n return cython_compile(cell)", "def generate_cython_transpile(self, target: build.BuildTarget) -> \\\n T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.List[str]]:\n static_sources: T.MutableMapping[str, File] = OrderedDict()\n generated_sources: T.MutableMapping[str, File] = OrderedDict()\n cython_sources: T.List[str] = []\n\n cython = target.compilers['cython']\n\n args: T.List[str] = []\n args += cython.get_always_args()\n args += cython.get_buildtype_args(target.get_option(OptionKey('buildtype')))\n args += cython.get_debug_args(target.get_option(OptionKey('debug')))\n args += cython.get_optimization_args(target.get_option(OptionKey('optimization')))\n args += cython.get_option_compile_args(target.get_options())\n args += self.build.get_global_args(cython, target.for_machine)\n args += self.build.get_project_args(cython, target.subproject, target.for_machine)\n args += target.get_extra_args('cython')\n\n ext = target.get_option(OptionKey('language', machine=target.for_machine, lang='cython'))\n\n pyx_sources = [] # Keep track of sources we're adding to build\n\n for src in target.get_sources():\n if src.endswith('.pyx'):\n output = os.path.join(self.get_target_private_dir(target), f'{src}.{ext}')\n element = NinjaBuildElement(\n self.all_outputs, [output],\n self.compiler_to_rule_name(cython),\n [src.absolute_path(self.environment.get_source_dir(), self.environment.get_build_dir())])\n element.add_item('ARGS', args)\n self.add_build(element)\n # TODO: introspection?\n cython_sources.append(output)\n pyx_sources.append(element)\n else:\n static_sources[src.rel_to_builddir(self.build_to_src)] = src\n\n header_deps = [] # Keep track of generated headers for those sources\n for gen in target.get_generated_sources():\n for ssrc in gen.get_outputs():\n if isinstance(gen, GeneratedList):\n ssrc = os.path.join(self.get_target_private_dir(target), ssrc)\n else:\n ssrc = os.path.join(gen.get_subdir(), ssrc)\n if ssrc.endswith('.pyx'):\n output = os.path.join(self.get_target_private_dir(target), f'{ssrc}.{ext}')\n element = NinjaBuildElement(\n self.all_outputs, [output],\n self.compiler_to_rule_name(cython),\n [ssrc])\n element.add_item('ARGS', args)\n self.add_build(element)\n pyx_sources.append(element)\n # TODO: introspection?\n cython_sources.append(output)\n else:\n generated_sources[ssrc] = mesonlib.File.from_built_file(gen.get_subdir(), ssrc)\n # Following logic in L883-900 where we determine whether to add generated source\n # as a header(order-only) dep to the .so compilation rule\n if not self.environment.is_source(ssrc) and \\\n not self.environment.is_object(ssrc) and \\\n not self.environment.is_library(ssrc) and \\\n not modules.is_module_library(ssrc):\n header_deps.append(ssrc)\n for source in pyx_sources:\n source.add_orderdep(header_deps)\n\n return static_sources, generated_sources, cython_sources", "def cythonize_extensions(extensions):\n from Cython.Build import cythonize\n with cd(config.script_dir/'src'):\n cythonized = cythonize(\n extensions,\n language_level=3,\n 
nthreads=4,\n annotate=config.debug,\n # https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#compiler-directives # noqa: E501\n compiler_directives={\n 'binding': True,\n 'boundscheck': False,\n 'wraparound': False,\n 'profile': config.debug and not config.pypy,\n 'linetrace': config.debug and not config.pypy,\n 'always_allow_keywords': True,\n 'embedsignature': True,\n 'emit_code_comments': True,\n 'initializedcheck': False,\n 'nonecheck': False,\n 'optimize.use_switch': True,\n # Warns about any variables that are implicitly declared\n # without a cdef declaration\n 'warn.undeclared': False,\n 'warn.unreachable': True,\n 'warn.maybe_uninitialized': False,\n 'warn.unused': True,\n 'warn.unused_arg': False,\n 'warn.unused_result': False,\n 'warn.multiple_declarators': True,\n },\n )\n for cy in cythonized:\n cy.sources[0] = 'src/' + cy.sources[0]\n return cythonized", "def compile_dir_with_numpy(path, cleanup=True):\r\n from distutils.core import setup\r\n import numpy\r\n path = os.path.normpath(os.path.abspath(path))\r\n temp = os.path.join(path,\".temp_build\")\r\n with redirect_sys_argv(os.path.join(path,\"make_virtual_script.py\"), \"build_ext\", \"--inplace\", \"-t\", temp):\r\n setup(\r\n ext_modules = cythonize(\"./*.pyx\", annotate=True),\r\n include_dirs=[numpy.get_include()]\r\n )\r\n if cleanup and os.path.exists(temp):\r\n shutil.rmtree(temp)", "def clsources(ctx):\n\t# TODO Use regex instead\n\t# this is a poor-man's hack, it relies on the assumption that files\n\t# start and end with parenthesis. Put a comment before, and you generate\n\t# wrong code. But the source is under my control anyways.\n\tfloat_srcfolder = ctx.path.find_node(inc + \"/ukoct/opencl/float/sources\")\n\tcommon_clsources = ctx.path.ant_glob(inc + \"/ukoct/opencl/common/**/*.cl\")\n\tfloat_clsources = [\n\t\tfloat_srcfolder.find_node(\"defs.inc.cl\"),\n\t\tfloat_srcfolder.find_node(\"coherent.inc.cl\"),\n\t\tfloat_srcfolder.find_node(\"consistent.inc.cl\"),\n\t\tfloat_srcfolder.find_node(\"intConsistent.inc.cl\"),\n\t\tfloat_srcfolder.find_node(\"closed.inc.cl\"),\n\t\tfloat_srcfolder.find_node(\"stronglyClosed.inc.cl\"),\n\t]\n\tfloat_clsource = []\n\tfor node in common_clsources:\n\t\tsource = node.read()\n\t\tfloat_clsource.append(source[source.find(\"(\") + 1 : source.rfind(\")\")])\n\tfor node in float_clsources:\n\t\tsource = node.read()\n\t\tfloat_clsource.append(source[source.find(\"(\") + 1 : source.rfind(\")\")])\n\tctx.path.make_node(\"float.cl\").write(''.join(float_clsource))", "def define_extensions(use_cython, use_openmp):\n if sys.platform.startswith('win'):\n # compile args from\n # https://msdn.microsoft.com/en-us/library/fwkeyyhe.aspx\n link_args = []\n compile_args = ['/O2', '/openmp']\n else:\n link_args = []\n compile_args = ['-Wno-unused-function', '-Wno-maybe-uninitialized', '-O3', '-ffast-math']\n if use_openmp:\n compile_args.append('-fopenmp')\n link_args.append('-fopenmp')\n\n if 'anaconda' not in sys.version.lower():\n compile_args.append('-march=native')\n\n # recommended approach is that the user can choose not to\n # compile the code using cython, they can instead just use\n # the .c file that's also distributed\n # http://cython.readthedocs.io/en/latest/src/reference/compilation.html#distributing-cython-modules\n src_ext = '.pyx' if use_cython else '.c'\n names = ['pairwise3']\n modules = [Extension(name,\n [os.path.join(name + src_ext)],\n extra_compile_args = compile_args,\n extra_link_args = link_args) for name in names]\n\n if 
use_cython:\n return cythonize(modules)\n else:\n return modules", "def main():\n print(\n \"\"\"\n\n ##########################################################\n # #\n # #\n # Compiling Colocalized Cyano Datasets #\n # #\n # #\n ##########################################################\n\n \n \"\"\"\n )\n cyanoFiles = glob.glob(f\"{COLOCALIZED_DIR}*.csv\")\n makedir(COMPILED_DIR)\n dfCompiled = pd.DataFrame({})\n for cyanoFile in cyanoFiles:\n print(f\"Compiling {cyanoFile}\")\n data = unify(cyanoFile)\n if len(dfCompiled ) < 1:\n dfCompiled = data\n else:\n dfCompiled = pd.concat([dfCompiled, data], ignore_index=True) \n dfCompiled.to_csv(f\"{COMPILED_DIR}compiled.csv\", index=False)", "def compile(self,**attrs):\n\n\n\t\tpath_to_txt = os.path.join(self.path_raw,'COCA Text')\n\t\tpath_to_sources = os.path.join(self.path_raw,'coca-sources_2017_12.txt')\n\n\t\tif not os.path.exists(path_to_txt) or not os.path.exists(path_to_sources):\n\t\t\tprint(f'Place in {self.path_raw} the following files:\\n * COCA Text\\n * coca-sources_2017_12.txt')\n\t\t\treturn\n\n\t\t#txt\n\t\tself.compile_txt()\n\t\t# metadata\n\t\tself.compile_metadata()", "def process_input_files(inputs):\n for ifile in inputs:\n with open(ifile) as fin:\n exec(compile(fin.read(), ifile, 'exec'))", "def useCython():\n global kernels_imp\n if HAS_CYTHON:\n import _kernels\n kernels_imp = _kernels", "def _compile_C_code(header, body, return_unloaded=False, verbose=False):\n import importlib\n import tempfile\n import uuid\n\n import cffi\n\n module_name = \"module_\" + uuid.uuid4().hex\n\n if \"__uint128\" in header:\n raise ValueError(\"_compile_C_code does not support bit-vector widths \"\n \"larger than 64 bits (cffi does not support __uint128)\")\n\n ffibuilder = cffi.FFI()\n ffibuilder.cdef(header)\n ffibuilder.set_source(module_name, body)\n\n tmpdir = tempfile.TemporaryDirectory()\n lib_path = ffibuilder.compile(tmpdir=tmpdir.name, verbose=verbose)\n\n if return_unloaded:\n return lib_path, module_name, tmpdir\n\n # dynamic import\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n spec = importlib.util.spec_from_file_location(module_name, lib_path)\n pymod_parent = importlib.util.module_from_spec(spec)\n # sys.modules[module_name] = module\n spec.loader.exec_module(pymod_parent)\n\n pymod = pymod_parent\n\n return pymod, tmpdir", "def process_tempita(source_name):\n if source_name.endswith(\"pyx.in\"):\n with open(source_name, \"r\", encoding=\"utf-8\") as templated:\n pyx_template = templated.read()\n pyx = Tempita.sub(pyx_template)\n pyx_filename = source_name[:-3]\n with open(pyx_filename, \"w\", encoding=\"utf-8\") as pyx_file:\n pyx_file.write(pyx)\n file_stats = os.stat(source_name)\n try:\n os.utime(\n pyx_filename,\n ns=(file_stats.st_atime_ns, file_stats.st_mtime_ns),\n )\n except AttributeError:\n os.utime(pyx_filename, (file_stats.st_atime, file_stats.st_mtime))\n source_name = pyx_filename\n return source_name", "def cross_compile(*args, **kwargs):\n return compile(*args, **kwargs)", "def compile(self):\n for layer in self.layers:\n layer._Dense__load()", "def compile_modules(base, output, source, bind=True):\n return compile_files(base, output, source, bind, amd=True)", "def decompile():\n #list of files to decompile and results decompile\n dataprocessor_files = []\n\n #list of files to decompile and results decompile for 1C v7.7\n dataprocessor_files_v7 = []\n\n #list of files to decompile and results decompile for 1C MD\n dataprocessor_files_MD = []\n\n #set the exit 
code\n exit_code = 0\n\n #Find datapocessor files\n for filename in get_list_of_comitted_files():\n #Check the file extensions\n logging.info(\"file to check %s\" % filename)\n if filename[-3:] == \"ert\":\n dataprocessor_files_v7.append(filename)\n logging.info(\"file %s\" % filename)\n continue \n if filename[-3:] in ['.MD','.md']:\n dataprocessor_files_MD.append(filename)\n logging.info(\"file %s\" % filename)\n continue \n\n dirsource = os.path.abspath(os.path.join(os.path.curdir, \"src\"))\n curabsdirpath = os.path.abspath(os.path.curdir) \n\n if len(dataprocessor_files) > 0:\n #pathbin1c = \"C:\\\\Program Files\\\\1cv82\\8.2.17.153\\\\bin\\\\1cv8.exe\"\n #pathbin1c = \"c:\\\\Program Files (x86)\\\\1cv8\\\\8.3.4.304\\\\bin\\\\1cv8.exe\"\n pathbin1c = get_path_to_1c()\n\n if len(dataprocessor_files_v7) > 0:\n for filename in dataprocessor_files_v7:\n print(\"ert file %s\" % filename)\n #TODO: добавить копирование этих же файлов в каталог src/имяфайла/...\n #get file name.\n fullpathfile = os.path.abspath(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n fullbasename = os.path.basename(filename)\n newdirname = os.path.dirname(filename)\n\n print(\"ert file %s\" % fullpathfile )\n\n #Скопируем сначало просто структуру каталогов.\n if not os.path.exists(dirsource):\n os.makedirs(dirsource)\n #для каждого файла определим новую папку.\n newsourcepath = os.path.join(dirsource, newdirname)\n newpath2 = os.path.join(newsourcepath, basename)\n if not os.path.exists(newsourcepath):\n logging.info(\"create new dir %s\" % newsourcepath)\n os.makedirs(newsourcepath)\n #print(\"curabsdirpath %s\" % curabsdirpath)\n #print(\"newpath2 %s\" % newpath2)\n #print(\"basename %s\" % basename)\n\n t1 = format(\"gcomp -q -d -F %s -D %s -v --no-ini --no-version --no-empty-mxl\" % (filename, newsourcepath))\n result = subprocess.check_call(['cmd.exe', '/C', t1]) \n #изменим кодировку cp1251 на utf-8 \n #утилита iconv.exe должна запускаться в cmd = добавлена в PATH\t\t\t\n #файлов 1s, mdp, frm, txt\n t3 = 'bash .git/hooks/convert_utf8.sh {0}'.format( newpath2 )\n print(\"t3 = %s\" % t3)\n logging.info(\"CONVERT: %s\" % t3)\n result = subprocess.check_call(['cmd.exe', '/C', t3])\n #result = subprocess.check_call(['git', 'add', '--all', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.1s', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.frm', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.mxl', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.utf', newsourcepath])\n if not result == 0:\n logging.error(result)\n exit(result)\n\n if len(dataprocessor_files_MD) > 0:\n for filename in dataprocessor_files_MD:\n print(\"MD file %s\" % filename)\n #TODO: добавить копирование этих же файлов в каталог src/имяфайла/...\n #get file name.\n fullpathfile = os.path.abspath(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n fullbasename = os.path.basename(filename)\n newdirname = os.path.dirname(filename)\n \n #Скопируем сначало просто структуру каталогов.\n if not os.path.exists(dirsource):\n os.makedirs(dirsource)\n #для каждого файла определим новую папку.\n newsourcepath = os.path.join(dirsource, newdirname, \"MD\")\n if not os.path.exists(newsourcepath):\n logging.info(\"create new dir %s\" % newsourcepath)\n os.makedirs(newsourcepath)\n newpath2 = os.path.join(newsourcepath, basename)\n print(\"fullbasename %s\" % fullbasename)\n print(\"newdirname %s\" % newdirname)\n print(\"newsourcepath %s\" % 
newsourcepath)\n \n t1 = format(\"gcomp -d -v -F %s -D %s\" % (filename, newsourcepath))\n result = subprocess.check_call(['cmd.exe', '/C', t1])\n\n #изменим кодировку cp1251 на utf-8 \n #утилита iconv.exe должна запускаться в cmd = добавлена в PATH\t\t\t\n #файлов 1s, mdp, frm, txt\n t3 = 'bash .git/hooks/convert_utf8.sh {0}'.format( newsourcepath )\n print(\"t3 = %s\" % t3)\n logging.info(\"CONVERT: %s\" % t3)\n result = subprocess.check_call(['cmd.exe', '/C', t3])\n\n #result = subprocess.check_call(['git', 'add', '--all', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.1s', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.frm', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.mxl', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.utf', newsourcepath])\n if not result == 0:\n logging.error(result)\n exit(result)", "def test_c_extensions_import():\n import storm_analysis.dbscan.dbscan_c\n \n import storm_analysis.fista.fista_fft_c\n \n import storm_analysis.frc.frc_c\n \n import storm_analysis.L1H.homotopy_imagea_c\n\n import storm_analysis.rolling_ball_bgr.rolling_ball_lib_c\n\n import storm_analysis.sa_library.cs_decon_utilities_c\n import storm_analysis.sa_library.dao_fit_c\n import storm_analysis.sa_library.grid_c\n import storm_analysis.sa_library.ia_utilities_c\n import storm_analysis.sa_library.matched_filter_c\n\n import storm_analysis.sa_utilities.fitz_c\n\n import storm_analysis.simulator.pf_math_c\n import storm_analysis.simulator.draw_gaussians_c\n \n import storm_analysis.spliner.cubic_spline_c\n import storm_analysis.spliner.cubic_fit_c", "def build_extensions(self):\n c = self.compiler.compiler_type\n CF = [] ; LF=[]\n if \"CFLAGS\" in os.environ:\n CF = os.environ.get(\"CFLAGS\").split(\" \")\n if \"LDFLAGS\" in os.environ:\n LF = os.environ.get(\"LDFLAGS\").split(\" \")\n for e in self.extensions:\n if c in copt:\n e.extra_compile_args = copt[ c ] + CF\n e.extra_link_args = lopt[ c ] + LF\n print(\"Customised compiler\",c,e.extra_compile_args,\n e.extra_link_args)\n build_ext.build_ext.build_extensions(self)", "def compile(self, args):\n if args not in self._compileinfos:\n cres = compile_with_dppl(self.py_func, None, args, debug=self.debug)\n func = cres.library.get_function(cres.fndesc.llvm_func_name)\n cres.target_context.mark_ocl_device(func)\n first_definition = not self._compileinfos\n self._compileinfos[args] = cres\n libs = [cres.library]\n\n if first_definition:\n # First definition\n cres.target_context.insert_user_function(self, cres.fndesc,\n libs)\n else:\n cres.target_context.add_user_function(self, cres.fndesc, libs)\n\n else:\n cres = self._compileinfos[args]\n\n return cres.signature", "def get_c_files(path):\n clist = []\n for file in os.listdir(path):\n if file.endswith(\".cc\") or file.endswith(\".c\"):\n clist.append(\"%s/%s\" % (path, file))\n return clist", "def _update_c_file(self, lines, filename):\n # same as C header...\n return self._update_header_file(lines, filename)", "def dump_src(path: Path, df: pd.DataFrame, ncc_dir: Path):\n for name in df[\"name\"].values:\n try:\n src = name2ncc_path(name, ncc_dir / \"kernels_cl\", \".cl\")\n dst = path / \"src\" / f\"{name}.cl\"\n shutil.copyfile(src, dst)\n except FileNotFoundError:\n # Not all kernels correspond to OpenCL files. 
This is fine.\n pass", "def extensions():\n exts = []\n exts.append(\n Extension(\n 'pytng.pytng',\n sources=glob('pytng/src/compression/*.c') + glob(\n 'pytng/src/lib/*.c') + ['pytng/pytng.pyx'],\n include_dirs=[\n \"pytng/include/\", \"{}/include\".format(sys.prefix),\n np.get_include()\n ],\n library_dirs=[\"{}/lib\".format(sys.prefix)],\n libraries=['z'], ))\n\n return cythonize(exts, gdb_debug=False)", "def build_extension(self, ext):\n if sys.platform == \"win32\":\n _clr_compiler = \"C:\\\\Windows\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\csc.exe\"\n else:\n _clr_compiler = \"mcs\"\n cmd = [ \n _clr_compiler,\n \"/target:library\",\n \"clrmagic.cs\"\n ]\n check_call(\" \".join(cmd), shell=True)" ]
[ "0.66523314", "0.6544092", "0.65063745", "0.6070866", "0.6070866", "0.6048853", "0.6015238", "0.5894381", "0.58778536", "0.5804054", "0.57061064", "0.5672842", "0.56022316", "0.5536645", "0.5529895", "0.5502436", "0.54587775", "0.5413615", "0.5393308", "0.534646", "0.53356946", "0.532368", "0.531387", "0.5313439", "0.53062063", "0.5305", "0.52833223", "0.52812076", "0.5277327", "0.5274741" ]
0.6816537
0
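An illustrative driver script for the compile_dir snippet above. The "mypackage" directory is a placeholder, and Cython must be installed so that Cythonize.main is available to the function.

import os

# Hypothetical layout: a mypackage/ folder of .pyx sources next to this script.
# Assuming compile_dir from the document above is in scope, this compiles every
# top-level .pyx file in place and emits the -a HTML annotation files.
if __name__ == "__main__":
    package_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "mypackage")
    compile_dir(package_dir)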
Function to cythonize all the .pyx files in the given path, in place, including numpy.
def compile_dir_with_numpy(path, cleanup=True): from distutils.core import setup import numpy path = os.path.normpath(os.path.abspath(path)) temp = os.path.join(path,".temp_build") with redirect_sys_argv(os.path.join(path,"make_virtual_script.py"), "build_ext", "--inplace", "-t", temp): setup( ext_modules = cythonize("./*.pyx", annotate=True), include_dirs=[numpy.get_include()] ) if cleanup and os.path.exists(temp): shutil.rmtree(temp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compile_cutils():\r\n\r\n types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',\r\n 'int256', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\r\n 'float16', 'float32', 'float64', 'float80', 'float96', 'float128',\r\n 'float256']]\r\n\r\n complex_types = ['npy_' + t for t in ['complex32', 'complex64',\r\n 'complex128', 'complex160', 'complex192', 'complex512']]\r\n\r\n inplace_map_template = \"\"\"\r\n #if defined(%(typen)s)\r\n static void %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)\r\n {\r\n int index = mit->size;\r\n while (index--) {\r\n %(op)s\r\n\r\n PyArray_MapIterNext(mit);\r\n PyArray_ITER_NEXT(it);\r\n }\r\n }\r\n #endif\r\n \"\"\"\r\n\r\n floatadd = \"((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];\"\r\n complexadd = \"\"\"\r\n ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;\r\n ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;\r\n \"\"\"\r\n\r\n fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': floatadd % {'type': t}}\r\n for t in types] +\r\n [inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': complexadd % {'type': t}}\r\n for t in complex_types])\r\n\r\n fn_array = (\"static inplace_map_binop addition_funcs[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(type)s_inplace_add,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"\"\"NULL};\r\n \"\"\")\r\n\r\n type_number_array = (\"static int type_numbers[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(typen)s,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"-1000};\")\r\n\r\n code = (\"\"\"\r\n #include <Python.h>\r\n #include \"numpy/arrayobject.h\"\r\n\r\n extern \"C\"{\r\n static PyObject *\r\n run_cthunk(PyObject *self, PyObject *args)\r\n {\r\n PyObject *py_cthunk = NULL;\r\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\r\n return NULL;\r\n\r\n if (!PyCObject_Check(py_cthunk)) {\r\n PyErr_SetString(PyExc_ValueError,\r\n \"Argument to run_cthunk must be a PyCObject.\");\r\n return NULL;\r\n }\r\n void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\r\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\r\n void* it = PyCObject_GetDesc(py_cthunk);\r\n int failure = fn(it);\r\n\r\n return Py_BuildValue(\"i\", failure);\r\n }\r\n\r\n #if NPY_API_VERSION >= 0x00000008\r\n typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);\r\n \"\"\" + fns + fn_array + type_number_array +\r\n\r\n\"\"\"\r\nstatic int\r\nmap_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inplace)\r\n{\r\n PyArrayObject *arr = NULL;\r\n PyArrayIterObject *it;\r\n PyArray_Descr *descr;\r\n if (mit->ait == NULL) {\r\n return -1;\r\n }\r\n descr = PyArray_DESCR(mit->ait->ao);\r\n Py_INCREF(descr);\r\n arr = (PyArrayObject *)PyArray_FromAny(op, descr,\r\n 0, 0, NPY_ARRAY_FORCECAST, NULL);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n if ((mit->subspace != NULL) && (mit->consec)) {\r\n PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n }\r\n it = (PyArrayIterObject*)\r\n PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);\r\n if (it == NULL) {\r\n Py_DECREF(arr);\r\n return -1;\r\n }\r\n\r\n (*add_inplace)(mit, it);\r\n\r\n Py_DECREF(arr);\r\n 
Py_DECREF(it);\r\n return 0;\r\n}\r\n\r\n\r\nstatic PyObject *\r\ninplace_increment(PyObject *dummy, PyObject *args)\r\n{\r\n PyObject *arg_a = NULL, *index=NULL, *inc=NULL;\r\n PyArrayObject *a;\r\n inplace_map_binop add_inplace = NULL;\r\n int type_number = -1;\r\n int i =0;\r\n PyArrayMapIterObject * mit;\r\n\r\n if (!PyArg_ParseTuple(args, \"OOO\", &arg_a, &index,\r\n &inc)) {\r\n return NULL;\r\n }\r\n if (!PyArray_Check(arg_a)) {\r\n PyErr_SetString(PyExc_ValueError, \"needs an ndarray as first argument\");\r\n return NULL;\r\n }\r\n\r\n a = (PyArrayObject *) arg_a;\r\n\r\n if (PyArray_FailUnlessWriteable(a, \"input/output array\") < 0) {\r\n return NULL;\r\n }\r\n\r\n if (PyArray_NDIM(a) == 0) {\r\n PyErr_SetString(PyExc_IndexError, \"0-d arrays can't be indexed.\");\r\n return NULL;\r\n }\r\n type_number = PyArray_TYPE(a);\r\n\r\n\r\n\r\n while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){\r\n if (type_number == type_numbers[i]) {\r\n add_inplace = addition_funcs[i];\r\n break;\r\n }\r\n i++ ;\r\n }\r\n\r\n if (add_inplace == NULL) {\r\n PyErr_SetString(PyExc_TypeError, \"unsupported type for a\");\r\n return NULL;\r\n }\r\n mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);\r\n if (mit == NULL) {\r\n goto fail;\r\n }\r\n if (map_increment(mit, inc, add_inplace) != 0) {\r\n goto fail;\r\n }\r\n\r\n Py_DECREF(mit);\r\n\r\n Py_INCREF(Py_None);\r\n return Py_None;\r\n\r\nfail:\r\n Py_XDECREF(mit);\r\n\r\n return NULL;\r\n}\r\n #endif\r\n\r\n\r\n static PyMethodDef CutilsExtMethods[] = {\r\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\r\n \"Run a theano cthunk.\"},\r\n #if NPY_API_VERSION >= 0x00000008\r\n {\"inplace_increment\", inplace_increment,\r\n METH_VARARGS,\r\n \"increments a numpy array inplace at the passed indexes.\"},\r\n #endif\r\n {NULL, NULL, 0, NULL} /* Sentinel */\r\n };\"\"\")\r\n\r\n if PY3:\r\n # This is not the most efficient code, but it is written this way to\r\n # highlight the changes needed to make 2.x code compile under python 3.\r\n code = code.replace(\"<Python.h>\", '\"numpy/npy_3kcompat.h\"', 1)\r\n code = code.replace(\"PyCObject\", \"NpyCapsule\")\r\n code += \"\"\"\r\n static struct PyModuleDef moduledef = {\r\n PyModuleDef_HEAD_INIT,\r\n \"cutils_ext\",\r\n NULL,\r\n -1,\r\n CutilsExtMethods,\r\n };\r\n\r\n PyMODINIT_FUNC\r\n PyInit_cutils_ext(void) {\r\n import_array();\r\n return PyModule_Create(&moduledef);\r\n }\r\n }\r\n \"\"\"\r\n else:\r\n code += \"\"\"\r\n PyMODINIT_FUNC\r\n initcutils_ext(void)\r\n {\r\n import_array();\r\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\r\n }\r\n } //extern C\r\n \"\"\"\r\n\r\n loc = os.path.join(config.compiledir, 'cutils_ext')\r\n if not os.path.exists(loc):\r\n os.mkdir(loc)\r\n\r\n args = cmodule.GCC_compiler.compile_args()\r\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\r\n preargs=args)", "def compile_dir(path):\r\n to_compile = get_pyx_files(path)\r\n print(\"De:\",path)\r\n if to_compile:\r\n print(\"Se compilaran:\", list(map(os.path.basename,to_compile)))\r\n Cythonize.main( ['-a', '-i'] + to_compile )\r\n else:\r\n print(\"Nada para compilar\")", "def get_pyx_files(path, abspath=True):\r\n path = os.path.normpath(os.path.abspath(path))\r\n to_compile = []\r\n for name in os.listdir(path):\r\n if name.endswith(\".pyx\"):\r\n pyx = os.path.join(path,name)\r\n if os.path.isfile(pyx):\r\n to_compile.append(pyx if abspath else name)\r\n return to_compile", "def run_cython(args):\n args = magic.arg_split(args, posix=True)\n filename = 
args.pop()\n if '--force' not in args:\n args.append('--force')\n ip = get_ipython()\n ip.extension_manager.load_extension('cython')\n with io.open(filename, 'r', encoding='utf-8') as f:\n ip.run_cell_magic('cython', ' '.join(args), f.read())", "def cython(self, line, cell):\n from sage.misc.cython_c import cython_compile\n return cython_compile(cell)", "def build_from_c_and_cpp_files(extensions):\n for extension in extensions:\n sources = []\n for sfile in extension.sources:\n path, ext = os.path.splitext(sfile)\n if ext in ('.pyx', '.py'):\n if extension.language == 'c++':\n ext = '.cpp'\n else:\n ext = '.c'\n sfile = path + ext\n sources.append(sfile)\n extension.sources = sources", "def build_extensions(self):\n numpy_incl = resource_filename('numpy', 'core/include')\n for ext in self.extensions:\n ext.include_dirs.append(numpy_incl)\n\n # This explicitly calls the superclass method rather than the\n # usual super() invocation because distutils' build_class, of\n # which Cython's build_ext is a subclass, is an old-style class\n # in Python 2, which doesn't support `super`.\n cython_build_ext.build_extensions(self)", "def generate_cython_transpile(self, target: build.BuildTarget) -> \\\n T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.List[str]]:\n static_sources: T.MutableMapping[str, File] = OrderedDict()\n generated_sources: T.MutableMapping[str, File] = OrderedDict()\n cython_sources: T.List[str] = []\n\n cython = target.compilers['cython']\n\n args: T.List[str] = []\n args += cython.get_always_args()\n args += cython.get_buildtype_args(target.get_option(OptionKey('buildtype')))\n args += cython.get_debug_args(target.get_option(OptionKey('debug')))\n args += cython.get_optimization_args(target.get_option(OptionKey('optimization')))\n args += cython.get_option_compile_args(target.get_options())\n args += self.build.get_global_args(cython, target.for_machine)\n args += self.build.get_project_args(cython, target.subproject, target.for_machine)\n args += target.get_extra_args('cython')\n\n ext = target.get_option(OptionKey('language', machine=target.for_machine, lang='cython'))\n\n pyx_sources = [] # Keep track of sources we're adding to build\n\n for src in target.get_sources():\n if src.endswith('.pyx'):\n output = os.path.join(self.get_target_private_dir(target), f'{src}.{ext}')\n element = NinjaBuildElement(\n self.all_outputs, [output],\n self.compiler_to_rule_name(cython),\n [src.absolute_path(self.environment.get_source_dir(), self.environment.get_build_dir())])\n element.add_item('ARGS', args)\n self.add_build(element)\n # TODO: introspection?\n cython_sources.append(output)\n pyx_sources.append(element)\n else:\n static_sources[src.rel_to_builddir(self.build_to_src)] = src\n\n header_deps = [] # Keep track of generated headers for those sources\n for gen in target.get_generated_sources():\n for ssrc in gen.get_outputs():\n if isinstance(gen, GeneratedList):\n ssrc = os.path.join(self.get_target_private_dir(target), ssrc)\n else:\n ssrc = os.path.join(gen.get_subdir(), ssrc)\n if ssrc.endswith('.pyx'):\n output = os.path.join(self.get_target_private_dir(target), f'{ssrc}.{ext}')\n element = NinjaBuildElement(\n self.all_outputs, [output],\n self.compiler_to_rule_name(cython),\n [ssrc])\n element.add_item('ARGS', args)\n self.add_build(element)\n pyx_sources.append(element)\n # TODO: introspection?\n cython_sources.append(output)\n else:\n generated_sources[ssrc] = mesonlib.File.from_built_file(gen.get_subdir(), ssrc)\n # Following logic in L883-900 where we 
determine whether to add generated source\n # as a header(order-only) dep to the .so compilation rule\n if not self.environment.is_source(ssrc) and \\\n not self.environment.is_object(ssrc) and \\\n not self.environment.is_library(ssrc) and \\\n not modules.is_module_library(ssrc):\n header_deps.append(ssrc)\n for source in pyx_sources:\n source.add_orderdep(header_deps)\n\n return static_sources, generated_sources, cython_sources", "def cythonize_extensions(extensions):\n from Cython.Build import cythonize\n with cd(config.script_dir/'src'):\n cythonized = cythonize(\n extensions,\n language_level=3,\n nthreads=4,\n annotate=config.debug,\n # https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#compiler-directives # noqa: E501\n compiler_directives={\n 'binding': True,\n 'boundscheck': False,\n 'wraparound': False,\n 'profile': config.debug and not config.pypy,\n 'linetrace': config.debug and not config.pypy,\n 'always_allow_keywords': True,\n 'embedsignature': True,\n 'emit_code_comments': True,\n 'initializedcheck': False,\n 'nonecheck': False,\n 'optimize.use_switch': True,\n # Warns about any variables that are implicitly declared\n # without a cdef declaration\n 'warn.undeclared': False,\n 'warn.unreachable': True,\n 'warn.maybe_uninitialized': False,\n 'warn.unused': True,\n 'warn.unused_arg': False,\n 'warn.unused_result': False,\n 'warn.multiple_declarators': True,\n },\n )\n for cy in cythonized:\n cy.sources[0] = 'src/' + cy.sources[0]\n return cythonized", "def useCython():\n global kernels_imp\n if HAS_CYTHON:\n import _kernels\n kernels_imp = _kernels", "def define_extensions(use_cython, use_openmp):\n if sys.platform.startswith('win'):\n # compile args from\n # https://msdn.microsoft.com/en-us/library/fwkeyyhe.aspx\n link_args = []\n compile_args = ['/O2', '/openmp']\n else:\n link_args = []\n compile_args = ['-Wno-unused-function', '-Wno-maybe-uninitialized', '-O3', '-ffast-math']\n if use_openmp:\n compile_args.append('-fopenmp')\n link_args.append('-fopenmp')\n\n if 'anaconda' not in sys.version.lower():\n compile_args.append('-march=native')\n\n # recommended approach is that the user can choose not to\n # compile the code using cython, they can instead just use\n # the .c file that's also distributed\n # http://cython.readthedocs.io/en/latest/src/reference/compilation.html#distributing-cython-modules\n src_ext = '.pyx' if use_cython else '.c'\n names = ['pairwise3']\n modules = [Extension(name,\n [os.path.join(name + src_ext)],\n extra_compile_args = compile_args,\n extra_link_args = link_args) for name in names]\n\n if use_cython:\n return cythonize(modules)\n else:\n return modules", "def define_extensions():\n import numpy as np\n build_dir = os.environ.get(\"PWD\")\n extra_compile_args=[\n '-O3',\n '-Wall',\n '-Wextra',\n '-Wno-unused-variable',\n '-D CYTHON_TRACE=1' if config.debug else '',\n '-D CYTHON_TRACE_NOGIL=1' if config.debug else '',\n ]\n extra_link_args = ['-Wl,-rpath,' + build_dir + '/src/qtestpy/lib']\n return cythonize_extensions([\n Extension(\n name='qtestpy.adapt',\n sources=['qtestpy/adapt.pyx'],\n include_dirs=['src/qtestpy', 'src/qtestpy/include', np.get_include()],\n library_dirs=['src/qtestpy/lib'],\n libraries=[':e.o'],\n extra_compile_args=[\n *extra_compile_args,\n # https://github.com/cython/cython/issues/2498\n '-D NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION',\n # type punning is used to support GUIDs\n '-Wno-strict-aliasing',\n ],\n extra_link_args = extra_link_args,\n ),\n ])", "def clsources(ctx):\n\t# 
TODO Use regex instead\n\t# this is a poor-man's hack, it relies on the assumption that files\n\t# start and end with parenthesis. Put a comment before, and you generate\n\t# wrong code. But the source is under my control anyways.\n\tfloat_srcfolder = ctx.path.find_node(inc + \"/ukoct/opencl/float/sources\")\n\tcommon_clsources = ctx.path.ant_glob(inc + \"/ukoct/opencl/common/**/*.cl\")\n\tfloat_clsources = [\n\t\tfloat_srcfolder.find_node(\"defs.inc.cl\"),\n\t\tfloat_srcfolder.find_node(\"coherent.inc.cl\"),\n\t\tfloat_srcfolder.find_node(\"consistent.inc.cl\"),\n\t\tfloat_srcfolder.find_node(\"intConsistent.inc.cl\"),\n\t\tfloat_srcfolder.find_node(\"closed.inc.cl\"),\n\t\tfloat_srcfolder.find_node(\"stronglyClosed.inc.cl\"),\n\t]\n\tfloat_clsource = []\n\tfor node in common_clsources:\n\t\tsource = node.read()\n\t\tfloat_clsource.append(source[source.find(\"(\") + 1 : source.rfind(\")\")])\n\tfor node in float_clsources:\n\t\tsource = node.read()\n\t\tfloat_clsource.append(source[source.find(\"(\") + 1 : source.rfind(\")\")])\n\tctx.path.make_node(\"float.cl\").write(''.join(float_clsource))", "def pyo():\n local('python -O -m compileall .')", "def pyo():\n local('python -O -m compileall .')", "def default_helper_c_code_args():\r\n\r\n return {\r\n \"c_prefix\": \"PyArray\",\r\n \"strides_mul\": 1,\r\n }", "def compile(self):\n for layer in self.layers:\n layer._Dense__load()", "def compile_args():\r\n flags = [flag for flag in config.nvcc.flags.split(' ') if flag]\r\n if config.nvcc.fastmath:\r\n flags.append('-use_fast_math')\r\n cuda_ndarray_cuh_hash = hash_from_file(\r\n os.path.join(os.path.split(theano.sandbox.cuda.__file__)[0],\r\n 'cuda_ndarray.cuh'))\r\n flags.append('-DCUDA_NDARRAY_CUH=' + cuda_ndarray_cuh_hash)\r\n\r\n # numpy 1.7 deprecated the following macros but they didn't\r\n # exist in the past\r\n numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]\r\n if bool(numpy_ver < [1, 7]):\r\n flags.append(\"-D NPY_ARRAY_ENSURECOPY=NPY_ENSURECOPY\")\r\n flags.append(\"-D NPY_ARRAY_ALIGNED=NPY_ALIGNED\")\r\n flags.append(\"-D NPY_ARRAY_WRITEABLE=NPY_WRITEABLE\")\r\n flags.append(\"-D NPY_ARRAY_UPDATE_ALL=NPY_UPDATE_ALL\")\r\n flags.append(\"-D NPY_ARRAY_C_CONTIGUOUS=NPY_C_CONTIGUOUS\")\r\n flags.append(\"-D NPY_ARRAY_F_CONTIGUOUS=NPY_F_CONTIGUOUS\")\r\n\r\n # If the user didn't specify architecture flags add them\r\n if not any(['-arch=sm_' in f for f in flags]):\r\n dev = theano.sandbox.gpuarray.init_dev.device\r\n if dev is None:\r\n raise Exception, \"Trying to compile GPU code without a context\"\r\n if dev.startswith(\"opencl\"):\r\n raise Exception, \"Trying to call nvcc with an OpenCL context\"\r\n assert dev.startswith('cuda')\r\n if dev == 'cuda':\r\n n = theano.sandbox.cuda.use.device_number\r\n else:\r\n n = int(dev[4:])\r\n p = theano.sandbox.cuda.device_properties(n)\r\n flags.append('-arch=sm_' + str(p['major']) + str(p['minor']))\r\n\r\n return flags", "def p_ym_c(pm,px,py,pyx_c,pmx_c):\n pym_c = np.zeros((py.size,pm.size))\n for yi in range(py.size):\n for mi in range(pm.size):\n for xi in range(px.size):\n pym_c[yi,mi] += (1./pm[mi])*pyx_c[yi,xi]*pmx_c[mi,xi]*px[xi]\n return pym_c", "def maybe_cythonize_extensions(top_path, config):\n is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO'))\n\n if is_release:\n build_from_c_and_cpp_files(config.ext_modules)\n else:\n message = ('Please install cython with a version >= {0} in order '\n 'to build a scikit-learn development version.').format(\n CYTHON_MIN_VERSION)\n try:\n import Cython\n if 
LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:\n message += ' Your version of Cython was {0}.'.format(\n Cython.__version__)\n raise ValueError(message)\n from Cython.Build import cythonize\n except ImportError as exc:\n exc.args += (message,)\n raise\n\n config.ext_modules = cythonize(config.ext_modules)", "def compile_args():\r\n flags = [flag for flag in config.nvcc.flags.split(' ') if flag]\r\n if config.nvcc.fastmath:\r\n flags.append('-use_fast_math')\r\n cuda_ndarray_cuh_hash = hash_from_file(\r\n os.path.join(os.path.split(__file__)[0], 'cuda_ndarray.cuh'))\r\n flags.append('-DCUDA_NDARRAY_CUH=' + cuda_ndarray_cuh_hash)\r\n\r\n # numpy 1.7 deprecated the following macro but the didn't\r\n # existed in the past\r\n numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]\r\n if bool(numpy_ver < [1, 7]):\r\n flags.append(\"-D NPY_ARRAY_ENSURECOPY=NPY_ENSURECOPY\")\r\n flags.append(\"-D NPY_ARRAY_ALIGNED=NPY_ALIGNED\")\r\n flags.append(\"-D NPY_ARRAY_WRITEABLE=NPY_WRITEABLE\")\r\n flags.append(\"-D NPY_ARRAY_UPDATE_ALL=NPY_UPDATE_ALL\")\r\n flags.append(\"-D NPY_ARRAY_C_CONTIGUOUS=NPY_C_CONTIGUOUS\")\r\n flags.append(\"-D NPY_ARRAY_F_CONTIGUOUS=NPY_F_CONTIGUOUS\")\r\n\r\n # If the user didn't specify architecture flags add them\r\n if not any(['-arch=sm_' in f for f in flags]):\r\n # We compile cuda_ndarray.cu during import.\r\n # We should not add device properties at that time.\r\n # As the device is not selected yet!\r\n # TODO: re-compile cuda_ndarray when we bind to a GPU?\r\n import theano.sandbox.cuda\r\n if hasattr(theano.sandbox, 'cuda'):\r\n n = theano.sandbox.cuda.use.device_number\r\n if n is None:\r\n _logger.warn(\r\n \"We try to get compilation arguments for CUDA\"\r\n \" code, but the GPU device is not initialized.\"\r\n \" This is probably caused by an Op that work on\"\r\n \" the GPU that don't inherit from GpuOp.\"\r\n \" We Initialize the GPU now.\")\r\n theano.sandbox.cuda.use(\r\n \"gpu\",\r\n force=True,\r\n default_to_move_computation_to_gpu=False,\r\n move_shared_float32_to_gpu=False,\r\n enable_cuda=False)\r\n n = theano.sandbox.cuda.use.device_number\r\n p = theano.sandbox.cuda.device_properties(n)\r\n flags.append('-arch=sm_' + str(p['major']) +\r\n str(p['minor']))\r\n\r\n return flags", "def dump_src(path: Path, df: pd.DataFrame, ncc_dir: Path):\n for name in df[\"name\"].values:\n try:\n src = name2ncc_path(name, ncc_dir / \"kernels_cl\", \".cl\")\n dst = path / \"src\" / f\"{name}.cl\"\n shutil.copyfile(src, dst)\n except FileNotFoundError:\n # Not all kernels correspond to OpenCL files. 
This is fine.\n pass", "def main():\n print(\n \"\"\"\n\n ##########################################################\n # #\n # #\n # Compiling Colocalized Cyano Datasets #\n # #\n # #\n ##########################################################\n\n \n \"\"\"\n )\n cyanoFiles = glob.glob(f\"{COLOCALIZED_DIR}*.csv\")\n makedir(COMPILED_DIR)\n dfCompiled = pd.DataFrame({})\n for cyanoFile in cyanoFiles:\n print(f\"Compiling {cyanoFile}\")\n data = unify(cyanoFile)\n if len(dfCompiled ) < 1:\n dfCompiled = data\n else:\n dfCompiled = pd.concat([dfCompiled, data], ignore_index=True) \n dfCompiled.to_csv(f\"{COMPILED_DIR}compiled.csv\", index=False)", "def build_extensions(self):\n c = self.compiler.compiler_type\n CF = [] ; LF=[]\n if \"CFLAGS\" in os.environ:\n CF = os.environ.get(\"CFLAGS\").split(\" \")\n if \"LDFLAGS\" in os.environ:\n LF = os.environ.get(\"LDFLAGS\").split(\" \")\n for e in self.extensions:\n if c in copt:\n e.extra_compile_args = copt[ c ] + CF\n e.extra_link_args = lopt[ c ] + LF\n print(\"Customised compiler\",c,e.extra_compile_args,\n e.extra_link_args)\n build_ext.build_ext.build_extensions(self)", "def load_numpy_core_multiarray(finder, module):\n module.AddGlobalName(\"arange\")", "def extensions():\n exts = []\n exts.append(\n Extension(\n 'pytng.pytng',\n sources=glob('pytng/src/compression/*.c') + glob(\n 'pytng/src/lib/*.c') + ['pytng/pytng.pyx'],\n include_dirs=[\n \"pytng/include/\", \"{}/include\".format(sys.prefix),\n np.get_include()\n ],\n library_dirs=[\"{}/lib\".format(sys.prefix)],\n libraries=['z'], ))\n\n return cythonize(exts, gdb_debug=False)", "def process_input_files(inputs):\n for ifile in inputs:\n with open(ifile) as fin:\n exec(compile(fin.read(), ifile, 'exec'))", "def get_c_files(path):\n clist = []\n for file in os.listdir(path):\n if file.endswith(\".cc\") or file.endswith(\".c\"):\n clist.append(\"%s/%s\" % (path, file))\n return clist", "def _compile_C_code(header, body, return_unloaded=False, verbose=False):\n import importlib\n import tempfile\n import uuid\n\n import cffi\n\n module_name = \"module_\" + uuid.uuid4().hex\n\n if \"__uint128\" in header:\n raise ValueError(\"_compile_C_code does not support bit-vector widths \"\n \"larger than 64 bits (cffi does not support __uint128)\")\n\n ffibuilder = cffi.FFI()\n ffibuilder.cdef(header)\n ffibuilder.set_source(module_name, body)\n\n tmpdir = tempfile.TemporaryDirectory()\n lib_path = ffibuilder.compile(tmpdir=tmpdir.name, verbose=verbose)\n\n if return_unloaded:\n return lib_path, module_name, tmpdir\n\n # dynamic import\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n spec = importlib.util.spec_from_file_location(module_name, lib_path)\n pymod_parent = importlib.util.module_from_spec(spec)\n # sys.modules[module_name] = module\n spec.loader.exec_module(pymod_parent)\n\n pymod = pymod_parent\n\n return pymod, tmpdir", "def getJacobs(base_dir='KnownLenses/Jacobs_KnownLenses/'):\n\n known_jacobs = []\n for root, dirs, files in os.walk(base_dir):\n for folder in dirs:\n known_jacobs.append(os.path.join(root, folder))\n num_data_targets = len(known_jacobs)\n data_known_jacobs = np.zeros([num_data_targets, 3, 100, 100])\n\n for var in range(len(known_jacobs)):\n # g_name = get_pkg_data_filename(known_jacobs[var] + '/g_WCSClipped.fits')\n # r_name = get_pkg_data_filename(known_jacobs[var] + '/r_WCSClipped.fits')\n # i_name = get_pkg_data_filename(known_jacobs[var] + '/i_WCSClipped.fits')\n\n g_name = get_pkg_data_filename(known_jacobs[var] + 
'/g_norm.fits')\n r_name = get_pkg_data_filename(known_jacobs[var] + '/r_norm.fits')\n i_name = get_pkg_data_filename(known_jacobs[var] + '/i_norm.fits')\n\n g = fits.open(g_name)[0].data[0:100, 0:100]\n r = fits.open(r_name)[0].data[0:100, 0:100]\n i = fits.open(i_name)[0].data[0:100, 0:100]\n\n data_known_jacobs[var] = [g, r, i]\n return data_known_jacobs" ]
[ "0.6818972", "0.6327408", "0.63241965", "0.6169207", "0.5705685", "0.565066", "0.563825", "0.5581065", "0.5533468", "0.5440001", "0.53392947", "0.5276428", "0.52711195", "0.52364916", "0.52364916", "0.51878566", "0.5183364", "0.5174798", "0.5170791", "0.5147748", "0.5139927", "0.51341206", "0.5125734", "0.5089514", "0.5078503", "0.5066097", "0.5043332", "0.504221", "0.5016699", "0.5013252" ]
0.63986754
1
Display a digit.
number: the number (0-9) to display
offset: the leftmost column of the displayed digit
color: the RGB color to use to display the digit
force_zero: whether to leave a 0 blank (False) or display it
def display_digit(number, offset, color, force_zero):
    bits = number_patterns[number]
    for row in range(4):
        for col in range(3):
            if bits[row][col] == " " or (number == 0 and not force_zero):
                trellis.pixels[col + offset, row] = (0, 0, 0)
            else:
                trellis.pixels[col + offset, row] = color
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_number(number, color):\n if number >= 500 or number < 0:\n return False\n display_digit(number % 10, 5, color, True)\n display_digit((number // 10) % 10, 1, color, number >= 100)\n hundreds = number // 100\n for h in range(4):\n if h + 1 == hundreds:\n trellis.pixels[0, h] = (255, 255, 255)\n else:\n trellis.pixels[0, h] = (0, 0, 0)\n return True", "def draw_digit( self, value, position ):\n\t\tx_start = position * 4\n\t\t_font = FONT[value] # value=4 -> [0b00001111,0b00001000,0b01111111],\n\t\tfor col in range( len(_font) ):\n\t\t\tfor y in range( 0, 8 ): # Only the 7 firsts bits\n\t\t\t\tbit_weight = pow( 2, y )\n\t\t\t\tif _font[col] & bit_weight == bit_weight:\n\t\t\t\t\tself.fb.pixel( x_start+col, y, 1 ) # Draw the Pixel", "def show_digit(digit):\n\n # Create a window for the digit. The digit is 14x14, so create a window \n # which is 150x150. We'll leave a border of 5 pixels, and each digit\n # \"pixel\" will be 10x10\n\n master = Tk()\n\n canvas = Canvas(master, width=150, height=150)\n canvas.pack()\n\n # Draw a rectange for each pixel in the digit\n for i in range(14):\n y = 10*i + 5\n for j in range(14):\n x = 10*j + 5\n \n\n # Determine the hex value of this pixel color\n pixel_value = digit[14*i + j]\n pixel_hex = hex(int(pixel_value*255)).replace('0x','')\n pixel_hex = '#' + pixel_hex + pixel_hex + pixel_hex\n \n # Draw the rectangle\n canvas.create_rectangle(x, y, x+10, y+10, fill=pixel_hex)\n\n # Done!\n return canvas", "def print_digit(row):\n string = ''\n for j in range(DIGIT_HEIGHT):\n for i in range(DIGIT_WIDTH):\n if row[j + DIGIT_WIDTH*i] > 0:\n string += '1'\n else:\n string += '0'\n string += '\\n'\n print string", "def print_digit(d):\r\n if d == 1:\r\n print(\":::||\", end=\"\")\r\n elif d == 2:\r\n print(\"::|:|\", end=\"\")\r\n elif d == 3:\r\n print(\"::||::\", end=\"\")\r\n elif d == 4:\r\n print(\":|::|\", end=\"\")\r\n elif d == 5:\r\n print(\":|:|:\", end=\"\")\r\n elif d == 6:\r\n print(\":||::\", end=\"\")\r\n elif d == 7:\r\n print(\"|:::|\", end=\"\")\r\n elif d == 8:\r\n print(\"|::|:\", end=\"\")\r\n elif d == 9:\r\n print(\"|:|::\", end=\"\")\r\n elif d == 0:\r\n print(\"||:::\", end=\"\")\r\n else:\r\n print(\"Invalid digit\")", "def draw_number(self):\n text_color = (0, 0, 0)\n if self.bombs_around == 1:\n text_color = (0, 0, 150)\n if self.bombs_around == 2:\n text_color = (0, 150, 0)\n if self.bombs_around == 3:\n text_color = (150, 0, 0)\n if self.bombs_around == 4:\n text_color = (133, 39, 138)\n if self.bombs_around == 5:\n text_color = (128, 0, 0)\n if self.bombs_around == 6:\n text_color = (175, 238, 238)\n if self.bombs_around == 7:\n text_color = (0, 0, 0)\n if self.bombs_around == 8:\n text_color = (33, 161, 166)\n\n font = pygame.font.Font(\"fonts/JetBrainsMono-Bold.ttf\", 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(\n str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))", "def draw_numbers(self):\n for i in range(9):\n for j in range(9):\n pos = self.get_pos_in_grid(i, j)\n text = self.grid[i][j]\n text = '' if text == 0 else str(text)\n self.text_to_screen(text, pos)", "def digit( spr, d, bw, x, y ):\n\t# byte spr, byte d, byte bw, int x, int y)\n\tgd.sprite(spr, x, y, digits() + d, 2 + bw, 0)\n\tgd.sprite(spr + 1, x, y + 16, digits() + d + 11, 2 + bw, 0)", "def grid(digit):\n try:\n return DIGIT_TO_GLYPH[int(digit)]\n except IndexError:\n raise ValueError(\"Unknown digit\")", "def print_grid(gr):\n for i in range(0,9):\n if((i % 3) == 0):\n print('- - 
- - - - - - - - - - - - - -')\n for j in range(0,9):\n if((j % 3) == 0):\n print('|', end='')\n \n val = str(gr[i][j])\n if(val == '0'):\n val = ' '\n \n print(' ' + val + ' ', end = '')\n print('|')\n print('- - - - - - - - - - - - - - - -')", "def display_led(my_bus, num):\n if num < 0:\n write_led(my_bus, num_map['0'], num_map['0'], num_map['0'], num_map['0'])\n elif 0 <= num <= 9:\n write_led(my_bus, num_map['0'], num_map['0'], num_map['0'], num_map[str(num)])\n elif 10 <= num <= 99:\n str_num = str(num)[:2]\n write_led(my_bus, num_map['0'], num_map['0'], num_map[str_num[0]], num_map[str_num[1]])\n elif 100 <= num <= 999:\n str_num = str(num)[:3]\n write_led(my_bus, num_map['0'], num_map[str_num[0]], num_map[str_num[1]], num_map[str_num[2]])\n else:\n str_num = str(num)[:4]\n write_led(my_bus, num_map[str_num[0]], num_map[str_num[1]], num_map[str_num[2]], num_map[str_num[3]])", "def from_val(value: int) -> str:\n return f\"\\033[{value}m\"", "def get_char_to_display(in_value):\n\treturn '#' if in_value else ' '", "def print_dashes(num: int, dash: str = '#') -> str:\n\n # Gets the terminal width\n num_col = shutil.get_terminal_size((80, 20)).columns\n\n return dashed_line(num if num <= num_col else num_col, dash)", "def get_pad1(n):\n if n < 10:\n return \" \"\n if n < 100:\n return \" \"\n if n < 1000:\n return \" \"\n return \"\"", "def colorize(lead, num, color):\n if num != 0 and ANSIBLE_COLOR and color is not None:\n return \"%s%s%-15s\" % (stringc(lead, color), stringc(\"=\", color), stringc(str(num), color))\n else:\n return \"%s=%-4s\" % (lead, str(num))", "def steady_numbers(self):\n for y in range(9):\n for x in range(9):\n if not self.grid[y][x] == 0:\n n = self.font.render(str(self.grid[y][x]), 1, 'black')\n self.screen.blit(n, ((self.x_pos + x * 80), (self.y_pos + y * 80)))", "def draw_number(x):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n if(x>0):\n while(x>0):\n print(\"+\",end='')\n x = x-1\n print(\"\",end=\"\\n\")\n if(x<0):\n while(x<0):\n print(\"-\",end='')\n x = x+1\n print(\"\",end=\"\\n\")", "def intRender(self, number):\n\n data = unicode(number)\n bites = list()\n\n while data:\n bites.append(data[-3:])\n data = data[:-3]\n\n return \" \".join(reversed(bites))", "def display_number(number1, number2):\n top = TOP_DISPLAY[STATES[index_state]]\n ret_tab = [O]*64\n for i in range(8):\n for j in range(3):\n ret_tab[8 * j + i] = top[j][i]\n for i in range(3):\n for j in range(5):\n ret_tab[8 * (j+3) + i+1] = [255, 255, 255] if number1[j][i] == 1 else [0, 0, 0]\n ret_tab[8 * (j+3) + i+5] = [255, 255, 255] if number2[j][i] == 1 else [0, 0, 0]\n sense.set_pixels(ret_tab)", "def format(self, num):\n if self._negative:\n if num > 0:\n digit_fmt = \"0{0}\".format(self._digits)\n return \" {{:{}b}}\".format(digit_fmt).format(num)\n else:\n digit_fmt = \"0{0}\".format(self._digits + 1)\n return \"{{:{}b}}\".format(digit_fmt).format(num)\n else:\n digit_fmt = \"0{0}\".format(self._digits)\n return \"{{:{}b}}\".format(digit_fmt).format(num)", "def __str__(self):\n return textwrap.fill('{:064b}'.format(self.num), 8)", "def textColor(colorNumber):\n return '\\033[%dm' % (30 + colorNumber)", "def display_number_with_default(self):\r\n if self.display_coursenumber:\r\n return self.display_coursenumber\r\n\r\n return self.number", "def phoneDisplay(number):\n return number[0:3] + \"&nbsp;&middot;&nbsp;\" + number[3:6] + \"&nbsp;&middot;&nbsp;\" + number[6:10]", "def blueline(self):\n\t\treturn 
self.ESC+\"34m-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\"+self.ESC+\"0m\\r\\n\"", "def toColor(n):\n color = ('%X'%(n+ID_OFFSET)).rjust(6,'0')\n if not len(color) == 6:\n raise ColorError(n)\n else:\n r = int(color[0:2], 16)\n g = int(color[2:4], 16)\n b = int(color[4:6], 16)\n return '%.3d %.3d %.3d'%(r,g,b)", "def grid_coord(num: str) -> 'pygame.font':\n text = font.render(num, False, gray)\n return text", "def my_print(self):\n if self.size == 0:\n print(\"\")\n return\n for j in range(self.__position[1]):\n print(\"\")\n for i in range(self.size):\n if self.__position[0] > 0:\n print(\" \" * self.__position[0], end=\"\")\n print('#' * self.size)", "def display_digits():\n digits = load_digits()\n print(digits.DESCR)\n fig = plt.figure()\n for i in range(10):\n subplot = fig.add_subplot(5, 2, i+1)\n subplot.matshow(numpy.reshape(digits.data[i], (8, 8)), cmap='gray')\n\n plt.show()" ]
[ "0.695156", "0.6788571", "0.66527975", "0.6614041", "0.6437634", "0.6332502", "0.61967576", "0.6093905", "0.60627466", "0.6041989", "0.599062", "0.5990346", "0.5939854", "0.5918779", "0.59041363", "0.5903598", "0.588926", "0.58889186", "0.58847225", "0.58732307", "0.5850803", "0.5808245", "0.58022237", "0.57971406", "0.57927126", "0.57866275", "0.5779002", "0.5749724", "0.572268", "0.5676134" ]
0.8424535
0
Perform an animation (displaying random numbers) before displaying the requested number.
number: the number to eventually display
color: the color to use (indicates the type of dice used)
def animate_to(number, color):
    for _ in range(10):
        trellis.pixels.fill((0, 0, 0))
        display_number(random.randint(10, 99), color)
        time.sleep(0.1)
    trellis.pixels.fill((0, 0, 0))
    display_number(number, color)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_number(number, color):\n if number >= 500 or number < 0:\n return False\n display_digit(number % 10, 5, color, True)\n display_digit((number // 10) % 10, 1, color, number >= 100)\n hundreds = number // 100\n for h in range(4):\n if h + 1 == hundreds:\n trellis.pixels[0, h] = (255, 255, 255)\n else:\n trellis.pixels[0, h] = (0, 0, 0)\n return True", "def demo_a_number(random_number):", "def fireRandom():\n#the increment and the A1-H8 define the possible colors the pixels can be\n\tincrement = 20\n A1 = Color(255, 0, 0)\n B2 = Color(255, increment, 0)\n C3 = Color(255, increment * 2, 0)\n D4 = Color(255, increment * 3, 0)\n E5 = Color(255, increment * 4, 0)\n F6 = Color(255, increment * 5, 0)\n G7 = Color(255, increment * 6, 0)\n H8 = Color(255, increment * 7, 0)\n#the for loop with i and the number of pixels cycles through all the pixels we have so that they each get called when a random number does\n for i in range (strip.numPixels()):\n threeQuarters = randrange(0, 4)\n if threeQuarters == 1 or threeQuarters == 2 or threeQuarters == 3 :\n#the above if statement and the threeQuarters variable makes it so that the pixels only recieve a color 75% of the time. This makes a flicker.\n shade = randrange(0,9)\n\n if shade == 1:\n strip.setPixelColor(i, A1)\n strip.show()\n elif shade == 2:\n strip.setPixelColor(i, B2)\n strip.show()\n elif shade == 3:\n strip.setPixelColor(i, C3)\n strip.show()\n elif shade == 4:\n strip.setPixelColor(i, D4)\n strip.show()\n elif shade == 5:\n strip.setPixelColor(i, E5)\n strip.show()\n elif shade == 6:\n strip.setPixelColor(i, F6)\n strip.show()\n elif shade == 7:\n strip.setPixelColor(i, G7)\n\t\t\t\tstrip.show()\n\t\t\telse: \n\t\t\t\tstrip.setPixelColor(i, H8)\n\t\t\t\tstrip.show()", "def light_number(self, number, position):\n for [x, y] in number:\n uh.set_pixel(x+position[0], y+position[1], 183, 0, 255)\n uh.show()", "def draw_number(self, number):\n\t\tglobal changed_rects\n\n\t\tnumber = number -1\t\t# correct to correct index\n\t\tself.image.fill((255, 255, 255))\n\t\tpygame.draw.lines(\n\t\t\tself.image,\n\t\t\t(0, 0, 0),\t\t# black\n\t\t\tTrue,\t\t\t# connects last to first point\n\t\t\t[(0, 0), (self.size, 0), (self.size, self.size), (0, self.size)],\n\t\t\tself.size//20)\t\t\t# line width\n\t\tfor pip in NUMBERS[number]:\n\t\t\tpygame.draw.circle(\n\t\t\t\tself.image,\n\t\t\t\t(0, 0, 0),\n\t\t\t\t(self.size*pip).astype(int),\n\t\t\t\tself.size//10)\t\t\t\t\t# radius\n\t\t# Add the rect to the changed rects list (doesnt work on init)\n\t\tchanged_rects.append(self.rect)\n\n\t\treturn self.image", "def draw_number(self):\n text_color = (0, 0, 0)\n if self.bombs_around == 1:\n text_color = (0, 0, 150)\n if self.bombs_around == 2:\n text_color = (0, 150, 0)\n if self.bombs_around == 3:\n text_color = (150, 0, 0)\n if self.bombs_around == 4:\n text_color = (133, 39, 138)\n if self.bombs_around == 5:\n text_color = (128, 0, 0)\n if self.bombs_around == 6:\n text_color = (175, 238, 238)\n if self.bombs_around == 7:\n text_color = (0, 0, 0)\n if self.bombs_around == 8:\n text_color = (33, 161, 166)\n\n font = pygame.font.Font(\"fonts/JetBrainsMono-Bold.ttf\", 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(\n str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))", "def advanceDice():\n global diceAngle, ANGLE_STEP, counter, animateDice\n if counter >= 300: \n animateDice = False # stop the animation after a few seconds \n diceAngle += ANGLE_STEP\n counter += 1", "def 
draw_number(n, dot='*'):\r\n c, f, b, s = False, False, False, False\r\n if n in [1,3,5]: c = True\r\n if n in [2,4,5,6]: f = True\r\n if n in [3,4,5,6]: b = True\r\n if n in [6]: s = True\r\n return draw_dice(c, f, b, s, dot)", "async def roll(self, ctx, number : int = 100):\r\n author = ctx.message.author\r\n if number > 1:\r\n n = randint(1, number)\r\n await self.bot.say(\"{} :game_die: {} :game_die:\".format(author.mention, n))\r\n else:\r\n await self.bot.say(\"{} Maybe higher than 1? ;P\".format(author.mention))", "def intro_dots():\n i = 0\n time.sleep(0.6)\n while i != 3:\n color.write(\".\")\n time.sleep(0.3)\n i += 1\n print(\"\")", "def tellGameNumber(self):\n t = time.time() - self.start_time\n d = self.start_duration\n if t < d:\n c = int(255 * (1 - (t / d)))\n self.window.alert(\"Starting game number \" + str(self.game_number))", "def hide_number(progression):\n hidden_number_index = random.randint(0, len(progression) - 1)\n hidden_number = progression[hidden_number_index]\n progression[hidden_number_index] = '..'\n return ' '.join(progression), hidden_number", "def roll_ball():\n number_rolled = random.randint(0, 37)\n if green.count(number_rolled) > 0:\n color_rolled = \"green\"\n elif red.count(number_rolled) > 0:\n color_rolled = \"red\"\n elif black.count(number_rolled) > 0:\n color_rolled = \"black\"\n\n print(\"\\nNumber: %i - Color: %s\" % (number_rolled, color_rolled))\n return(number_rolled, color_rolled)", "def display_number_picker(number1, number2):\n for i in range(3):\n for j in range(5):\n sense.set_pixel(i+1, j+3, [255, 255, 255]) if number1[j][i] == 1 else sense.set_pixel(i+1, j+3, [0, 0, 0])\n sense.set_pixel(i+5, j+3, [255, 255, 255]) if number2[j][i] == 1 else sense.set_pixel(i+5, j+3, [0, 0, 0])\n offset = 0\n if index_picker % 2 == 1:\n offset = 4\n for i in range(3):\n for j in range(2):\n sense.set_pixel(i+1+offset, j, [R[0], R[1], R[2]])if ARROW[j][i] == 1 else sense.set_pixel(i+1+offset, j, [0, 0, 0])", "def color_chase(self, color: tuple = CYAN, wait: float = DEFAULT_SPEED):\n for i in range(self.np.n):\n self.np[i] = color\n time.sleep(wait)\n self.np.show()\n return True", "def display_number(number1, number2):\n top = TOP_DISPLAY[STATES[index_state]]\n ret_tab = [O]*64\n for i in range(8):\n for j in range(3):\n ret_tab[8 * j + i] = top[j][i]\n for i in range(3):\n for j in range(5):\n ret_tab[8 * (j+3) + i+1] = [255, 255, 255] if number1[j][i] == 1 else [0, 0, 0]\n ret_tab[8 * (j+3) + i+5] = [255, 255, 255] if number2[j][i] == 1 else [0, 0, 0]\n sense.set_pixels(ret_tab)", "def roll(self):\n\t\trnd = random.randint(1, 6)\n\t\tself.draw_number(rnd)\n\t\treturn rnd", "def demo_a_number():\n random_number=randint(0,100)\n number=randint(0,100)\n print (random_number)\n print (number)\n if number == random_number:\n print('correct number')\n while number!=random_number:\n if number >random_number:\n print('number too high')\n number=randint(0,number)\n print(number)\n else:\n print('number too low')\n number=randint(number,100)\n print(number)\n print ('correct number: ')\n print(number)", "def display(self, color = (190,205,205), add = False):\r\n\t\tpass", "def roll_dice():\n print(colored(\"Lanzando tu dado...\", \"green\", attrs=['bold']))\n while True:\n dice = random.randint(1, 6)\n if dice != 3:\n return dice\n else:\n print(colored(\"Tu dado es 3, lancemos de nuevo\", \"green\", attrs=['bold']))\n continue", "def draw(self):\n if self.visible:\n glColor3f(self.r, self.g, self.b)\n graphicsBall(self.x, self.y, self.radius)\n\n if 
self.number <= 8:\n glColor3f(1.0, 1.0, 1.0)\n else:\n glColor3f(0.0, 0.0, 0.0)\n\n graphicsBall(self.x, self.y, self.radius / 2)\n\n if self.number > 0:\n if self.number > 8:\n glColor3f(1.0, 1.0, 1.0)\n else:\n glColor3f(0.0, 0.0, 0.0)\n\n if self.number < 10:\n graphicsText(self.x - 2, self.y - 3.5, str(self.number))\n else:\n graphicsText(self.x - 4.5, self.y - 3.5, str(self.number))", "def setRandomColor():\n setColor(getRandomColor())", "def anim():\n i = 0\n while 1:\n\n for r in Reprs:\n r.draw(i)\n i = i+ 1\n i = i % len(t)\n yield", "def draw_250():\r\n red = 0\r\n grn = 0\r\n arr = urn_setup()\r\n for v in range(250):\r\n n = np.random.randint(0,len(arr))\r\n x, arr = draw_remove_element(arr, np.random.randint(0,len(arr)))\r\n if x == 'R':\r\n red += 1\r\n elif x == 'G':\r\n grn += 1\r\n print(\"Red: %d :: Grn: %d\" % (red, grn))\r\n return red, grn", "def theater_chase(strip, colors, run_time=RUN_TIME):\n timeout_start = time.time()\n while time.time() < timeout_start + run_time:\n for color in colors:\n for j in range(10):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, COLOR_CODES[color])\n strip.show()\n time.sleep(50/1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, 0)", "def mutate_color(mutated_genome):\n seed = random.randint(0,2)\n if seed == 0:\n new_color(mutated_genome)\n elif seed == 1:\n change_color(mutated_genome)\n else: #seed == 2:\n switch_colors(mutated_genome)\n #else: seed == 3: # depricated\n # shuffle_colors(mutated_genome)", "def animation1(strip, wait_ms=1, range_begin=0, range_end=-1, iteration_step=-1):\n if range_end == 1:\n range_end = strip.numPixels()\n\n j = iteration_step\n\n pixel_to_change = iteration_step % (range_end - range_begin) + range_begin\n\n if pixel_to_change - range_begin == 0:\n for i in range(range_begin, range_end):\n strip.setPixelColor(i, Color(0, 0, 255))\n strip.show()\n time.sleep(wait_ms/1000)\n for i in range(range_begin, range_end):\n strip.setPixelColor(i, Color(255, 0, 0))\n strip.show()\n #time.sleep(wait_ms/1000.0)", "def theaterChase(strip, color, wait_ms=50, iterations=10):\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, color)\n strip.show()\n time.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, 0)", "def mutate_color(color):\n color[random.randrange(0, 3)] = random.random() % 1\n return color", "def next_generation(self, color):\n\t\t# add color to history\n\t\tself.current.set(color)\n\t\tif not color in self.history.get(0, self.history.size()):\n\t\t\tself.history.insert(0, color)\n\t\t\tself.history.itemconfig(0, background=color)\n\t\t# calculate new generation of colors\n\t\tfor (col, row) in itertools.product(range(3), range(3)):\n\t\t\tlabel = self.labels[row][col]\n\t\t\tlabel['bg'] = modify_color(color, self.sigma.get())\n\t\tself.center['bg'] = color" ]
[ "0.64256155", "0.6405366", "0.6212849", "0.59545386", "0.5949734", "0.58733165", "0.58445084", "0.5805371", "0.57400924", "0.57233775", "0.5666692", "0.56376994", "0.5635846", "0.5585724", "0.5536559", "0.5530062", "0.55047804", "0.54716986", "0.5459184", "0.5436751", "0.542917", "0.5427509", "0.5401528", "0.5371092", "0.5358417", "0.5344992", "0.53442234", "0.5338349", "0.533513", "0.5328976" ]
0.77046764
0
Generate a random dice roll. Returns the total of the roll.
number: the number of dice to roll
sides: the number of sides on the dice to roll (4, 6, 8, 10, 12, 20)
def roll(number, sides):
    total = 0
    for _ in range(number):
        total += random.randint(1, sides + 1)
    return total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roll_die(number_of_rolls: int, number_of_sides: int) -> int:\r\n if number_of_rolls <= 0 or number_of_sides <= 0:\r\n return 0\r\n\r\n max_total = number_of_sides * number_of_rolls\r\n\r\n return random.randint(number_of_rolls, max_total)", "def roll_dice(self):\r\n return randint(1,self.sides)", "def roll_dice(number,faces):\n \n dice_number = 0\n for i in range(number):\n dice_number += random.randint(1, faces)\n return dice_number", "def roll_dice():\n return (random.randint(1, 6) + random.randint(1, 6))", "def roll_dice():\n roll = random.randint(1, 6)\n return roll", "def roll_die(number_of_rolls, number_of_sides):\n\n roll = random.randint(1, number_of_sides) # Used recursion for this\n if number_of_rolls == 0:\n return 0 # Base case is 0. If it's 1, then I can roll a 7 with 6 sides\n else:\n return roll + roll_die(number_of_rolls - 1, number_of_sides) # Subtract 1 roll and keep calling function", "def roll_dice(num_dice, die_type):\n result = 0\n for i in range(num_dice):\n result += random.randint(1, die_type)\n\n return result", "def roll_die(self):\n number = randint(1, self.sides) \n print(number)", "def diceRoll():\n return randint(1,6)", "def roll(d=20):\n\treturn random.randint(1, d)", "def roll_dice(self):\n self.roll = (random.randint(1,6), random.randint(1,6))\n return self.roll", "def diceRoll():\n return random.randint(1, 6) # generates a random integer between 1 and 6 (inclusive) and returns it.", "def roll_dice():\n numbers = ['1', '2', '3', '4', '5', '6']\n return random.choice(numbers)", "def roll_dice(num_of_dice=1):\r\n sides = 6\r\n return [random.randrange(1, sides+1) for _ in xrange(num_of_dice)]", "def roll(self):\n total = 0\n\n if self.num_dice is not None and self.dice_type is not None:\n for _ in range(self.num_dice):\n total += randint(1, self.dice_type)\n elif self.min_value is not None and self.max_value is not None:\n total = randint(self.min_value, self.max_value)\n\n return total + self.plus", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n \"*** YOUR CODE HERE ***\"\n count, return_sum = 0, 0\n while count < num_rolls:\n roll = dice()\n if roll == 1:\n count += 1\n while count < num_rolls:\n dice()\n count += 1\n return 1\n return_sum += roll\n count += 1\n return return_sum\n # END PROBLEM 1", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n num_roll = 0\n sum = 0\n pig_out = False # Pig Out rule\n while num_roll < num_rolls:\n roll = dice()\n if roll == 1:\n pig_out = True\n sum += roll\n num_roll += 1\n if pig_out: return 1\n else: return sum\n # END PROBLEM 1", "def roll(self):\n return random.randrange(1, sides + 1)", "def rollDie(self):\n return random.randint(1, self.sides)", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n roll_sum = 0 # sums values of rolled dice\n ones_total = 0 # counts number of times the value 1 is rolled\n while num_rolls>0:\n current_roll = dice()\n if current_roll==1:\n ones_total += 1\n roll_sum += 
current_roll\n num_rolls -= 1\n if ones_total > 0:\n return ones_total\n else:\n return roll_sum\n # END PROBLEM 1", "def roll(self):\n\t\treturn randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll_die(sides = 6, maxi = 6):\n d = 1000\n # discard highest roll(s)\n while d > maxi:\n d = random.randint(1,sides)\n return d", "def roll(self):\n return randint(1, self.sides)", "def roll(self):\n return randint(1, self.sides)", "def roll(self) -> int:\n return self.rand.randint(1, self.sides)", "def roll_2_dice():\n return random.randint(2, 13)" ]
[ "0.8306328", "0.80792737", "0.8031863", "0.79754895", "0.7949489", "0.78290707", "0.77576977", "0.77334857", "0.7669618", "0.7665575", "0.7655441", "0.7631616", "0.76303476", "0.7616661", "0.75856316", "0.7581803", "0.75760746", "0.7549325", "0.7533779", "0.7507938", "0.74926686", "0.7477558", "0.7477558", "0.7477558", "0.7477558", "0.7474715", "0.7465982", "0.7465982", "0.740293", "0.7391298" ]
0.8385434
0
Detect when the Trellis is shaken.
def shaken():
    global previous_reading
    result = False
    x, y, z = accelerometer.acceleration
    if previous_reading[0] is not None:
        result = (math.fabs(previous_reading[0] - x) > bound and
                  math.fabs(previous_reading[1] - y) > bound and
                  math.fabs(previous_reading[2] - z) > bound)
    previous_reading = (x, y, z)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def always_touching(self):\n assert int(self.snake[0].real - self.snake[1].real) in [1, 0, -1] and int(\n self.snake[0].real - self.snake[1].real) in [1, 0, -1]", "def take_turn(self):\n if self.fired:\n return None\n\n self.tick_needs()\n # TODO: Currently dropping Trash, stuff that doesn't satisfy, where ever\n # May want to look for Trash Can at some point\n # Dropping first Trash item found when inventory full\n if self.inventory_full():\n trash = filter(lambda x: any(s not in self.needs for s in x.satisfies), self.inventory)\n for t in trash:\n print(f\"{self.name} dropped {t.name}\")\n self.drop_item(t)\n break\n\n # If not preoccupied, check needs and do stuff\n if not self.occupied:\n self.check_needs()\n self.move_to_target()", "def Shuriken(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def shooting(self):\r\n return not self.stopped", "def stats_change(self):\n return True if self.board.prev_state != self.board.shot_count else False", "def heal(self):\n self.infected = False", "def checkForPickup(self):\n if self.counter == 0:\n if self.game.player.reticule in self.overlapping_sprites and (games.keyboard.is_pressed(games.K_a) \\\n or games.keyboard.is_pressed(games.K_d)):\n self.counter = 15\n if self.held == 0:\n self.game.player.held_item = self\n self.held = 1\n self.y = self.game.player.y\n else:\n self.game.player.held_item = None\n self.held = 0", "def trigger_violence(self):\n # First time offender get registered in the system and changes category into an Aggressor and a Victim\n if self.assaulted == 0:\n if self.stress > self.random.random():\n self.category = 'aggressor'\n self.assaulted += 1\n self.spouse.category = 'victim'\n self.spouse.got_attacked += 1\n\n # Second-time offender, checks to see if it is a recidivist.\n elif self.stress > self.random.random():\n self.assaulted += 1\n self.spouse.got_attacked += 1", "def is_hold(self):\n status = self.gpio.input(self.pin)\n\n if status == 1:\n if not self.time_set_status:\n self.time_set_status = time.time()\n if time.time() - self.time_set_status > self.hold_time:\n self.time_set_status = time.time()\n return 1\n else:\n self.time_set_status = None\n return 0", "def _check_bullet_cooldown(self):\n time_now = pygame.time.get_ticks()\n if time_now - self.last_bullet_fired >= self.bullet_cooldown:\n self.shoot_disabled = False", "def in_check(self):\r\n if self.turn_white:\r\n return self.square_under_attack(self.wKingPos[0], self.wKingPos[1])\r\n else:\r\n return self.square_under_attack(self.bKingPos[0], self.bKingPos[1])", "def lander_failure(self):\n failures_list = [\"Right Rotation\",\n \"Left Rotation\",\n \"Thrust\"\n ]\n if self.failure_ticks == 0:\n self.failure = 0\n if random.uniform(0, 1) < FAILURE_CHANCE:\n self.failure_ticks += FAILURE_DURATION\n self.failure = random.choice(failures_list)\n return False\n else:\n self.failure_ticks -= 1\n return True", "def gsteady(self, Ppump):\n return(self.steadystate(Ppump)[1])", "def crowned(self): # called when this piece has become a 'King'\r\n \r\n self.isKing = True", "def HellFire_ShotGuns(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def is_shooting(self):\n if self.gun_interface:\n return self.gun_interface.is_preparing()\n return False", "def mask(self):\n if pygame.time.get_ticks() % 500 >= 250:\n return self._mask_wingup\n else:\n return self._mask_wingdown", "def sprinkler_tick(self, water_available=-1):\n # idk if Python uses pass by reference, so I'm copying this variable just to be safe\n available_water = 
water_available\n for room in self.room_list:\n if room.sprinkling and room.fire_level > 0 and available_water != 0:\n room.fire_level -= 1\n available_water -= 1\n ## decrement num_onfire if fire was fully extinguished\n if room.fire_level == 0:\n self.num_onfire -= 1", "def _on_rose_timer(self):\n rose = self._rose\n rose.setMode(self._target_rose_mode)\n rose.mouseOver(self._hover_pos)\n self._show_band = True\n self._update_band_state()", "def hit(self):\n if self._power == 0 :\n return False\n self._power -= 1\n return True", "def _soundhelper(self):\n self._click()\n if self._last is None and self._touch is not None:\n if self._soundImage.contains(self._touch.x, self._touch.y):\n self._sound = not self._sound\n if self._soundImage.source == 'whitevolumeon.png':\n self._soundImage.source = 'whitevolumenull.png'\n else:\n self._soundImage.source = 'whitevolumeon.png'", "def is_shot(event):\n event_id = event['eventId']\n return event_id == 10", "def _hunting_mode(self):\n grid = self._grid\n width, height = grid.dimensions()\n valid_shot = False\n while not valid_shot:\n pos = (randint(0, width-1), randint(0, height-1))\n hit = grid.shoot(pos)\n shot = hit.cell\n valid_shot = shot not in HITS\n # if shot is valid\n if shot in SHIPS:\n self._stack += self._get_neighbours(pos)\n self._mode = TARGETING\n log(\"[HUNT]: Hit a ship at \" + str(pos) + \", going into targeting.\")\n elif shot == WATER:\n log(\"[HUNT]: Missed at \" + str(pos))\n if valid_shot:\n self.shots.add(pos)\n return shot", "def tile_picked(self):\n assert len(self.hand) == 5\n self.turn_count += 1", "def fireWest(self):\n self.rotate('w')\n gun = Laser(self)\n gun.shoot('w')\n self.agent.actionCompleted()", "def can_take_damage(self):\n result = True\n if self.side_effects[\"shield\"] > 0:\n result = False\n return result", "def is_ringing(self) -> bool:", "def on_turnover(self):\n return True if self.rotor_setting in self.turnover_characters else False", "def left_twist(self):\n self.turn_by_deg(-179)\n #time.sleep(.1)\n self.stop()\n self.turn_by_deg(-179)\n #time.sleep(.1)\n self.stop()", "def _check_retry_button(self, mouse_position):\n\t\tif self.gameover_images.retry_rect.collidepoint(mouse_position):\n\t\t\tself._reset_game()" ]
[ "0.6046966", "0.5901135", "0.58888614", "0.5877499", "0.5703894", "0.5684475", "0.56534463", "0.56150085", "0.5550172", "0.55474705", "0.5539146", "0.55061305", "0.545888", "0.5455816", "0.545385", "0.54346234", "0.5419669", "0.54050684", "0.5394224", "0.5392888", "0.5390452", "0.53868634", "0.53835183", "0.5383073", "0.536768", "0.53543216", "0.53402597", "0.53282857", "0.5327451", "0.5325288" ]
0.65306956
0
Create a user with a random email. Perfect for tests where you just need to set a user on some data model, but the user is not used for anything.
def create_random_user():
    try:
        user = get_user_model().objects.create(
            email='{}@example.com'.format(uuid.uuid4()))
    except IntegrityError:
        return create_random_user()
    else:
        user.set_password('test')
        user.save()
        return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_user_dynamic_email(email):\n return get_user_model().objects.create_user(email=email,\n password=\"password123\",\n name=\"some name\")", "def sample_user(email=user_v['email'], password=user_v['password']):\n return get_user_model().objects.create_user(email, password)", "def createOtherUser(self, email):\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import FixedUserProvider\n properties = {'account': FixedUserProvider(value=email), 'status': 'valid'}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def generate_random_user():\n name = names.get_first_name()\n return User(name=name, email=f\"{name}@example.com\", password=\"testing_password\")", "def sample_user(email: str = \"[email protected]\", password: str = \"testpass\"):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='open@123'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user_fifth(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name5\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def sample_user(email=\"[email protected]\", password=\"password123\"):\n\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='password'):\n return get_user_model().objects.create_user(email, password)", "def sample_user_third(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name3\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def sample_user(email='[email protected]', password='testpass'):\n\n return get_user_model().objects.create_user(email, password)", "def sample_user(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def sample_user_fourth(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name4\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def sample_user(email, password, is_doctor, is_hospital_admin):\n return MyUser.objects.create_user(email, is_hospital_admin, is_doctor, password)", "def _create_random_user(self,startname=\"\",site=None):\n \n username = startname + \"\".join([choice('AEOUY')+\n choice('QWRTPSDFGHHKLMNB')\n for x in range(3)])\n \n data = {'username':username,\n 'email':username+\"@test.com\"}\n \n return self._create_user(data,site)", "def test_creating_a_new_user_without_email(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, \"Test1234\")", "def sample_user_second(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name2\"):\n 
return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def _create_user(self, email, **extra_fields):\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.save(using=self._db)\n return user", "def create_user(name, email):\n user = register(name, email)\n add_message(user=user, text=config.MSG_WELCOME)\n add_message(user=user, text=config.MSG_UNVERIFIED, can_dismiss=False)\n return user", "def create_user(self,email,password=None, **extra_fields):\n\n if not email: \n raise ValueError('Users must have an email address')\n #sets the email field of your user model, this is done on the model itself because there are no functions to change it.\n user = self.model(email=self.normalize_email(email), **extra_fields) \n user.set_password(password)\n user.save(using=self._db) #save using the defualt database in the settings.py file.\n\n return user", "def create_user(email, password):\n email_used = AuthUser.query.filter_by(email=email).first()\n if email_used:\n return False, \"Email address has already been used\"\n account = Account(email)\n account.plan_key = 'BASIC'\n account.is_active = True\n account.created = datetime.datetime.now()\n db.session.add(account)\n user = AuthUser(email, password, account)\n user.created = datetime.datetime.now()\n db.session.add(user)\n db.session.commit()\n return user.id, None", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(\"The given email must be set\")\n try:\n with transaction.atomic():\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.generate_activation_code()\n user.save(using=self._db)\n return user\n except:\n raise", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\"\", \"test42837492374923749\")", "def test_create_use_with_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, password='open@123')", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'test123456')" ]
[ "0.83424205", "0.81030285", "0.8037546", "0.8030638", "0.7968194", "0.7859456", "0.7846218", "0.7839635", "0.7839635", "0.7839635", "0.78273344", "0.78226376", "0.78000873", "0.77878624", "0.77727425", "0.7754782", "0.77377146", "0.7733991", "0.7668458", "0.7652283", "0.76155853", "0.7543719", "0.7488673", "0.74658835", "0.7417096", "0.739224", "0.73732513", "0.737066", "0.73639226", "0.7363418" ]
0.8454943
0
Roll the die once and insure the value is in possibleValues
def test_roll_once(self): self.assertIn(self.new_die.roll(), self.possible_values, "Rolled value was not in possible die values")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roll(self):\n self.currentValue = choice(self.possibleValues)\n self.value = AngryDie.ANGRY_VALUES[self.currentValue]\n return self.currentValue", "def roll(self):\n #dieValue = [] \n self._value = random.randrange(Die.SIDES) + 1\n self._update()\n #dieValue.append(self._value)\n #print(dieValue)\n #print(self._value)\n self._valueA = random.randrange(Die.SIDES) + 1\n #self._update2()\n #print(self._valueA)", "def random_die():\n return randrange(1, 6)", "def die():\n return random.randint(1,6)", "def wyldingHand(self, level):\n if level == 0:\n die_result = random.randint(1,6)\n elif level == 1:\n die_result = random.randint(1,10)\n elif level == 2:\n die_result = random.randint(1,6) + random.randint(1,6)\n elif level == 3:\n die_result = random.randint(1,8) + random.randint(1,8)\n\n return die_result", "def check_cheating(self, dice=[]):\n\n #Assume they're not cheating until proven guilty\n self.cheating = False\n\n if self.current_stage == 3:\n if self.die_a not in dice and (self.die_a.value == 6):\n print(\"You're cheating! You cannot lock a 6! You cannot win \"\n \"until you reroll it!\")\n self.cheating = True\n elif self.die_b not in dice and (self.die_b.value == 6):\n print(\"You're cheating! You cannot lock a 6! You cannot win \"\n \"until you reroll it!\")\n self.cheating = True", "def advance_check(self):\n values = [self.die_a.value, self.die_b.value]\n if self.stage == 3:\n if not self.cheating and \"5\" in values and \"6\" in values:\n return True\n if self.stage == 2 and \"ANGRY\" in values and \"4\" in values:\n self.stage = 3\n if self.stage == 1 and \"1\" in values and \"2\" in values:\n self.stage = 2\n if self.die_a.value == self.die_b.value == \"ANGRY\":\n print(\"WOW, you're ANGRY!\")\n self.stage = 1\n return False", "def throw(self, move):\n for dice_index in move:\n self.dice[dice_index - 1] = random.randint(1,6)", "def rollDie():\n return random.choice([1, 2, 3, 4, 5, 6])", "def sixteen_is_dead(players):\n \n number = setup_number_of_dices()\n faces = setup_number_of_faces()\n result_list = []\n for player in range(1, players+1):\n total_points = 0\n while total_points < 16:\n user_input = user_interface(player)\n if user_input == \"\":\n while True:\n user_input_2 = user_interface_2()\n if user_input_2 == \"\":\n dice_number = roll_dice(number,faces)\n total_points += dice_number\n print()\n print (\"Deine aktuelle Punktzahl beträgt:\",total_points)\n print()\n if total_points == 10:\n time.sleep(3)\n continue\n else:\n break\n else:\n dice_number = roll_cheating_dice(number,faces)\n total_points += dice_number\n print()\n print (\"Deine aktuelle Punktzahl beträgt:\",total_points)\n print()\n if total_points == 10:\n time.sleep(3)\n continue\n else:\n break\n if (total_points >= 16) or (total_points == 9) or (user_input == \"n\"):\n print()\n break\n if total_points < 16:\n result_list.append(total_points)\n else:\n print()\n break\n if total_points >= 16:\n print(\"Spieler\",player,\"hat das Spiel verloren!\")\n print()\n restart()\n else:\n player = 1\n for i in result_list:\n if i == min(result_list):\n print(\"Spieler\",player,\"hat das Spiel mit\",i,\"Punkten verloren!\")\n player += 1\n print()\n restart()", "def throw_dice(self):\n self.dice = []\n for i in range (6):\n die = random.randint(1, 6)\n self.dice.append(die)\n self.num_throws += 1", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = [number+1 for number in range(num_die_sides)]\n die_seqs = list(gen_all_sequences(outcomes, num_free_dice))\n for idx in 
range(len(die_seqs)):\n seq = list(die_seqs[idx])\n seq.extend(list(held_dice))\n die_seqs[idx] = tuple(seq)\n scr = 0.0\n for seq in die_seqs:\n scr += score(seq) \n return scr / len(die_seqs)", "def reroll_selected_dice(selected_dice, yatzy_dice):\n for die in selected_dice:\n yatzy_dice[die] = random_die()", "def randomize_value(self) -> None:", "def test_roll_value_changes(self):\n\n holding_value = self.new_die.roll()\n for i in range(10):\n if self.new_die.roll() != holding_value:\n print(\"Rolled die value {} is different from Holding Value {}\".format(self.new_die.currentValue, holding_value))\n self.assertTrue(True)\n return\n\n self.assertTrue(False, \"Die value did not change from Holding Value for 10 rolls\")", "def test_3_incorrect_value(self):\n d = copy.deepcopy(self.fitness_dict)\n d['WorkoutType'] = 1\n self.assertFalse(self.fitness.insert_in_database(d))\n\n d = copy.deepcopy(self.fitness_dict)\n d['Minutes'] = 'Running'\n self.assertFalse(self.fitness.insert_in_database(d))\n\n d = copy.deepcopy(self.fitness_dict)\n d['CaloriesBurned'] = 1\n self.assertFalse(self.fitness.insert_in_database(d))", "def _rollOneDie(self):\n return random.randint(1, 6)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = get_outcomes(num_die_sides)\n print \"outcomes:\", outcomes\n\n # generate all possible sequences of rolls\n all_rolls = list(gen_all_sequences(outcomes, num_free_dice))\n results = [max_repeats(roll) for roll in all_rolls]\n value = 0.0 \n\n\n for result in all_rolls:\n curr_hand = tuple(list(held_dice) + list(result))\n value += score(curr_hand)\n\n return value / len(all_rolls)", "def throw_dice():\n return randint(1, 6) + randint(1, 6)", "def test_currentValue_is_updated_to_roll_value(self):\n rolled_value = self.new_die.roll()\n if rolled_value == self.new_die.currentValue:\n self.assertTrue(True, \"currentValue {} matches the rolled value\".format(self.new_die.currentValue))\n return", "def attack(self): # need to check defenders handcount\n \"\"\"Always returns a list of values\"\"\"\n if self.AI:\n # return rand.randint(0,len(self.currentHand))\n Error(\"AI not yet implemented for Attacking\")\n else:\n print(\"Select card from... \")\n cardManager.printHand(self.currentHand)\n card = int(input(\"to your attack: \"))\n while card not in self.currentHand: # error checking\n print(\"Please select a valid card from...\", end = \" \")\n cardManager.printHand(self.currentHand)\n card = int(input())\n self.currentHand.remove(card)\n card = self.checkDoubles(card)\n return card", "def starve_checker(hunger):\n death_chance = -30\n hunger -= 1\n\n if (death_chance * (hunger-1)) > random.randint(1,100):\n death = True\n else:\n color.write(\"Somehow, through divine intervention, you manage to survive though the pain, although you know that the end is near. 
You should definitely eat something.\\n\",\"ERROR\")\n death = False\n return death", "def starve(self):\n # maybe push condition to caller\n if self.food.get() < self.population.get(): \n self.population.set(self.food.get())\n if self.population.get() <= 0:\n self.die()", "def is_die_held_in_wrong_stage(self, die):\n if type(die) != type(die_class.Die()):\n raise TypeError(\"Expecting Die argument.\")\n if self.game_stage == 1:\n return die.current_value not in die.possible_values[0:2]\n if self.game_stage == 2:\n return die.current_value not in die.possible_values[2:4]\n if self.game_stage == 3:\n return die.current_value not in die.possible_values[4:6]", "def strategy(hand, num_die_sides):\n result = (0.0, ())\n current_value = float('-inf')\n \n for item in gen_all_holds(hand):\n value = expected_value(item, num_die_sides, len(hand) - len(item))\n if value > current_value:\n current_value = value\n result = (current_value, item)\n \n return result", "def dance(self):\n if not self.safe_to_dance():\n return False #shutdown\n for x in range(4): \n self.shuffle()\n self.skipp()\n self.spin_dizzy()\n self.for_back()\n self.break_neck()\n self.swiggly()\n self.break_neck()\n self.backward_shimmey()", "def die(world, percentage):\n\n \n infected = np.sum((world >= 1) & (world <= 10))\n to_die = percentage * infected\n if to_die < 1:\n to_die = 0\n else:\n to_die = to_die\n to_die = np.round(to_die).astype(int)\n\n\n indizes = [] # Für die Koordinaten der infizierten Zellen\n for i, v in np.ndenumerate(world):\n if v in range(1, 11):\n indizes.append(i)\n #Ziehe Stichprobe aus den infizierten Zellen und setze sie auf 300\n sample = random.sample(indizes, to_die)\n for i in sample:\n world[i] = 300\n \n return world", "def choosePiece(pieceList):\n dice = [1, 2, 3, 4, 5, 6]\n if len(pieceList) > 1:\n diceRoll = random.choice(dice)\n print(\"Dice Roll:\", diceRoll)\n if not any(piece for piece in pieceList if piece.value == diceRoll):\n # Piece is dead, finds next highest/lowest\n nextUp = -1\n nextDown = -1\n for i in range(diceRoll + 1,6):\n if any(piece for piece in pieceList if piece.value == i):\n nextUp = i\n break\n for i in range(diceRoll - 1, -1, -1):\n if any(piece for piece in pieceList if piece.value == i):\n nextDown = i\n break\n if nextUp == -1:\n print(\"Piece\", diceRoll, \"is dead.\")\n diceRoll = nextDown\n elif nextDown == -1:\n print(\"Piece\", diceRoll, \"is dead.\")\n diceRoll = nextUp\n else:\n print(\"Piece \", diceRoll, \" is dead. Choose \", nextDown, \" or \", nextUp, \".\", sep = '')\n diceRoll = input(\"> \")\n # Obtains user input\n while(diceRoll != str(nextUp) and diceRoll != str(nextDown)):\n diceRoll = input(\"Invalid choice. Please try again.\\n> \")\n diceRoll = int(diceRoll, base = 10)\n else:\n diceRoll = pieceList[0].value\n print(\"Only 1 piece left.\")\n\n return [piece for piece in pieceList if piece.value == diceRoll][0]", "def take_damage(self, value, type_=None):\n if type_ in self.resistances:\n taken = math.floor(value / 2)\n # TODO (phillip): event log should show that damage was reduced\n elif type_ in self.vulnerabilities:\n taken = value * 2\n else:\n taken = value\n\n # Only used to return at the end\n actual_taken = min(self.hp, taken)\n\n self.hp -= taken\n if self.hp < -self.max_hp:\n # TODO (phillip): Implement creature death\n pass\n\n self.hp = max(0, self.hp)\n return actual_taken", "def rolldie():\n return int(random.random()*6)+1 # or use randrange()" ]
[ "0.6406445", "0.6295456", "0.58946204", "0.58158296", "0.5748856", "0.56759375", "0.5639539", "0.56361294", "0.5632964", "0.558092", "0.55762345", "0.55376804", "0.5525275", "0.54982567", "0.54963", "0.5475915", "0.54747736", "0.54597336", "0.5452825", "0.543778", "0.5421019", "0.54164207", "0.541469", "0.5407986", "0.53746474", "0.536787", "0.5345295", "0.5343674", "0.53386766", "0.53345805" ]
0.7380792
0
Make sure the Die's currentValue is updated to match what is rolled
def test_currentValue_is_updated_to_roll_value(self): rolled_value = self.new_die.roll() if rolled_value == self.new_die.currentValue: self.assertTrue(True, "currentValue {} matches the rolled value".format(self.new_die.currentValue)) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_roll_value_changes(self):\n\n holding_value = self.new_die.roll()\n for i in range(10):\n if self.new_die.roll() != holding_value:\n print(\"Rolled die value {} is different from Holding Value {}\".format(self.new_die.currentValue, holding_value))\n self.assertTrue(True)\n return\n\n self.assertTrue(False, \"Die value did not change from Holding Value for 10 rolls\")", "def roll(self):\n #dieValue = [] \n self._value = random.randrange(Die.SIDES) + 1\n self._update()\n #dieValue.append(self._value)\n #print(dieValue)\n #print(self._value)\n self._valueA = random.randrange(Die.SIDES) + 1\n #self._update2()\n #print(self._valueA)", "def roll(self):\n self.currentValue = choice(self.possibleValues)\n self.value = AngryDie.ANGRY_VALUES[self.currentValue]\n return self.currentValue", "def test_roll_once(self):\n\n self.assertIn(self.new_die.roll(), self.possible_values, \"Rolled value was not in possible die values\")", "def account_for_new_score(self):\n self.rolls += 1\n if self.continued is True:\n self.total_score += self.current_roll.score\n self.dice_remaining = self.current_roll.dice_remaining\n\n if self.dice_remaining == 0:\n self.resets += 1\n self.dice_remaining = 5", "def test_reroll_dice(self):\n self.roll.current_dice_list = [1, 2, 3, ]\n self.roll.keeper_dice_list = [1, 2]\n\n self.roll.reroll_dice(self.roll.current_dice_list)\n\n self.assertEqual(len(self.roll.current_dice_list), 5)\n self.assertEqual(len(self.roll.keeper_dice_list), 0)\n self.assertEqual(self.roll.current_dice_list[3], 1)\n self.assertEqual(self.roll.current_dice_list[4], 2)", "def roll(self):\n self._rollCount += 1\n self._die1.roll()\n self._die2.roll()\n (v1, v2) = (self._die1.getValue(),\n self._die2.getValue())\n self._lastRoll = (v1, v2)\n if self._initialSum == 0:\n initialSum = v1 + v2\n if initialSum in (2, 3, 12):\n return \"LOSE\"\n elif initialSum in (7, 11):\n return \"WIN\"\n else:\n return \"CONTINUE\"\n else:\n sum = v1 + v2\n if sum == 7:\n return \"LOSE\"\n elif sum == initialSum:\n return \"WIN\"\n else:\n return \"CONTINUE\"", "def update_last_roll(self, roll):\n\n # Increment the attribute by the passed value\n self._last_roll = roll", "def testRoll(self):\n \n nsides=3\n die = BaseDie(nsides)\n lighted_die = LightedDie(nsides,colors={1:'blue',2:'yellow',3:'gold'})\n\n self.assertEqual(die.last_roll,None)\n\n die.roll()\n lighted_die.roll()\n\n for d in [die,lighted_die]:\n self.assertTrue(d.last_roll>0 and d.last_roll <= nsides)", "def record_roll(self, roll):\n if roll == 1:\n self.turn_over = True\n self.score = 0\n else:\n self.score += roll", "def roll(self):\n self.rolled = random.randint(1, 6)\n return self.rolled", "def frame_roll(self, roll):\n\n self._pins -= roll.pins\n if self._pins < 0:\n raise ValueError(\"is someone cheating ?\")\n self._roll -= 1\n self._score.append(roll)", "def update_total_rolls(self):\n\n # Incremene the attribute by 1\n self._total_rolls += 1", "def test_roll_dice(self):\n # create partial current and keeper list to pass into roll_dice\n self.roll.current_dice_list = [1, 2, 3]\n self.roll.keeper_dice_list = [1, 2, 3]\n\n self.roll.roll_dice()\n\n self.assertEqual(len(self.roll.current_dice_list), 5)\n self.assertEqual(len(self.roll.keeper_dice_list), 0)\n\n for i, dice in enumerate(self.roll.current_dice_list):\n self.assertTrue(1 <= dice <= 6)", "def shoot(self, dice: Roll) -> tuple:\n result = dice.roll()\n self.remember(result)\n return result", "def hook_rolldice(self):\n return ui.roll(self)", "def roll(self):\n return self._roll", "def 
roll(self):\n return self._roll", "def roll(self):\n self.current_roll = random.randint(self.min, self.max)\n return self.current_roll", "def go(self):\n roll = self.die.roll()\n self.record_roll(roll)\n self.player.record_roll(roll)\n # print(\"{} you rolled a {} and your turn score is {}\".format(self.player.name, roll, self.score))\n if not self.turn_over:\n self.turn_over = not self.player.go_again()", "def __init__(self):\n self._die1 = Die()\n self._die2 = Die()\n self._lastRoll = None\n self._initialSum = 0\n self._rollCount = 0", "def throw(self, move):\n for dice_index in move:\n self.dice[dice_index - 1] = random.randint(1,6)", "def roll_die(self):\n number = randint(1, self.sides) \n print(number)", "def roll(dice):\n rolled_dice = []\n for die in dice[1]:\n rolled_dice.append(randint(1, CUBE_DICE_MAX_VALUE()))\n dice[1] = rolled_dice\n return dice", "def roll():\n pass", "def roll_dice(self):\n self.roll = (random.randint(1,6), random.randint(1,6))\n return self.roll", "def test_set_rolls(self):\n computer1 = computer.Computer(1)\n computer1.rolls = 5\n\n res = computer1.rolls\n exp = 5\n self.assertEqual(res, exp)", "def calculate_roll(self):\n # Takes all the numbers from the last roll and adds them up.\n for i in self.last_roll:\n self.result = int(self.result) + int(i)\n self.result = self.result + int(self.modifier + self.modifier_number)", "def reroll_selected_dice(selected_dice, yatzy_dice):\n for die in selected_dice:\n yatzy_dice[die] = random_die()", "def _test_1dice(self, state):\n\n # quit if tested entry has no value\n if state.selection.value is None:\n return state\n\n if state.dice == \"auto\":\n state.rolls = self._roll_dice(1, 1, 20)\n\n state.result = state.selection.value + state.mod - state.rolls[0]\n return state" ]
[ "0.7680647", "0.74207294", "0.7323546", "0.72374326", "0.63809884", "0.63557065", "0.6267854", "0.62388927", "0.6197402", "0.6189299", "0.61681074", "0.60864174", "0.6065291", "0.6056789", "0.6015279", "0.6010526", "0.5958524", "0.5958524", "0.5948918", "0.59019583", "0.5875331", "0.5871934", "0.5822006", "0.58180386", "0.5813809", "0.5792196", "0.5790852", "0.5782793", "0.5735243", "0.57317877" ]
0.81208175
0
Returns the pair (symbols, positions) where symbols is a row major array of images and positions is the corresponding centers of those images relative to the input image.
def get_symbol_images_and_positions(im): gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) _, threshold = cv2.threshold(gray, 100, 255, 0) threshold = 255 - threshold # show(threshold) contours, hierarchy = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) contours = [cv2.approxPolyDP(contour, 8, True) for contour in contours] contours = [c for c in contours if c.shape[0] == 4 and cv2.isContourConvex(c)] contours = sorted(contours, key=cv2.contourArea) contour = contours[-1] offset_x, offset_y, _, _ = cv2.boundingRect(contour) symbols_im = trim_to_contour_bounding_box(im, contour) half_height = symbols_im.shape[0] / 2 half_width = symbols_im.shape[1] / 2 symbols = ( symbols_im[:half_height, :half_width], symbols_im[:half_height, half_width:], symbols_im[half_height:, :half_width], symbols_im[half_height:, half_width:], ) symbols = (_process_button_im(symbol_im) for symbol_im in symbols) positions = ( (offset_x + half_width / 2, offset_y + half_height / 2), (offset_x + half_width * 3 / 2, offset_y + half_height / 2), (offset_x + half_width / 2, offset_y + half_height * 3 / 2), (offset_x + half_width * 3 / 2, offset_y + half_height * 3 / 2), ) return symbols, positions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coords_in(self, frame_size=None, shape=None, img=None):\n w, h = _to_frame_size(frame_size=frame_size, shape=shape, img=img)\n return [(int(round(x * w)), int(round(y * h))) for x, y in self.points]", "def get_coords_by_label_2D(image, label):\n coords = np.argwhere(image == label)\n y = [y for y, x in coords]\n x = [x for y, x in coords]\n return y, x", "def image(self, state):\n return state['positions']", "def image_to_points(numpy_image):\r\n res = []\r\n for i in range(numpy_image.shape[0]):\r\n for j in range(numpy_image.shape[1]):\r\n if numpy_image[i,j]==0:\r\n res.append([i,j])\r\n return res", "def image_coordinates(self, temp):\n iy = np.array((temp.y[:,None]-self.extent[2])/self.spacing[1],dtype=np.int64)\n ix = np.array((temp.x[None,:]-self.extent[0])/self.spacing[0],dtype=np.int64)\n return (iy,ix)", "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys", "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys", "def position(self):\n return self.atoms.reshape((1,-1))", "def __key_points(image_shape, point_list):\n keypoint_list = []\n for i in range(point_list.shape[0]):\n keypoint_list.append(ia.Keypoint(x=point_list[i, 0, 0], y=point_list[i, 0, 1]))\n return ia.KeypointsOnImage(keypoint_list,\n shape=ia.quokka(size=image_shape[:2]))", "def setup_positions(self):\n x, y = np.meshgrid(np.arange(self.img.shape[1]), np.arange(self.img.shape[0]))\n x = x[self.img > 0]\n y = y[self.img > 0]\n self.X = np.array([x, y]).T\n N = x.size\n pos2idx = {(x[i], y[i]):i for i in range(x.size)}\n neighbors = [[i] for i in range(N)]\n for i in range(N):\n xi = x[i]\n yi = y[i]\n for (dx, dy) in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n neighb = (xi+dx, yi+dy)\n if neighb in pos2idx:\n neighbors[i].append(pos2idx[neighb])\n self.pos2idx = pos2idx\n self.neighbors = neighbors", "def get_pixel_list(img):\n orig_shape = img.shape # Remember the original shape of the img.\n # Store the img as a x by z array (z being the length of the colour space)\n # Essentially just a list of pixels.\n\n if len(img.shape) == 3:\n img = img.reshape(img.shape[0] * img.shape[1], img.shape[2])\n elif len(img.shape) == 2:\n img = img.reshape(img.shape[0] * img.shape[1],)\n return orig_shape, img", "def _get_positions(self, image):\n\t\tH, W, _ = image.shape\n\t\tpos_list = self.apply_detection(image)\n\t\tdetections = {}\n\t\thasDetection = False\n\t\tfor i, L in enumerate(pos_list):\n\t\t\ttext, coordinates = L[0], L[1]\n\t\t\tfor x, y, w, h in coordinates:\n\t\t\t\tif x < 0 or y < 0 or x + w > W or \\\n\t\t\t\t y + h > H or w <= 1 or h <= 1:\n\t\t\t\t\tcontinue\n\t\t\t\t# add the detection to the dict for tracking\n\t\t\t\tif text == 'face' or text == 'super woman':\n\t\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text, -1)\n\t\t\t\telse:\n\t\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text, -2)\n\t\t\t\tdetections[self.num_detect] = (x, y, w, h)\n\t\t\t\tself.num_detect += 1\n\t\t\t\thasDetection = True\n\t\tif hasDetection:\n\t\t\tself.detection_frames[self.num_save] = detections\n\t\tself.num_save +=1", "def get_seed_points(img,seed_values):\n\n m,n = 
img.shape\n coordinates = [(i,j) for i,j in it.product(range(m),range(n)) if img[i,j] in seed_values]\n\n return coordinates", "def positions(self):\n method = 'get_xdata' if self.direction == 'horizontal' else 'get_ydata'\n return [getattr(line, method)()[0] for line in self.artists]", "def _get_patches_coords(image_sizes, patch_size):\n patches = []\n patch_grids = []\n stride = int(patch_size / 2)\n for k, dims in enumerate(image_sizes):\n width, height = dims\n\n n_w = math.ceil(width / patch_size) * 2 - 1 \\\n - int((width % patch_size) < stride)\n n_h = math.ceil(height / patch_size) * 2 - 1 \\\n - int((height % patch_size) < stride)\n n_w = n_w if n_w > 0 else 1\n n_h = n_h if n_h > 0 else 1\n\n for j in range(n_h):\n for i in range(n_w):\n patches.append([i*stride, j*stride,\n i*stride+patch_size-1, j*stride+patch_size-1,\n patch_size*patch_size, k])\n patch_grids.append([n_w, n_h])\n return patches, patch_grids", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def get_image_pair(self):\n pooled_images = self.real_images + self.fake_images\n img1_info = random.choice(self.real_images)\n if self.enable_fake_pairs:\n img1_info = random.choice(pooled_images)\n img2_info = random.choice(pooled_images)\n img1 = Image.open(img1_info[0])\n img2 = Image.open(img2_info[0])\n label1 = np.array([img1_info[1][0]])\n label2 = np.array([img2_info[1][0]])\n\n return img1, img2, label1, label2", "def get_positions(specs):\r\n xy = []\r\n for i, spec in enumerate(specs):\r\n slit = spec.split(\"n3311\", 1)[1].replace(\".fits\", \"\")\r\n # slit = spec.split(\".\")[0].split(\"_\", 1)[1][5:]\r\n index = canvas.slits.ids.index(slit)\r\n xy.append([canvas.slits.x[index], canvas.slits.y[index]])\r\n return np.array(xy)", "def test_synth_positions():\n background = Image.new('RGB', (30, 20))\n patch_1 = Image.new('RGB', (10, 10))\n patch_2 = Image.new('RGB', (20, 5))\n\n parameters = {'data': [background, patch_1, patch_2]}\n\n positions = images.synth_positions(parameters)\n\n assert_equal(positions[0][0], 0)\n assert_equal(positions[0][1], 5)\n assert_equal(positions[1][0], 10)\n assert_equal(positions[1][1], 5)", "def horizontal_pairings(mat):\n\tw, h = mat.shape\n\tx = mat[:,:-1]\n\ty = mat[:, 1:]\n\t\n\tx_cor_list = []\n\ty_cor_list = []\n\tfor i in range(w):\n\t\tfor j in range(h-1):\n\t\t\tx_cor_list.append(x[i, j])\n\t\t\ty_cor_list.append(y[i, j])\n\n\treturn x_cor_list, y_cor_list", "def _get_positions(self):\n position_map = dict()\n # Assumes that the positions are indexed in the order of Row-->Well-->FOV\n for well in self.wells:\n for pos in self.store[well].attrs.get('well').get('images'):\n pos_name = pos['path']\n # pos name is 'Pos_xxx'\n pos_idx = int(pos_name.split('_')[-1])\n position_map[pos_idx] = {'name': pos_name, 'well': well}\n return position_map", "def keys(self):\n\t\treturn iter(Point(x, y) for y, x in itertools.product(range(self.dims.height), range(self.dims.width)))", "def i_coords(self):\n ref_x = np.arange(-self.ref_w / 2, self.ref_w / 2 + 0.002, 0.002)\n\n if self.ref_shape == 'c': # Curved reflector\n dist_coords1 = [(ref_x[i], pos_on_semicircle(ref_x[i], self.R, self.c_xy)) for i in range(self.I)]\n dist_coords2 = [(ref_x[i + 1], pos_on_semicircle(ref_x[i + 1], self.R, self.c_xy)) for i in range(self.I)]\n a_i = [distance(dist_coords1[i], dist_coords2[i]) for i in range(self.I)]\n\n cx_i = [ref_x[i] + (ref_x[i + 1] - 
ref_x[i]) / 2 for i in range(self.I)]\n cy_i = [pos_on_semicircle(x, self.R, self.c_xy) for x in cx_i]\n i_coords = list(zip(cx_i, cy_i))\n else: # Flat reflector\n a_i = [(ref_x[i + 1] - ref_x[i]) / 2 for i in range(self.I)]\n cx_i = [ref_x[i] + (ref_x[i + 1] - ref_x[i]) / 2 for i in range(self.I)]\n i_coords = [(x, self.h) for x in cx_i]\n d = {'ref_x': ref_x, 'A_i': a_i, 'I_coords': i_coords, 'cx_i': cx_i}\n\n return d", "def positions(r):\n\n X = []\n Y = []\n\n leftx = -r*(nx - 1) / 2\n topy = -r*(ny - 1) / 2\n\n for i in range(0, nx):\n for j in range(0, ny):\n X.append(leftx + r * i)\n Y.append(topy + r * j)\n\n return (X, Y)", "def coordinates_to_imgpts(x, y):\n pts = np.array([np.flipud(np.transpose(np.vstack([x, y])))])\n return pts", "def horizontal_pairings1(mat, rand_pix):\n\tw, h = mat.shape\n\tx = mat[:,:-1]\n\ty = mat[:, 1:]\n\t\n\tx_cor_list = []\n\ty_cor_list = []\n\tfor i in range(len(rand_pix)):\n\t\tpix = rand_pix[i]\n\t\trow = (pix // (h-1))\n\t\tcol = pix - (row*511)\n\t\tx_cor_list.append(x[row, col])\n\t\ty_cor_list.append(y[row, col])\n\n\treturn x_cor_list, y_cor_list", "def separate_colors(self):\n colors = self.get_sorted_pixels()\n colors_dict = dict((val[1], Image.new('RGB', self.size, (255,255,255))) \n for val in colors)\n pixel_dict = dict((img, []) for img in colors_dict.keys())\n\n pix = self.image.load()\n for i in range(self.width):\n for j in range(self.height):\n if pix[i,j] in colors_dict:\n colors_dict[pix[i,j]].putpixel((i,j),(0,0,0))\n pixel_dict[pix[i,j]].append((i, j))\n\n return [(color, colors_dict[color], pixels) for color, pixels in pixel_dict.items()]", "def get_positions(p_state, idx_image=-1, idx_chain=-1):\n nos = system.get_nos(p_state, idx_image, idx_chain)\n ArrayType = scalar*3*nos\n Data = _Get_Positions(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n array_pointer = ctypes.cast(Data, ctypes.POINTER(ArrayType))\n array = np.frombuffer(array_pointer.contents, dtype=scalar)\n array_view = array.view()\n array_view.shape = (nos, 3)\n return array_view", "def direction(self):\n import pylab\n i = 0\n j = 0\n vals = []\n vects = []\n kpx = self.keypoints.x\n kpy = self.keypoints.y\n sigma = self.keypoints.sigma\n img = self.raw\n pylab.figure()\n pylab.imshow(img, interpolation='nearest')\n\n for y, x, s in zip(kpy, kpx, sigma):\n s_patch = numpy.trunc(s * 2)\n\n if s_patch % 2 == 0 :\n s_patch += 1\n\n if s_patch < 3 : s_patch = 3\n\n if (x > s_patch / 2 and x < img.shape[1] - s_patch / 2 - 1 and y > s_patch / 2 and y < img.shape[0] - s_patch / 2):\n\n patch = img[y - (s_patch - 1) / 2:y + (s_patch - 1) / 2 + 1, x - (s_patch - 1) / 2:x + (s_patch - 1) / 2 + 1]\n x_patch = numpy.arange(s_patch)\n Gx = numpy.exp(-4 * numpy.log(2) * (x_patch - numpy.median(x_patch)) ** 2 / s)\n Gy = Gx[:, numpy.newaxis]\n dGx = -Gx * 4 * numpy.log(2) / s * 2 * (x_patch - numpy.median(x_patch))\n dGy = dGx[:, numpy.newaxis]\n d2Gx = -8 * numpy.log(2) / s * ((x_patch - numpy.median(x_patch)) * dGx + Gx)\n d2Gy = d2Gx[:, numpy.newaxis]\n\n Hxx = d2Gx * Gy\n Hyy = d2Gy * Gx\n Hxy = dGx * dGy\n\n d2x = (Hxx.ravel() * patch.ravel()).sum()\n d2y = (Hyy.ravel() * patch.ravel()).sum()\n dxy = (Hxy.ravel() * patch.ravel()).sum()\n H = numpy.array([[d2y, dxy], [dxy, d2x]])\n val, vect = numpy.linalg.eig(H)\n\n# print 'new point'\n# print x, y\n# print val\n# print vect\n# print numpy.dot(vect[0],vect[1])\n e = numpy.abs(val[0] - val[1]) / numpy.abs(val[0] + val[1])\n j += 1\n# print j\n# print e\n if numpy.abs(val[1]) < 
numpy.abs(val[0]): # reorganisation des valeurs propres et vecteurs propres\n val[0],val[1] = val[1],val[0]\n vect = vect[-1::-1,:]\n\n\n pylab.annotate(\"\", xy=(x + vect[0][0] * val[0], y + vect[0][1] * val[0]), xytext=(x, y),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n\n pylab.annotate(\"\", xy=(x + vect[1][0] * val[1], y + vect[1][1] * val[1]), xytext=(x, y),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n pylab.plot(x, y, 'og')\n vals.append(val)\n vects.append(vect)\n return vals, vects", "def get_points(self, pts_list, idx, org_imsize):\r\n xy, n_pts = pts_list[idx].size()\r\n pad_pts = torch.zeros((xy, self.max_pts - n_pts)) - 2\r\n x_crds = pts_list[idx][0] * (self.img_size / org_imsize[0])\r\n y_crds = pts_list[idx][1] * (self.img_size / org_imsize[1])\r\n kps = torch.cat([torch.stack([x_crds, y_crds]), pad_pts], dim=1)\r\n\r\n return kps, n_pts" ]
[ "0.57239145", "0.56765956", "0.5582602", "0.5538157", "0.55075634", "0.54908365", "0.54908365", "0.546828", "0.5440452", "0.5436629", "0.53986806", "0.53672814", "0.5325077", "0.53231037", "0.53162026", "0.53007215", "0.5292834", "0.5292581", "0.5284873", "0.527916", "0.5266517", "0.5234355", "0.5227053", "0.52252686", "0.5192839", "0.5190298", "0.51788324", "0.51687276", "0.5165647", "0.5148098" ]
0.74972606
0
commandline frontend to portcheck
def portcheck_main(args=sys.argv[1:]): ports = portcheck(*args) for i in ports: print '%s: %s' % (i, ports[i]) return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd_port(args):", "def main():\n import getopt\n\n try:\n options, remainder = getopt.getopt(\n sys.argv[1:], '',\n ['help', # Print usage msg, exit\n 'short', # Output is shortened\n 'pid', # Output only pid of listenig process\n 'proc', # Output only process name of listening port\n 'kill', # Kill the process give its port\n ]\n )\n except getopt.GetoptError as err:\n sys.stderr.write(str(err) + '\\n')\n usage(1)\n\n shortened = False\n pid_only = False\n proc_only = False\n kill = False\n for opt, arg in options:\n if opt in ['--help']:\n usage(0)\n elif opt in ['--short']:\n shortened = True\n elif opt in ['--pid']:\n pid_only = True\n elif opt in ['--proc']:\n proc_only = True\n elif opt in ['--kill']:\n kill = True\n else:\n # Should never happen. getopt() will catch this.\n sys.stderr.write('Unhandled option:\"%s\"\\n' % opt)\n usage(1)\n\n try:\n if len(remainder):\n for aport in remainder:\n int(aport) # Insist on a valid integer.\n else:\n remainder = []\n remainder.append(PORT)\n except ValueError as err:\n sys.stderr.write('port number must be all numeric:%s\\n' %\n str(remainder))\n return 255\n ret_code = 0\n for aport in remainder:\n status = listening(aport, shortened, pid_only, proc_only, kill)\n if status == 255:\n return 255 # Illegal option\n ret_code += status\n\n return ret_code", "def test_port_validation(runner: CliRunner) -> None:\n invalid_res = runner.invoke(cli.main, [\"-p\", \"66666\"])\n assert invalid_res.exit_code == 2\n assert 'Invalid value for \"-p\" / \"--port\"' in invalid_res.output\n assert \"'port' is invalid in configuration\" in invalid_res.output", "def run_app():\n target = None\n negative_results = False\n\n description = 'Simple TCP port scanner'\n epilog = 'The author of this code take no responsibility for your use or misuse'\n parser = argparse.ArgumentParser(prog='TCPPortScan.py', description=description, epilog=epilog)\n parser.add_argument(\"target\", help=\"Your target to scan\")\n parser.add_argument('-p', '--port', help=\"Set a single port\", default=22, type=int)\n parser.add_argument('-r', '--range', help=\"Set a port range (eq 22-80)\")\n parser.add_argument(\"--all\", help=\"Show negative results (closed ports)\", action=\"store_true\")\n args = parser.parse_args()\n\n if len(args.target) < 1:\n print('You did not provide any target?')\n exit(1)\n else:\n target = args.target\n\n if args.all:\n negative_results = True\n\n if args.range:\n print(\"Start scanning ports {} on target {}\".format(args.range, target))\n range_list = args.range.split('-')\n for element in range(int(range_list[0]), int(range_list[1]) + 1):\n port_scan(target, element, negative_results)\n else:\n print(\"Start scanning port {} on target {}\".format(args.port, target))\n port_scan(target, args.port, negative_results)", "def usage(exit_code):\n\n sys.stderr.write(\"\"\"\n List the processes that are listening to a port.\n Defaults to ZeroMQ port of 5570.\n\n Use by:\n listeningPort [--help] [--short | --pid | --proc] [--kill] \\\n <port0> [<port1> ...]\n e.g.:\n listeningPort 5570 # The ZeroMQ default port\n listeningPort 5570 5571 5572 # Multiple ports may be checked\n listeningPort --short 5570\n listeningPort $(seq 5570 5580) # Ports 5570 through 5580 inclusive.\n\n For the case of a free port, output similar to:\n Port 5571 : Nobody listening\n\n --help = this message\n\n Only one of the following can be supplied:\n --short = Output consists of only three space separated fields:\n <port> <pid of listener> <process name of listener>\n Ports with 
nobody listening gets ignored for output.\n --pid = Output consists only of a pid\n --proc = Output consists only of process names\n --kill = Any ports with a listener will be killed with \"kill -9 <pid>\"\n\n Return codes:\n 255 == Invalid command line.\n 0 == Nobody listening to <port>\n > 0 == The number of ports someone is listening to.\n For a series of port, this value is the number\n of ports with a listener.\n For a single port, this will be 1 is someone\n is listening.\n \\n\n ***NOTICE***: This routine does NOT work on OSX!\n Replace this with:\n lsof -i<port> | awk '{ print $2; }' | head -2\n PID\n 18101\n This prints only the pid of the process using this port.\n Now use \"ps\" to find the process:\n ps ax | grep 18191 | grep -v grep\n 10191 s001 S+ 0:00.00 /usr/bin/python /usr/local/bin/logCollector\n \"\"\")\n sys.exit(exit_code)", "def main():\n\n # TODO: more advanced argument processing\n\n # Handle port\n port = None\n if len(sys.argv) > 1:\n port_arg = sys.argv[1]\n try:\n port = int(port_arg[1:] if port_arg.startswith(':') else port_arg)\n except:\n pass\n\n try:\n serve(port=port)\n except ValueError, ex:\n # Show input error\n print 'Error:', ex", "def test_option_server_port(self):\n # empty redis-server host name\n cmd, output = runCmdOutput(['-s', '', '-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_USAGE)\n # port number out of range\n cmd, output = runCmdOutput(['-s', 'localhost', '-p', '99999'])\n self.assertEqual(cmd.returncode, os.EX_USAGE)", "def main(argv):\n # Our command line is trivial so I avoid an argparse import. If we ever\n # grow more than 1-2 args, switch to a using argparse.\n if '-h' in argv or '--help' in argv:\n print(argv[0], 'usage:\\n')\n import inspect\n print(inspect.getdoc(main))\n sys.exit(1)\n pid=int(argv[1]) if len(argv) > 1 else os.getppid()\n bind_timeout=float(argv[2]) if len(argv) > 2 else 0\n port = _pick_unused_port(pid=pid, noserver_bind_timeout=bind_timeout)\n if not port:\n sys.exit(1)\n print(port)", "def check(self, target, port):\n pass", "def main():\n return run_network_interface_check()", "def __usage(cls):\n print('Python Port Scanner')\n print('Please make sure the input host name is in the form of \"foo.com\" or \"http://foo.com!\"\\n')", "def main():\n arguments = docopt(__doc__)\n tail(arguments['--host'], int(arguments['--port']),\n arguments['--source'], arguments['--source-host'],\n arguments['--type'])", "def test_build_command_port(self):\n actual_result = IperfServerCommandBuilder()\\\n .set_port(IPERF_PORT)\\\n .build_server_command()\n self.assertListEqual(actual_result, ['iperf', '-s', '-p', '22'])", "def checkCommandArgs():\n try:\n int(sys.argv[1]) #sin\n int(sys.argv[2]) #sout\n int(sys.argv[3]) #csin\n except (ValueError, IndexError) as e:\n print (\"One or more port numbers are not ints or were not entered\")\n sys.exit()\n \n for i in range(3):\n if int(sys.argv[i+1]) > PORT_RANGE_UPPER or int(sys.argv[i+1]) < PORT_RANGE_LOWER:\n print(\"One or more port number out of range\")\n sys.exit()\n \n if not os.path.isfile(sys.argv[4]):\n print(\"file does not exist\")\n sys.exit()\n \n return int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), sys.argv[4]", "def port():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():" ]
[ "0.7720498", "0.73162156", "0.7220862", "0.7017648", "0.693446", "0.69314975", "0.66068625", "0.6573738", "0.6537948", "0.64890003", "0.6484347", "0.64464116", "0.64462745", "0.6400242", "0.634724", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765", "0.62326765" ]
0.8002637
0
commandline frontend to portkill
def portkill_main(args=sys.argv[1:]): # Probably should use optparse or some such. kw = {} if '-v' in args: kw['verbose'] = True args = [a for a in args if a != '-v'] if '-s' in args: index = args.index('-s') kw['sleeptime'] = args[index + 1] args = args[:index] + args[index+2:] portkill(*args, **kw) return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remote_kill():", "def cmd_port(args):", "def GET_kill(self):\n sys.exit(0)", "def main():\n import getopt\n\n try:\n options, remainder = getopt.getopt(\n sys.argv[1:], '',\n ['help', # Print usage msg, exit\n 'short', # Output is shortened\n 'pid', # Output only pid of listenig process\n 'proc', # Output only process name of listening port\n 'kill', # Kill the process give its port\n ]\n )\n except getopt.GetoptError as err:\n sys.stderr.write(str(err) + '\\n')\n usage(1)\n\n shortened = False\n pid_only = False\n proc_only = False\n kill = False\n for opt, arg in options:\n if opt in ['--help']:\n usage(0)\n elif opt in ['--short']:\n shortened = True\n elif opt in ['--pid']:\n pid_only = True\n elif opt in ['--proc']:\n proc_only = True\n elif opt in ['--kill']:\n kill = True\n else:\n # Should never happen. getopt() will catch this.\n sys.stderr.write('Unhandled option:\"%s\"\\n' % opt)\n usage(1)\n\n try:\n if len(remainder):\n for aport in remainder:\n int(aport) # Insist on a valid integer.\n else:\n remainder = []\n remainder.append(PORT)\n except ValueError as err:\n sys.stderr.write('port number must be all numeric:%s\\n' %\n str(remainder))\n return 255\n ret_code = 0\n for aport in remainder:\n status = listening(aport, shortened, pid_only, proc_only, kill)\n if status == 255:\n return 255 # Illegal option\n ret_code += status\n\n return ret_code", "def Run(port):\n\tport.write(\"R\");", "def InterfaceClientStop(self, exitCode=200): \n pass", "def port_delete(switch, port):\n client.port.delete(switch, port)", "def TerminalClientStop(self, exitCode=200):\n pass", "def kill_process_by_port(port):\n port = int(port)\n pid = get_pid_by_port(port)\n if pid:\n return kill(pid)", "def kill(ctx, analytic_host, analytic_port):\n client = aceclient.ConfigClient(host=analytic_host, port=analytic_port)\n client.kill()", "def cli(ctx):\n with process_manager.process_manager(**ctx.parent.cm_kwargs) as pm:\n pm.shutdown()", "def kill(self):\r\n\r\n endpoint = self._get_nailgun_endpoint()\r\n if endpoint:\r\n self._log_kill(endpoint.pid, endpoint.port)\r\n try:\r\n os.kill(endpoint.pid, 9)\r\n except OSError:\r\n pass", "def _stop_binary(self, alias):\n if alias is None:\n command = process_manager.ProcessManager.STOP + '\\n'\n else:\n command = '%s %s\\n' % (process_manager.ProcessManager.STOP, alias)\n self._socket.sendall(command)", "def kill(self, id):", "def kill(self, id):", "def close(self):\n self.port.send_command(\"atz\")\n self.port.close()\n self.port = None", "def kill_server(hosts):\n kill_cmds = [\n \"pkill '(daos_server|daos_io_server)' --signal INT\",\n \"sleep 5\",\n \"pkill '(daos_server|daos_io_server)' --signal KILL\",\n ]\n # Intentionally ignoring the exit status of the command\n pcmd(hosts, \"; \".join(kill_cmds), False, None, None)", "def killServer(self, display):\n raise NotImplementedError", "def kill(host):\n\ttry:\n\t\tprocess = subprocess.Popen([\"ssh\", host, \"pgrep -u cst042 python | xargs kill -s SIGTERM\"])\n\t\tprint process.wait()\n\texcept Exception, e:\n\t\tprint \"Unable to kill on %s\" % (str(host))", "def on_StopNode_clicked(self):\n # TODO: not implemented yet\n #raise NotImplementedError\n print(\"We will kill all gman process!\")\n reply = QMessageBox.question(self, '确认', '确认kill所有gman任务吗', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n if reply == QMessageBox.Yes:\n autokillGman()\n self.OnlyDisplay(\"kill -9 |grep gman\")\n else:\n print(\"Keep GMAN run.......!\")", "def cmd_quit(args):", "def phone_kill(self) -> None:", "def 
kill(targets, controller=False):", "def main():\n args = parser.parse_args()\n terminate(\n args.uuid,\n args.time,\n args.limit,\n skip=args.skip\n )", "def webserver_stop():\n run(\"kill $(cat %s)\" % GUNICORN_PIDFILE)\n run(\"rm %s\" % GUNICORN_PIDFILE)", "def kill(self):\n if self.transport.pid is not None:\n self.transport.signalProcess('KILL')", "def stop():\n\n crate = get_crate()\n # Tell the thread to stop\n crate.mch_comms.stop = True\n # Stop the ipmitool shell process\n try:\n if crate.mch_comms.ipmitool_shell:\n crate.mch_comms.ipmitool_shell.terminate()\n crate.mch_comms.ipmitool_shell.kill()\n except:\n pass", "def processKill(uPid):\n return processTerminate(uPid);", "def test_build_kill_command(self):\n actual_result = IperfKillCommandBuilder(SERVER_USER)\\\n .set_ip_address(SERVER_IP)\\\n .set_password(SERVER_PASSWORD)\\\n .to_build()\n self.assertListEqual(actual_result,\n ['sshpass', '-p', 'QWERTY', 'ssh',\n '[email protected]', 'pkill', '-9',\n 'iperf', ';echo', '$?'])", "def main():\n\n # TODO: more advanced argument processing\n\n # Handle port\n port = None\n if len(sys.argv) > 1:\n port_arg = sys.argv[1]\n try:\n port = int(port_arg[1:] if port_arg.startswith(':') else port_arg)\n except:\n pass\n\n try:\n serve(port=port)\n except ValueError, ex:\n # Show input error\n print 'Error:', ex" ]
[ "0.7240684", "0.7052073", "0.65952784", "0.63656825", "0.62713367", "0.6178196", "0.6129385", "0.6117073", "0.61063033", "0.60440135", "0.6033156", "0.60245794", "0.5999834", "0.5967886", "0.5967886", "0.5951784", "0.5946233", "0.5944419", "0.5938858", "0.59301466", "0.59177667", "0.59145415", "0.58959556", "0.5889466", "0.58570606", "0.58524555", "0.5826737", "0.5822436", "0.58028984", "0.5782465" ]
0.77358234
0
Hotfix for exporting mixed precision model as float32.
def export_model_as_float32(temporary_model, checkpoint_path, export_path): checkpoint = tf.train.Checkpoint(model=temporary_model) manager = tf.train.CheckpointManager( checkpoint=checkpoint, directory=checkpoint_path, max_to_keep=3 ) checkpoint.restore(manager.latest_checkpoint).expect_partial() temporary_model.save(export_path, include_optimizer=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_float32(elem):\n return elem.astype(np.float32)", "def write_float32(self, f: float) -> None:\n self.buffer += struct.pack(\"<f\", f)", "def data_convert2float32 (self, data):\r\n data = data.astype(np.float32)\r\n\r\n return data", "def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):\n return self._cast_floating_to(params, jnp.float32, mask)", "def ts_float32(val):\n return np.float64(val)", "def read_float32(self):\n return self.read(BitTypes.FLOAT_LE_32.value)", "def floatx():\n return _FLOATX", "def test_float_log(self):\n htype = h5t.py_create('f', logical=True)\n self.assertIsInstance(htype, h5t.TypeFloatID)", "def write_float32_list(self, float_list: List[float]) -> None:\n self.write_int32(len(float_list))\n for f in float_list:\n self.write_float32(f)", "def convert_fp32_or_fp16(\n input_model_dir, output_model_dir, batch_size, precision_mode):\n trt.create_inference_graph(\n input_graph_def=None,\n outputs=None,\n max_batch_size=batch_size,\n input_saved_model_dir=input_model_dir,\n output_saved_model_dir=output_model_dir,\n precision_mode=precision_mode)", "def compress(self,float32):\n\n F16_EXPONENT_BITS = 0x1F\n F16_EXPONENT_SHIFT = 10\n F16_EXPONENT_BIAS = 15\n F16_MANTISSA_BITS = 0x3ff\n F16_MANTISSA_SHIFT = (23 - F16_EXPONENT_SHIFT)\n F16_MAX_EXPONENT = (F16_EXPONENT_BITS << F16_EXPONENT_SHIFT)\n\n if type(float32) == float:\n f32 = self.unpack(float32)\n else:\n f32 = float32\n f16 = 0\n sign = (f32 >> 16) & 0x8000\n exponent = ((f32 >> 23) & 0xff) - 127\n mantissa = f32 & 0x007fffff\n \n if exponent == 128:\n f16 = sign | F16_MAX_EXPONENT\n if mantissa:\n f16 |= (mantissa & F16_MANTISSA_BITS)\n elif exponent > 15:\n f16 = sign | F16_MAX_EXPONENT\n elif exponent > -15:\n exponent += F16_EXPONENT_BIAS\n mantissa >>= F16_MANTISSA_SHIFT\n f16 = sign | exponent << F16_EXPONENT_SHIFT | mantissa\n else:\n f16 = sign\n return f16", "def to_float32(n):\n return np.cast[\"float32\"](n)", "def export_onnx():\r\n model = DivideBy255()\r\n X = torch.randn(1, 3, 256, 256, dtype=torch.float)\r\n onnx_name = \"DivideBy255.onnx\"\r\n\r\n print(f\"Generating {onnx_name}\")\r\n torch.onnx.export(\r\n model,\r\n (X),\r\n onnx_name,\r\n opset_version=10,\r\n do_constant_folding=True,\r\n # verbose=True,\r\n # input_names=['Identity_1', 'Identity'],\r\n output_names=['input_1']\r\n )", "def _float_feature(value):\n\treturn tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def test_float(self):\n htype = h5t.py_create('f')\n self.assertIsInstance(htype, h5t.TypeFloatID)", "def save_float16_npy(data, path):\n np.save(path, data.astype(np.float16))", "def _float_feature(value):\r\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def cst(x):\n\tfrom rhsinfo import wp\n\tif wp == 'float32': x = np.float32(x)\n\tif wp == 'float64': x = np.float64(x)\n\treturn x", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def convert_to_fp32(tensor):\n\n def _convert_to_fp32(tensor):\n return tensor.float()\n\n def _is_fp16_bf16_tensor(tensor):\n return hasattr(tensor, \"dtype\") and tensor.dtype in (torch.float16, torch.bfloat16)\n\n return recursively_apply(_convert_to_fp32, tensor, 
test_type=_is_fp16_bf16_tensor)", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))" ]
[ "0.6170572", "0.60380393", "0.60296834", "0.59463894", "0.59329", "0.58766335", "0.5724392", "0.5597414", "0.5592186", "0.55823284", "0.5563138", "0.5558887", "0.5557625", "0.55393016", "0.5500251", "0.54938865", "0.5477756", "0.54765797", "0.54673815", "0.5465686", "0.545568", "0.545568", "0.54511344", "0.5446573", "0.5446573", "0.5446573", "0.5446573", "0.5446573", "0.5439875", "0.5439875" ]
0.6395869
0
Generate a denormalized Catalog of matches This is intended for writing matches in a convenient way.
def denormalizeMatches(matches, matchMeta=None): if len(matches) == 0: raise RuntimeError("No matches provided.") refSchema = matches[0].first.getSchema() srcSchema = matches[0].second.getSchema() refMapper, srcMapper = lsst.afw.table.SchemaMapper.join([refSchema, srcSchema], ["ref_", "src_"]) schema = refMapper.editOutputSchema() distKey = schema.addField("distance", type=float, doc="Distance between ref and src") catalog = lsst.afw.table.BaseCatalog(schema) catalog.reserve(len(matches)) for mm in matches: row = catalog.addNew() row.assign(mm.first, refMapper) row.assign(mm.second, srcMapper) row.set(distKey, mm.distance) if matchMeta is not None: catalog.getTable().setMetadata(matchMeta) return catalog
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _matches_to_components(self, matches):\n subcomponents = []\n for match in matches:\n subcomponents.append(match.get_component())\n return subcomponents", "def generate_complex_catalog(stem: str = '') -> cat.Catalog:\n group_a = generators.generate_sample_model(cat.Group, True)\n group_a.id = f'{stem}a'\n group_a.controls = generate_control_list(group_a.id, 4)\n part = generators.generate_sample_model(common.Part)\n part.id = f'{stem}a-1_smt'\n part.parts = None\n group_a.controls[0].parts[0].id = f'{stem}_part_with_subpart'\n group_a.controls[0].parts[0].parts = [part]\n group_b = generators.generate_sample_model(cat.Group, True)\n group_b.id = f'{stem}b'\n group_b.controls = generate_control_list(group_b.id, 3)\n group_b.controls[2].controls = generate_control_list(f'{group_b.id}-2', 3)\n group_ba = generators.generate_sample_model(cat.Group, True)\n group_ba.id = f'{stem}ba'\n group_ba.controls = generate_control_list(group_ba.id, 2)\n group_b.groups = [group_ba]\n\n catalog = generators.generate_sample_model(cat.Catalog, True)\n catalog.controls = generate_control_list(f'{stem}cat', 3)\n catalog.params = generate_param_list(f'{stem}parm', 3)\n\n test_control = generators.generate_sample_model(cat.Control, False)\n test_control.id = f'{stem}test-1'\n test_control.params = [common.Parameter(id=f'{test_control.id}_prm_1', values=['Default', 'Values'])]\n test_control.parts = [\n common.Part(\n id=f'{test_control.id}-stmt', prose='The prose with {{ insert: param, test-1_prm_1 }}', name='statement'\n )\n ]\n catalog.controls.append(test_control)\n catalog.groups = [group_a, group_b]\n\n return catalog", "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "def __extract_patterns_and_spaces(self):\n\n def __decorate_nodes(nodes, space):\n \"\"\"\n Performs a backward search from a list of pattern nodes and assigns a set of search spaces\n to all encountered nodes.\n :param nodes: List of pattern nodes that belongs to a search space\n :param space: List of search space id\n :return:\n \"\"\"\n for n in nodes:\n if n not in self.__node_spaces:\n self.__node_spaces[n] = set([])\n self.__node_spaces[n].add(space)\n pred_nodes = self.__plan_graph.subjects(AGORA.next, n)\n __decorate_nodes(pred_nodes, space)\n\n # Extract all search spaces in the plan and build a dictionary of subjects-to-ignore per each of them.\n # Ignored subjects are those that won't be dereferenced due to a explicit graph pattern (object) filter,\n # e.g. 
?s doap:name \"jenkins\" -> All ?s that don't match the filter will be ignored.\n self.__spaces = set(self.__plan_graph.subjects(RDF.type, AGORA.SearchSpace))\n self.__subjects_to_ignore = dict([(sp, set([])) for sp in self.__spaces])\n\n patterns = list(self.__plan_graph.subjects(RDF.type, AGORA.TriplePattern))\n for tp in patterns:\n # A triple pattern belongs to a UNIQUE search space\n space = list(self.__plan_graph.subjects(AGORA.definedBy, tp)).pop()\n self.__patterns[tp] = {'space': space}\n\n # Depending on the format of each triple pattern (either '?s a Concept' or '?s prop O'),\n # it is required to extract different properties.\n tp_pred = list(self.__plan_graph.objects(tp, predicate=AGORA.predicate)).pop()\n\n if tp_pred == RDF.type: # ?s a Concept\n self.__patterns[tp]['type'] = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()\n try:\n check_type = list(self.__plan_graph.objects(tp, predicate=AGORA.checkType)).pop().toPython()\n except IndexError:\n check_type = True\n self.__patterns[tp]['check'] = check_type\n else: # ?s prop O\n self.__patterns[tp]['property'] = tp_pred\n tp_obj = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()\n if (tp_obj, RDF.type, AGORA.Literal) in self.__plan_graph: # In case O is a Literal\n self.__patterns[tp]['filter_object'] = list(self.__plan_graph.objects(tp_obj, AGORA.value)).pop()\n elif isinstance(tp_obj, URIRef):\n self.__patterns[tp]['filter_object'] = tp_obj\n\n tp_sub = list(self.__plan_graph.objects(tp, predicate=AGORA.subject)).pop()\n if isinstance(tp_sub, URIRef):\n self.__patterns[tp]['filter_subject'] = tp_sub\n\n # Get all pattern nodes (those that have a byPattern properties) of the search plan and search backwards\n # in order to set the scope of each search space.\n nodes = list(self.__plan_graph.subjects(AGORA.byPattern, tp))\n for n in nodes:\n if n not in self.__node_patterns:\n self.__node_patterns[n] = set([])\n self.__node_patterns[n].add(tp)\n __decorate_nodes(nodes, space)", "def variations():", "def get_conversions(self):\n query = prefixes + \"\"\"\n SELECT DISTINCT ?controller ?controllerName ?controllerActivity\n ?product ?productName ?reactant ?reactantName ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?rxn .\n ?subject a belvoc:AbundanceActivity .\n ?subject belvoc:hasActivityType ?controllerActivity .\n ?subject belvoc:hasChild ?controller .\n ?controller belvoc:hasConcept ?controllerName .\n ?rxn a belvoc:Reaction .\n ?rxn belvoc:hasChild ?reactants .\n ?reactants rdfs:label ?reactLabel .\n FILTER (regex(?reactLabel, \"^reactants.*\"))\n ?rxn belvoc:hasChild ?products .\n ?products rdfs:label ?prodLabel .\n FILTER (regex(?prodLabel, \"^products.*\"))\n ?reactants belvoc:hasChild ?reactant .\n ?products belvoc:hasChild ?product .\n ?reactant belvoc:hasConcept ?reactantName .\n ?product belvoc:hasConcept ?productName .\n }\n \"\"\"\n res = self.g.query(query)\n # We need to collect all pieces of the same statement so that we can\n # collect multiple reactants and products\n stmt_map = collections.defaultdict(list)\n for stmt in res:\n stmt_map[stmt[-1]].append(stmt)\n for stmts in stmt_map.values():\n # First we get the shared part of the Statement\n stmt = stmts[0]\n subj = self._get_agent(stmt[1], stmt[0])\n evidence = self._get_evidence(stmt[-1])\n stmt_str = strip_statement(stmt[-1])\n # Now we collect the participants\n obj_from_map = {}\n obj_to_map = {}\n for stmt in stmts:\n 
reactant_name = stmt[6]\n product_name = stmt[4]\n if reactant_name not in obj_from_map:\n obj_from_map[reactant_name] = \\\n self._get_agent(stmt[6], stmt[5])\n if product_name not in obj_to_map:\n obj_to_map[product_name] = \\\n self._get_agent(stmt[4], stmt[3])\n obj_from = list(obj_from_map.values())\n obj_to = list(obj_to_map.values())\n st = Conversion(subj, obj_from, obj_to, evidence=evidence)\n # If we've matched a pattern, mark this as a converted statement\n self.statements.append(st)\n self.converted_direct_stmts.append(stmt_str)", "def create_match_instance_pairs(plant_match_in):\r\n\t## collect all plant name match instances indices\r\n\tjust_indices = [int(indices) for plant_match_in_set in plant_match_in for indices in plant_match_in_set[1]]\r\n\t\r\n\tassert len(just_indices) == len(set(just_indices)) # check there are no exact duplicates in indices\r\n\r\n\tsorted_index = list(sorted(just_indices)) # sort indices small-large\r\n\tprint(f'Length of corpus bigrams BEFORE ambiguous matches removed: {len(sorted_index)}')\r\n\t#print(sorted_index)\r\n\r\n\t# remove all ambiguous matches that are within 1 word of each other\r\n\tprint('Ambiguous plant name matches: ')\r\n\tfor i, index in enumerate(sorted_index): # iterate over all indices in sorted list\r\n\t\t\r\n\t\tif index == sorted_index[i-1]+1: # indices is within 1 of previous indices in list\r\n\t\t\tprint(index, sorted_index[i-1])\r\n\t\t\tsorted_index.remove(index) # remove indices from list\r\n\t\t\tsorted_index.remove(sorted_index[i-1]) # AND remove previous indices from list\r\n\tprint(f'Length of corpus bigrams AFTER ambiguous matches removed: {len(sorted_index)}')\r\n\r\n\t# create indices dict with 'B', 'I' values\r\n\tpaired_finds = {}\r\n\tfor match_index in sorted_index: # iterate over unambiguous match indices list\r\n\t\t\t\t\t\r\n\t\t\tpaired_finds[match_index] = ('B') # WITH value of 'B'\r\n\t\t\t\r\n\t\t\tpaired_finds[match_index+1] = ('I') # WITH value of 'I'\r\n\r\n\treturn paired_finds", "def __init__(self, matches=None):\n super(MatchWarehouse, self).__init__()\n\n if matches is None:\n matches = []\n\n self.matches = matches", "def output_matches(self) -> List[str]:\n output = list()\n for match in sorted(self.matches):\n line = f\"{match[0]} - {match[1]}: Matching ({match[4]}) {match[2]} to {match[3]} on map\"\n logger.info(line)\n output.append(line)\n return output", "def __str__(self):\n return \"{}\".format(self._matches.keys())", "def _candidate_generation(self):\n doc = self.nlp(self.text)\n named_entity_dict = {}\n named_entity_key_list = []\n named_entity_value_list = []\n entity_from_text_list = []\n offline_dic_list = []\n matched_element_list = []\n\n for ent in doc.ents:\n named_entity = (str(ent.text) + ':' + str(ent.label_))\n named_entity = (named_entity.split(':'))\n # named_entity_key = named_entity[0].replace('\\n', '')\n # named_entity_key_list.append(named_entity_key)\n # named_entity_value = named_entity[1].replace('\\n', '')\n # named_entity_value_list.append(named_entity_value)\n named_entity_value = named_entity[1].replace('\\n', '')\n named_entity_value_list.append(named_entity_value)\n filtered_words = (str(ent.text).split())\n filtered_words = [w for w in filtered_words if w.lower() not in self.english_stopwords]\n named_entity_key = [' '.join(filtered_words)]\n for i in named_entity_key:\n named_entity_key_list.append(i)\n for key in named_entity_key_list:\n named_entity_dict[key] = []\n i = 0\n for key in named_entity_key_list:\n 
named_entity_dict[key].append(named_entity_value_list[i])\n i = i + 1\n\n entities = \"ORG PERSON LOC GPE\".split()\n for entity in entities:\n entity_from_text = [k for k, v in named_entity_dict.items() if entity in v]\n for item in entity_from_text:\n entity_from_text_list.append(item)\n\n if not entity_from_text_list:\n self.logger.info('No named entity found in the input text')\n else:\n self.logger.info('Entities which are identified from the input sentence')\n self.logger.info(entity_from_text_list)\n\n for key, value in self.offline_dic.items():\n offline_dic_list.append(key)\n\n for item in entity_from_text_list:\n for item1 in offline_dic_list:\n if item == item1:\n matched_element_list.append(item)\n\n big_final_dict = []\n for i in matched_element_list:\n candidate_list = [v for k, v in self.offline_dic.items() if str(k) == str(i)]\n final_dict = dict(zip(i.split('\\n'), candidate_list))\n big_final_dict.append(final_dict)\n\n if not big_final_dict:\n self.logger.warning(\"No Match found in the KB\")\n return matched_element_list, None\n else:\n self.logger.info('found entities')\n return matched_element_list, big_final_dict", "def get_complexes(self):\n q_cmplx = prefixes + \"\"\"\n SELECT ?complexTerm ?childName ?child ?stmt\n WHERE {\n {\n {?stmt belvoc:hasSubject ?complexTerm}\n UNION\n {?stmt belvoc:hasObject ?complexTerm .}\n UNION\n {?stmt belvoc:hasSubject ?term .\n ?term belvoc:hasChild ?complexTerm .}\n UNION\n {?stmt belvoc:hasObject ?term .\n ?term belvoc:hasChild ?complexTerm .}\n }\n ?complexTerm a belvoc:Term .\n ?complexTerm a belvoc:ComplexAbundance .\n ?complexTerm belvoc:hasChild ?child .\n ?child belvoc:hasConcept ?childName .\n }\n \"\"\"\n # Run the query\n res_cmplx = self.g.query(q_cmplx)\n\n # Store the members of each complex in a dict of lists, keyed by the\n # term for the complex\n cmplx_dict = collections.defaultdict(list)\n cmplx_ev = {}\n for stmt in res_cmplx:\n stmt_uri = stmt[3]\n ev = self._get_evidence(stmt_uri)\n for e in ev:\n e.epistemics['direct'] = True\n cmplx_name = term_from_uri(stmt[0])\n cmplx_id = stmt_uri + '#' + cmplx_name\n child = self._get_agent(stmt[1], stmt[2])\n cmplx_dict[cmplx_id].append(child)\n # This might be written multiple times but with the same\n # evidence\n cmplx_ev[cmplx_id] = ev\n # Now iterate over the stored complex information and create binding\n # statements\n for cmplx_id, cmplx_list in cmplx_dict.items():\n if len(cmplx_list) < 2:\n msg = 'Complex %s has less than 2 members! Skipping.' 
% \\\n cmplx_name\n logger.warning(msg)\n else:\n self.statements.append(Complex(cmplx_list,\n evidence=cmplx_ev[cmplx_id]))", "def generate_matched_stars(reduction_metadata,log):\n\n matched_stars = match_utils.StarMatchIndex()\n reduction_metadata.star_catalog[1]\n matched_stars.cat1_index = list(reduction_metadata.star_catalog[1]['index'])\n matched_stars.cat1_ra = list(reduction_metadata.star_catalog[1]['ra'])\n matched_stars.cat1_dec = list(reduction_metadata.star_catalog[1]['dec'])\n matched_stars.cat1_x = list(reduction_metadata.star_catalog[1]['x'])\n matched_stars.cat1_y = list(reduction_metadata.star_catalog[1]['y'])\n matched_stars.cat2_index = list(reduction_metadata.star_catalog[1]['index'])\n matched_stars.cat2_ra = list(reduction_metadata.star_catalog[1]['ra'])\n matched_stars.cat2_dec = list(reduction_metadata.star_catalog[1]['dec'])\n matched_stars.cat2_x = list(reduction_metadata.star_catalog[1]['x'])\n matched_stars.cat2_y = list(reduction_metadata.star_catalog[1]['y'])\n matched_stars.separation = [0.0] * len(reduction_metadata.star_catalog[1]['index'])\n matched_stars.n_match = len(matched_stars.cat1_index)\n\n matrix = np.zeros( (3,3) )\n transform = AffineTransform(matrix=matrix)\n\n log.info('Generated single-dataset null matched_stars and transform')\n\n return transform, matched_stars", "def cypher(self):\n kwargs = {'match': '',\n 'optional_match': '',\n 'where': '',\n 'with': '',\n 'return': ''}\n\n # generate initial match strings\n\n match_strings = set()\n withs = set()\n nodes = self.required_nodes()\n for node in nodes:\n if node.has_subquery:\n continue\n match_strings.add(node.for_match())\n withs.update(node.withs)\n\n kwargs['match'] = 'MATCH ' + ',\\n'.join(match_strings)\n\n # generate main filters\n\n properties = []\n for c in self._criterion:\n if c.in_subquery:\n continue\n properties.append(c.for_cypher())\n if properties:\n kwargs['where'] += 'WHERE ' + '\\nAND '.join(properties)\n\n optional_nodes = self.optional_nodes()\n optional_match_strings = []\n for node in optional_nodes:\n if node.has_subquery:\n continue\n optional_match_strings.append(node.for_match())\n withs.update(node.withs)\n if optional_match_strings:\n s = ''\n for i, o in enumerate(optional_match_strings):\n s += 'OPTIONAL MATCH ' + o + '\\n'\n kwargs['optional_match'] = s\n\n # generate subqueries\n\n with_statements = ['WITH ' + ', '.join(withs)]\n\n for node in nodes:\n if not node.has_subquery:\n continue\n statement = node.subquery(withs, self._criterion)\n with_statements.append(statement)\n\n withs.update(node.withs)\n\n for node in optional_nodes:\n if not node.has_subquery:\n continue\n statement = node.subquery(withs, self._criterion, optional=True)\n with_statements.append(statement)\n\n withs.update(node.withs)\n kwargs['with'] = '\\n'.join(with_statements)\n\n kwargs['return'] = self.generate_return()\n cypher = self.query_template.format(**kwargs)\n\n return cypher", "def build_matches(self, noise=0):\n for player1_index in range(len(self.players)):\n for player2_index in range(player1_index, len(self.players)):\n pair = (\n self.players[player1_index], self.opponents[player2_index])\n match = self.build_single_match(pair, noise)\n yield (player1_index, player2_index), match", "def hits(self):\n for record in self._results.items:\n # Project the record\n projection = self._schema.dump(\n record,\n context=dict(\n identity=self._identity,\n record=record,\n ),\n )\n if self._links_item_tpl:\n projection[\"links\"] = self._links_item_tpl.expand(\n self._identity, 
record\n )\n\n yield projection", "def test_build_match_tree_with_pairs():\n abbreviation_list = [[\"ELIF\", \"ELI.\"], [\"ELSE\", \"E.\"]]\n expected_tree = {\"E\": {\"L\": {\"I\": {\"F\": \"ELI.\"}, \"S\": {\"E\": \"E.\"}}}}\n tree = build_match_tree(abbreviation_list)\n assert repr(tree) == repr(expected_tree)", "def add_catalogs(self):\n n_exposures = len(self.info['Module'])\n self.info['point_source'] = [None] * n_exposures\n self.info['galaxyListFile'] = [None] * n_exposures\n self.info['extended'] = [None] * n_exposures\n self.info['convolveExtended'] = [False] * n_exposures\n self.info['movingTarg'] = [None] * n_exposures\n self.info['movingTargSersic'] = [None] * n_exposures\n self.info['movingTargExtended'] = [None] * n_exposures\n self.info['movingTargToTrack'] = [None] * n_exposures\n\n for i in range(n_exposures):\n if int(self.info['detector'][i][-1]) < 5:\n filtkey = 'ShortFilter'\n pupilkey = 'ShortPupil'\n else:\n filtkey = 'LongFilter'\n pupilkey = 'LongPupil'\n filt = self.info[filtkey][i]\n pup = self.info[pupilkey][i]\n\n if self.point_source[i] is not None:\n # In here, we assume the user provided a catalog to go with each filter\n # so now we need to find the filter for each entry and generate a list that makes sense\n self.info['point_source'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.point_source, 'point source')))\n else:\n self.info['point_source'][i] = None\n if self.galaxyListFile[i] is not None:\n self.info['galaxyListFile'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.galaxyListFile, 'galaxy')))\n else:\n self.info['galaxyListFile'][i] = None\n if self.extended[i] is not None:\n self.info['extended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.extended, 'extended')))\n else:\n self.info['extended'][i] = None\n if self.movingTarg[i] is not None:\n self.info['movingTarg'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTarg, 'moving point source target')))\n else:\n self.info['movingTarg'][i] = None\n if self.movingTargSersic[i] is not None:\n self.info['movingTargSersic'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargSersic, 'moving sersic target')))\n else:\n self.info['movingTargSersic'][i] = None\n if self.movingTargExtended[i] is not None:\n self.info['movingTargExtended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargExtended, 'moving extended target')))\n else:\n self.info['movingTargExtended'][i] = None\n if self.movingTargToTrack[i] is not None:\n self.info['movingTargToTrack'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargToTrack, 'non-sidereal moving target')))\n else:\n self.info['movingTargToTrack'][i] = None\n if self.convolveExtended is True:\n self.info['convolveExtended'] = [True] * n_exposures", "def generate_objects(input_data):\n object_list = []\n for match_list in input_data:\n if len(match_list) == 5:\n object_list.append(Match(match_list[0], match_list[1], match_list[2], '', match_list[3], match_list[4]))\n else:\n object_list.append(\n Match(match_list[0], match_list[1], match_list[2], match_list[3], match_list[4], match_list[5]))\n return object_list", "def MatchAll():\n return {\"match_all\": {}}", "def describe_detailed(self) -> str:\n one_to_one = []\n one_to_many = []\n many_to_one = []\n many_to_many = []\n cats_a: set[Category] = set()\n cats_b: set[Category] = set()\n 
for rule in self.rules:\n cats_a.update(rule.factors_categories_a.keys())\n cats_b.update(rule.factors_categories_b.keys())\n if rule.cardinality_a == \"one\" and rule.cardinality_b == \"one\":\n one_to_one.append(rule)\n elif rule.cardinality_a == \"one\":\n one_to_many.append(rule)\n elif rule.cardinality_b == \"one\":\n many_to_one.append(rule)\n else:\n many_to_many.append(rule)\n\n cat_a, cat_b = self.categorization_a.name, self.categorization_b.name\n\n r = f\"# Mapping between {cat_a} and {cat_b}\\n\\n\"\n r += \"## Simple direct mappings\\n\\n\"\n r += \"\\n\".join(\n rule.format_human_readable(categorization_separator=\"\")\n for rule in one_to_one\n )\n r += \"\\n\\n\"\n r += f\"## One-to-many mappings - one {cat_a} to many {cat_b}\\n\\n\"\n r += \"\\n\".join((rule.format_human_readable()) for rule in one_to_many)\n r += \"\\n\\n\"\n r += f\"## Many-to-one mappings - many {cat_a} to one {cat_b}\\n\\n\"\n r += \"\\n\".join((rule.format_human_readable()) for rule in many_to_one)\n r += \"\\n\\n\"\n r += f\"## Many-to-many mappings - many {cat_a} to many {cat_b}\\n\\n\"\n r += \"\\n\".join((rule.format_human_readable()) for rule in many_to_many)\n r += \"\\n\\n\"\n\n r += \"## Unmapped categories\\n\\n\"\n cats_missing_a = set(self.categorization_a.values()) - cats_a\n cats_missing_b = set(self.categorization_b.values()) - cats_b\n r += f\"### {cat_a}\\n\"\n r += \"\\n\".join(sorted(str(x) for x in cats_missing_a)) + \"\\n\\n\"\n r += f\"### {cat_b}\\n\"\n r += \"\\n\".join(sorted(str(x) for x in cats_missing_b)) + \"\\n\\n\"\n\n return r", "def catalogmatch(conn, sources, catalog, imobj, search_radius, save):\n catalog_matched = []\n\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n \n match_logger.info('Attempting to match {} sources from this image to '\n 'the {} sky catalog...'.format(len(sources), catalog))\n\n # Print results without saving to database\n if not save:\n # Dump sources into a temporary table\n sql = (\n '''\n CREATE TEMP TABLE temp_source (\n src_id INTEGER,\n ra DOUBLE PRECISION,\n dec DOUBLE PRECISION\n );\n ''')\n cur.execute(sql)\n conn.commit()\n for src in sources:\n cur.execute('''INSERT INTO temp_source (\n src_id, ra, dec) VALUES (%s, %s, %s)''', (\n src.src_id, src.ra, src.dec))\n conn.commit()\n # Find nearest neighbor within FOV & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS catalog_src_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM temp_source AS a, LATERAL (\n SELECT b.* FROM radcat.{} AS b\n WHERE q3c_join(a.ra, a.dec, b.ra, b.dec, %s)\n ORDER BY q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1) AS bb'''\n values = (0.5*imobj.bmin, 2.*imobj.radius)\n cur.execute(psycopg2.sql.SQL(sql).format(\n psycopg2.sql.Identifier(catalog)), values)\n rows = cur.fetchall()\n cur.execute('DROP TABLE temp_source')\n conn.commit()\n\n match_logger.info('-------------------------------------------------'\n '-------------')\n match_logger.info('VLITE_src_id match catalog_src_id '\n 'separation (arcsec)')\n match_logger.info('-------------------------------------------------'\n '-------------') \n for row in rows:\n if row['match']:\n catalog_matched.append(row['catalog_src_id'])\n match_logger.info('{}\\t\\t{}\\t{}\\t{}'.format(\n row['src_id'], row['match'], row['catalog_src_id'], row['sep']))\n\n # Store results for insertion into database\n else:\n # Skip the sources which already have results for this catalog\n # (from a different image)\n assoc_ids = 
[]\n for src in sources:\n already_matched = dbio.check_catalog_match(conn, src.id, catalog)\n if already_matched:\n continue\n else:\n assoc_ids.append(src.id)\n match_logger.info(' -- found previous matching results for {} sources'.\n format(len(sources) - len(assoc_ids)))\n\n # Find nearest neighbor within half a beam\n sql = '''SELECT a.id AS assoc_id, bb.*, \n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep\n FROM assoc_source AS a, LATERAL (\n SELECT b.* FROM radcat.{} AS b\n WHERE a.id IN %s AND q3c_join(a.ra, a.dec, b.ra, b.dec, %s)\n ORDER BY q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1) AS bb'''\n values = (tuple(assoc_ids), (0.5*(imobj.bmin/3600.)))\n cur.execute(psycopg2.sql.SQL(sql).format(\n psycopg2.sql.Identifier(catalog)), values)\n rows = cur.fetchall()\n\n matched_ids = []\n for row in rows:\n matched_ids.append(row['assoc_id'])\n csrc = catalogio.CatalogSource()\n dbclasses.dict2attr(csrc, row)\n catalog_matched.append(csrc)\n\n for src in sources:\n if src.id in matched_ids:\n # Found a match!\n try:\n src.nmatches += 1\n except TypeError:\n src.nmatches = 1\n else:\n if src.nmatches is None:\n src.nmatches = 0\n\n cur.close()\n\n match_logger.info (' -- number of matches: {}'.format(len(catalog_matched)))\n\n return sources, catalog_matched", "def create_vocabs(self):\r\n print('Creating vocabs...')\r\n\r\n # Update surface_char2id\r\n unique_surfaces = set(chain(*[sentence.surface_words for sentence in self.sentences]))\r\n unique_chars = set(chain(*[surface for surface in unique_surfaces]))\r\n for ch in unique_chars:\r\n self.surface_char2id[ch] = len(self.surface_char2id)\r\n\r\n # Update lemma_char2id\r\n unique_lemmas = set(chain(*[sentence.lemmas for sentence in self.sentences]))\r\n unique_chars = set(chain(*[lemma for lemma in unique_lemmas]))\r\n for ch in unique_chars:\r\n self.lemma_char2id[ch] = len(self.lemma_char2id)\r\n\r\n # Update transformation2id\r\n for sentence in self.sentences:\r\n for transformation in sentence.transformations:\r\n for _t in transformation:\r\n if _t not in self.transformation2id:\r\n self.transformation2id[_t] = len(self.transformation2id)\r\n\r\n # Update morph_tag2id\r\n unique_morph_tags = list(chain(*[sentence.morph_tags for sentence in self.sentences]))\r\n unique_tags = set(chain(*[morph_tag for morph_tag in unique_morph_tags]))\r\n for tag in unique_tags:\r\n self.morph_tag2id[tag] = len(self.morph_tag2id)\r\n print('Surface Chars={}, Lemma Chars={}, Transformations={}, tags={}'.format(\r\n len(self.surface_char2id), len(self.lemma_char2id), len(self.transformation2id), len(self.morph_tag2id)\r\n ))", "def _generate_matches_pairs(self):\n for name in self.remaining:\n a = []\n for file in os.listdir(os.path.join(self.data_dir, name)):\n if self.img_ext in file:\n a.append(os.path.join(name, file))\n\n if a:\n with open(self.pairs_filepath, \"a\") as f:\n for i in range(self.num_random_images_per_folder):\n temp = random.choice(a).split(self.separator) # This line may vary depending on how your images are named.\n w = self.separator.join(temp[:-1])\n\n l = random.choice(a).split(self.separator)[-1]\n r = random.choice(a).split(self.separator)[-1]\n\n print(\"For '\" + os.path.join(self.data_dir, name) + \"' and counter: \", self.counter, ', Match Pair:', w + \" -> \" + l\n + \", \" + r)\n\n f.write(w + \"\\t\" + l + \"\\t\" + r + \"\\n\")\n self.counter += 1", "def _get_matches(self, matches: OutputStream):\n for match in self._tree.get_matches():\n matches.add_item(match)\n 
self._remove_matched_freezers(match.events)", "def get_matching(self):\n verts, plaqs, d_verts, d_plaqs = self.get_stabs()\n\n # def get_matching(anyons, d_anyons):\n # edges = self.get_edges(anyons)\n # for i0, i1, weight in edges:\n # nxgraph.add_edge(i0, i1, weight=-weight)\n # output = nx.algorithms.matching.max_weight_matching(nxgraph, maxcardinality=True)\n # return [[d_anyons[i0], d_anyons[i1]] for i0, i1 in output]\n\n def get_matching(anyons, d_anyons):\n output = pm.getMatching(len(anyons), self.get_edges(anyons))\n return [[d_anyons[i0], d_anyons[i1], anyons[i0], anyons[i1]] for i0, i1 in enumerate(output) if i0 > i1]\n\n self.matching = []\n if verts:\n self.matching += get_matching(verts, d_verts)\n if plaqs:\n self.matching += get_matching(plaqs, d_plaqs)", "def __init__(self, pattern, cdict, ont):\n # Perhaps should be extended to allow specification of relations too?\n # Plus this dict of dict struc feels v.clunky to work with. Either change or parse before using.\n self.pattern = pattern # pattern python data structure\n self.ont = ont\n self.validate_abstract_pattern() # important to check that pattern is safe to apply. \n self.cdict = cdict # dict of name : id tuples\n \n # add entites from cdict to pattern dictionary\n for v in cdict.values():\n self.pattern[\"classes\"][v[0]]=v[1]\n self.validate_applied_pattern()\n self.label = self._var_name_sub(self.pattern['name'])\n self.definition = self._var_name_sub(self.pattern['def'])\n \n # For each logical axioms type, add set to False if not present, otherwise sub var, then convert to IDs\n \n self.equivalentTo = False\n if \"equivalentTo\" in self.pattern: \n self.equivalentTo = self.name2Id(self._var_id_sub(self.pattern['equivalentTo']))\n self.subClassOf = False\n if \"subClassOf\" in self.pattern:\n self.subClassOf = self.name2Id(self._var_id_sub(self.pattern['subClassOf']))\n self.GCI = False \n if \"GCI\" in self.pattern:\n self.GCI = self.name2Id(self._var_id_sub(self.pattern['GCI']))", "def newAnalyzer():\n analyzer = {'crimes': None,\n 'dateIndex': None,\n 'autors': None,\n 'instrumentalness': None,\n 'tempo':None,\n 'liveness':None,\n 'speechiness':None,\n 'danceability':None,\n 'valence':None,\n 'loudness':None,\n 'acousticness':None,\n 'energy':None,\n 'generos':None\n }\n\n analyzer['crimes'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['ids'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['dateIndex'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n\n analyzer['autors'] = om.newMap(omaptype='RBT',\n comparefunction=compareAUTOR)\n\n analyzer['instrumentalness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['tempo'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['liveness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['speechiness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['danceability'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt) \n analyzer['valence'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['loudness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['acousticness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['energy'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt) \n\n analyzer['generos']= m.newMap(11,\n maptype='CHAINING',\n loadfactor=4.0)\n \n return analyzer", "def build_match_tree(abbreviation_list):\n match_tree = {}\n for word, abbreviation in abbreviation_list:\n tree_node = 
match_tree\n for letter in word[:-1]:\n if letter not in tree_node:\n tree_node[letter] = {}\n tree_node = tree_node[letter]\n tree_node[word[-1]] = abbreviation\n return match_tree", "def newCatalog():\n catalog = {'videosContext': None,\n 'caraContenido': None,\n 'musicalGenero': None,\n 'fechaMusica': None}\n\n catalog['videosContext'] = lt.newList('ARRAY_LIST')\n catalog['caraContenido'] = mp.newMap(30,\n maptype='PROBING',\n loadfactor=0.4)\n catalog['musicaGenero'] = mp.newMap(30,\n maptype='PROBING',\n loadfactor=0.4)\n catalog['fechaMusica'] = om.newMap('RBT')\n\n return catalog" ]
[ "0.5617624", "0.53872037", "0.53387195", "0.5322842", "0.52577996", "0.5231398", "0.52225256", "0.5212459", "0.5200561", "0.50921756", "0.5079429", "0.5012638", "0.501046", "0.4984634", "0.4966179", "0.4954377", "0.4953587", "0.4943859", "0.49416387", "0.49314514", "0.4904986", "0.49019152", "0.49018523", "0.48973343", "0.48590806", "0.48576093", "0.48572847", "0.4850995", "0.4839368", "0.48313186" ]
0.64208984
0
Returns photon energy in keV if specified in eV or keV
def keV(E):
    if np.min(E) >= 100:
        return E / 1000
    else:
        return E
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V", "def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def energyK(k):\r\n C1 = 9.7846113e-07\r\n C2 = 12.263868e0 \r\n E = (-1.0 + np.sqrt(1.0 + 4.0 * C1 * C2**2 * k**2))/(2.0 * C1)\r\n return E", "def energy_Photon(freq=1.0,energy=0.0):\n global r,c,h\n if freq ==0:\n print(\"enerji yok...\")\n return 0\n if energy != 0:\n energy =energy\n else:\n energy = h*freq\n getit =str(input(\"frekans bulmak istiyorsaniz f,yoksa bos gecin.\"))\n if getit ==\"f\":\n return ('%.2E' % Decimal(str(energy/h)))\n return float('%.2E' % Decimal(str(energy)))", "def eV(E):\n if np.max(E) < 100:\n return E * 1000\n else:\n return E", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def energy(energy_name: str) -> float:\n pass", "def temperature_energy():\n e = _si.e.value\n k_B = _si.k_B.value\n return Equivalency(\n [(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],\n \"temperature_energy\",\n )", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xrange(1,self.n):\r\n abs_velocity = np.sqrt(velocity[:,0]**2+velocity[:,1]**2\r\n + velocity[:,2]**2)\r\n KE = 0.5*self.m*abs_velocity**2\r\n total_KE = np.sum(KE)\r\n invid_KE = total_KE/self.Npart\r\n\r\n return total_KE, invid_KE", "def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")", "def kinetic_energy(v, Mm=1.):\n speed_squared = v[:, 0] ** 2 + v[:, 1] ** 2\n # timeit.timeit('vt[:,0]**2+vt[:,1]**2', setup='import numpy as np; vt = np.random.rand(10000,2)', number=1000)\n KE = 0.5 * sum(Mm * speed_squared)\n return KE", "def energy(ps):\n return kinetic_energy(ps) + potential_energy(ps)", "def get_E(J,k):\n E = -2 * J * np.cos(k) # energyeigenvalue \n return E", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def kinetic_energy(self, sys):\n v = sys.velocities\n m = sys.mass\n return 0.5*np.dot(m, np.multiply(v, v))", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def convert_kJmol_eV(en_kJmol):\n return en_kJmol*kJmol_eV", "def ev2ve(eV): \n return cv*np.sqrt( eV*(eV+2.e0*mec2))/(eV+mec2)", "def _calc_energy( self, V_a, eos_d ):\n pass", "def particle_energies_Nashgyro(xyv, NL, KL, BM_rest, OmK, Omg):\n # Split xyv\n xy = xyv[:, 0:2]\n v = xyv[:, 2:4]\n\n # Potential energy\n BL = NL2BL(NL, KL)\n bo = BM2bL(NL, BM_rest, BL)\n bL = bond_length_list(xy, BL)\n kL = KL2kL(NL, OmK, BL)\n U = 0.5 * abs(kL) * (bL - bo) ** 2\n # # Check\n # print 'KL = ', KL\n # print 'BL = ', BL\n # print 'bo = ', bo\n # print 'kL = ', kL\n # print 'U = ', U\n\n # Kinetic energy\n speed_squared = v[:, 0] ** 2 + v[:, 1] ** 2\n KE = 0.5 * (abs(Omg) * speed_squared)\n\n # Check\n if (U < 0).any() or (KE < 0).any():\n print 'KE = ', KE\n print 'U = ', U\n print 'kL*(bL-bo)**2 = ', kL * (bL - bo) ** 2\n print 'kL = ', kL\n raise RuntimeError('NEGATIVE ENERGY!')\n\n return U, KE", "def kinetic_energy(self, units = 'si'):\n if units == 'si':\n return 0.5 * self.mass * 
(linalg.norm(self.velocity) ** 2)\n if units == 'au':\n return 0.5 * self.mass * (linalg.norm(self.velocity * (1.496e11) * 86400) ** 2)", "def convert_eV_kJmol(en_eV):\n return en_eV/kJmol_eV", "def getEnthalpyOfVaporization(self,Temperature):\n\t\tB = self.Antoine_params[1]\n\t\tC = self.Antoine_params[2]\n\n\t\t# Eqn 7 from Epstein et al 2009\n\t\tHvap = 2.303*8.3145*Temperature*Temperature*B/((C + Temperature - 273.15)*(C + Temperature - 273.15))\n\t\treturn Hvap # units are J/molK", "def convert_eV_H(en_eV):\n return en_eV*eV_H", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def energy_PhotonFormula(wave=1.0,energy=0.00):\n global r,c,h\n print(\"Enerji var ise lutfen giriniz.\")\n if energy != 0:\n energy = energy\n else:\n energy=h*(c/wave)\n getit =str(input(\"Dalga boyunu istiyorsaniz d,enerji istiyorsaniz bos birakin.\"))\n if getit == 'd':\n return ('%.2E' % Decimal(str(energy/(h*c))))\n elif getit ==\"\":\n ('%.2E' % Decimal(str(energy)))\n print(\"Yanlis girdi.Yeniden dene.\")\n return energy_PhotonFormula(wave)", "def total_KE(particles):\r\n return sum([particle.kinetic_energy() for particle in particles])", "def Eaho(ve, n, D0):\r\n\r\n De = D0 + Eho(ve, 0)\r\n vv = h*ve*(n+1/2) - (h*ve)**2 / (4*De) * (n+1/2)**2 \r\n return vv # *(10**9) # h*nu*(v+1/2) - (h*nu)**2 / (4*De) * (v+1/2)**2\r", "def energy(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n pot = 4.0 * epsilon * (s12 - s6)\n return pot" ]
[ "0.6925107", "0.67266333", "0.6649382", "0.6463622", "0.6386686", "0.63505965", "0.6302297", "0.6284374", "0.6248313", "0.62413824", "0.6190136", "0.6178548", "0.61690694", "0.616394", "0.6122586", "0.61173284", "0.6104944", "0.60898596", "0.6080368", "0.60786605", "0.607754", "0.60580975", "0.5991169", "0.5989961", "0.5971236", "0.59577984", "0.5956364", "0.5955703", "0.5928764", "0.59286624" ]
0.6767055
1
Calculate the attenuation length (m) at given energies E in keV (vectorized); density in g/cm3, None=default density
def attenuationLength(matID, keV, density=None):
    return 1.0 / mu(matID, keV, density)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debye_length_m(electron_density, electron_temperature):\n return 0.069 * np.sqrt(electron_temperature / electron_density)", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def debye_length(eps_0, electron_temperature, electron_density, elementary_charge):\n return np.sqrt(eps_0*electron_temperature/electron_density/abs(elementary_charge))", "def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "def _calc_energy( self, V_a, eos_d ):\n pass", "def debye_length(T_e, n_e):\n return math.sqrt((T_e * spc.eV * spc.epsilon_0) / (n_e * spc.elementary_charge**2))", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def energy(n):\n return (n * pi * hbar / (2 * a)) ** 2 / (2 * m)", "def energy(data):\n\n return np.real(np.mean(np.abs(data)**2, axis=1))", "def get_xray_attenuation_length(self, energy=13.0):\n \n try:\n att_len = self.xray_properties[energy][-1]\n except KeyError:\n # Interpolate instead\n #energy_close = min(self.xray_properties.keys(), key=lambda k: abs(k-energy))\n\n keys = np.sort(self.xray_properties.keys())\n idx = -1\n for i, key in enumerate(keys):\n if idx==-1 and key>energy:\n idx = i\n \n energy_low = keys[idx-1]\n energy_high = keys[idx]\n extent = (energy-energy_low)/(energy_high-energy_low)\n \n att_len = self.xray_properties[energy_high][-1]*extent + self.xray_properties[energy_low][-1]*(1.0-extent)\n \n return att_len", "def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)", "def electron_density_per_m3(self):\n return self.electron_density * 1e6", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def computeDebyeLength(self, Te, ne):\n \n return np.sqrt(self.EPS_0*self.K*Te/(ne*self.QE**2))", "def energyK(k):\r\n C1 = 9.7846113e-07\r\n C2 = 12.263868e0 \r\n E = (-1.0 + np.sqrt(1.0 + 4.0 * C1 * C2**2 * k**2))/(2.0 * C1)\r\n return E", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def density(ensembles):\n if 
len(ensembles.shape) < 2:\n return ketbra(ensembles)\n else:\n den_mat = ketbra(ensembles[0])\n for i in range(1, len(ensembles)):\n den_mat += ketbra(ensembles[i])\n den_mat /= len(ensembles)\n return den_mat", "def E_total(mesh, edges, width, height):\n # Check the model contains vertex colors.\n if (len(mesh.vc) != len(mesh.v)):\n raise ValueError(\n \"Mesh does not contain an equal number vertex colors \"\n \"and vertices.\")\n\n # Sum up the energy coefficient matrices for all the edge pairs\n N = width * height\n depth = len(mesh.vc[0])\n\n Q = AccumulateCOO()\n L = AccumulateCOO()\n C = scipy.sparse.csc_matrix((depth, depth))\n\n sum_edge_lens = 0.0\n desc = \"Building Seam Value of Lerp Energy Matrix\"\n disable_pbar = logging.getLogger().getEffectiveLevel() > logging.INFO\n for i, edge in enumerate(tqdm(edges, unit=\"edges\", disable=disable_pbar,\n desc=desc)):\n # Calculate the 3D edge length\n verts = [numpy.array(mesh.v[mesh.f[edge[0]][i].v]) for i in edge[1]]\n edge_len = numpy.linalg.norm(verts[1] - verts[0])\n sum_edge_lens += edge_len\n\n # Compute the QuadEnergy of the edge.\n Qe, Le, Ce = E_edge(mesh, edge, width, height, edge_len)\n # Q += Qe\n Q.add(Qe)\n # L += Le\n L.add(Le)\n C += Ce\n\n Q = Q.total((N, N))\n L = L.total((N, depth))\n\n # Divide by the total edge length in 3D\n return QuadEnergy(\n (Q / sum_edge_lens).tocsc(), -L / sum_edge_lens, C / sum_edge_lens)", "def density_from_fluorescence_for_el(p, q, maia_d, el):\n # override absorption settings temporarily to get the response with abs off\n conf_o = config.no_out_absorption\n conf_i = config.no_in_absorption\n config.no_out_absorption = True\n config.no_in_absorption = True\n sinogram = projection.project_sinogram(event_type='fluoro', p=p, q=q,\n maia_d=maia_d, anglelist=[0], el=el)\n # restore overridden absorption settings\n config.no_out_absorption = conf_o\n config.no_in_absorption = conf_i\n\n # Rescale sinogram pixel quantities based on pixel side length.\n # Note: I don't need to do this, assuming the length scale is not changing\n # sinogram *= (UM_PER_CM / p.um_per_px) ** 2\n\n # Now just integrate the density [g/cm3] in the elemental map.\n mass = p.el_maps[el]\n\n return mass.sum() / sinogram.sum()", "def density_lens(self, r, alpha_1, w_c, w_t, e1=0, e2=0, center_x=0, center_y=0):\n theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert(\n alpha_1, w_c, w_t, e1, e2\n )\n f_1 = self._nie_1.density_lens(\n r, theta_E_conv, e1, e2, s_scale_1, center_x, center_y\n )\n f_2 = self._nie_2.density_lens(\n r, theta_E_conv, e1, e2, s_scale_2, center_x, center_y\n )\n f_ = f_1 - f_2\n return f_", "def edge_density(self) -> float:\n return self.number_of_edges() / (\n self.number_of_nodes() * self.number_of_nodes()\n )", "def electron_density(self):\n return N_avo * self.num_electrons * self.density / self.molar_mass", "def density(self, alt):\n (Z, T, CN2, CO2, CO, CAr, CHe, CH, CM, WM) = self.altitude_profile(alt)\n\n # using eqn(42) of COESA for multiple gases\n M_i = [wmN2, wmO2, wmO, wmAr, wmHe, wmH] << (u.g / u.mol)\n n_i = [\n CN2.to_value(u.m**-3),\n CO2.to_value(u.m**-3),\n CO.to_value(u.m**-3),\n CAr.to_value(u.m**-3),\n CHe.to_value(u.m**-3),\n CH.to_value(u.m**-3),\n ] << (1 / u.m**3)\n rho = (n_i @ M_i) / Na\n return rho.to(u.kg / u.m**3)", "def vFrmE(E):\n Ej=E*1.6021*10**-22\n m=1.674929*10**-27\n v=np.sqrt((2.*Ej)/m)\n return(v)", "def keV(E):\n if np.min(E) >= 100:\n return E / 1000\n else:\n return E" ]
[ "0.69875747", "0.683513", "0.68281406", "0.64340115", "0.640952", "0.6275915", "0.6182579", "0.6081061", "0.60733867", "0.59705806", "0.59496653", "0.59434277", "0.59365296", "0.590933", "0.5900445", "0.5885812", "0.58835375", "0.5881771", "0.5863816", "0.58308667", "0.5828801", "0.5820045", "0.58154887", "0.57945406", "0.5793978", "0.57934725", "0.5786573", "0.5768991", "0.5765328", "0.5723056" ]
0.6993646
0
Calculate the eV/atom at given energies with mu_en
def eVatom_en(matID, keV, mJ, rms_mm, density=None):
    if density == None:
        density = defaultDensity(matID)
    attL = 1.0 / mu_en(matID, keV, density)
    EdensityJcm3 = mJ/1000 / (2 * np.pi * attL*u['cm'] * (rms_mm*0.1)**2)
    atomVolcm3 = atomWeight(matID) / c['NA'] / density
    return EdensityJcm3 * atomVolcm3 / 1.6e-19
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V", "def _calc_energy( self, V_a, eos_d ):\n pass", "def get_o_energies(mol):\n try:\n ev_to_hartree = 1./convertor(1,'hartree','eV')\n g=hack_parser.Gaussian(mol.calc.log, loglevel=50)\n d=g.parse()\n #lm, hm, lr\n o_component_es = np.array(d.oniomenergies)\n except AttributeError:\n return 0\n\n return (ev_to_hartree * o_component_es * [-1,1,1]).sum(axis=1)", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def convert_kcalmol_eV(en_kcalmol):\n return en_kcalmol*kcalmol_eV", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def convert_kJmol_eV(en_kJmol):\n return en_kJmol*kJmol_eV", "def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)", "def vi2ev(v,mu):\n return 0.5*mu*mp*v**2/eV2J", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def msqrd_mu_to_enunu(momenta):\n pe = momenta[:, 0]\n pve = momenta[:, 1]\n pvmu = momenta[:, 2]\n\n pmu = np.sum(momenta, axis=1)\n\n return 64.0 * GF**2 * ldot(pe, pvmu) * ldot(pmu, pve)", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def get_E(J,k):\n E = -2 * J * np.cos(k) # energyeigenvalue \n return E", "def energy_scattered_electron(photonEnergy=1, units=eV):\n\n var = sy.var('m_e c E')\n par = units['m_e'], units['c'], photonEnergy\n\n m0 = m_e * c**2\n y = m0 / ( (m0/E) + 2 )\n\n return dic_result(var,par,y)", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def ev2vi_n(eV,mu):\n return np.sqrt(2.*eV*eV2J/(mu*mp))", "def energy(self):\n nocc, gmo, e = self.nocc, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, len(e)):\n for b in range(nocc, len(e)):\n Ec += (1/4.0) * gmo[i, j, a, b]**2 / (e[i]+e[j]-e[a]-e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def vFrmE(E):\n Ej=E*1.6021*10**-22\n m=1.674929*10**-27\n v=np.sqrt((2.*Ej)/m)\n return(v)", "def el2rv(mu,a,e,i,capom,om,f):\n\n prec = 1.0e-13 #user can change this if more precision needed (just runs slower)\n\n #compute the unit vector\n u = om + f\n xhat = np.cos(u)*np.cos(capom) - np.cos(i)*np.sin(capom)*np.sin(u)\n yhat = np.cos(u)*np.sin(capom) + np.cos(i)*np.cos(capom)*np.sin(u)\n zhat = np.sin(i)*np.sin(u)\n\n #compute the angular momentum vector (unit vector)\n hx = np.sin(capom)*np.sin(i)\n hy = -np.cos(capom)*np.sin(i)\n hz = np.cos(i)\n\n #assuming not parabolic, here the magnitudes of the vectors\n 
r = a * (1.0 - e*e) / (1.0 + e*np.cos(f))\n h = ( mu*a*(1.0 - e*e) )**0.5\n\n #position vectors\n x = r * xhat\n y = r * yhat\n z = r * zhat\n\n #compute components of vector theta hat\n thx = hy * zhat - hz * yhat\n thy = hz * xhat - hx * zhat\n thz = hx * yhat - hy * xhat\n\n #obtain the velocity vector's components and calculate v\n thdot = h/(r*r)\n rdot = e*mu*np.sin(f)/h\n\n vx = r * thdot * thx + rdot * xhat\n vy = r * thdot * thy + rdot * yhat\n vz = r * thdot * thz + rdot * zhat\n\n return x,y,z", "def energy(self):\n nocc, ntot, gmo, e = self.nocc, self.ntot, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, ntot):\n for b in range(nocc, ntot):\n Ec += gmo[i, a, j, b]*(2*gmo[i, a, j, b] - gmo[i, b, j, a])/\\\n (e[i] + e[j] - e[a] - e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def convert_eV_kJmol(en_eV):\n return en_eV/kJmol_eV", "def mu_en(matID, keV, density=None):\n mat = goodID(matID)\n if density == None:\n density = defaultDensity(matID)\n if np.isscalar(keV):\n energies = np.array([keV], dtype=np.double)\n else:\n energies = np.array(keV, dtype=np.double)\n _mu = np.array([xl.CS_Energy_CP(mat, eng) * density * u['cm'] for eng in energies])\n if np.isscalar(keV):\n return np.asscalar(_mu)\n else:\n return _mu", "def energy(data):\n\n return np.real(np.mean(np.abs(data)**2, axis=1))", "def computeEnergy(self):\n\t\tGmo = self.Gmo\n\t\te = self.e\n\t\tself.Ec = 0.0\n\n\t\tfor i in range( self.nocc ):\n\t\t\tfor j in range( self.nocc ):\n\t\t\t\tfor a in range( self.nocc,self.norb ):\n\t\t\t\t\tfor b in range( self.nocc,self.norb ):\n\t\t\t\t\t\tself.Ec += 0.25*(Gmo[i,j,a,b]*Gmo[a,b,i,j])/(e[i]+e[j]-e[a]-e[b])\n\n\t\treturn self.E0 + self.Ec", "def iu_energy(self,val,units=\"1/cm\"):\n if units in self.units[\"energy\"]:\n x = conversion_facs_energy[units]\n i_val = x*val\n return i_val", "def ev2vi_fast(eV,mu):\n return 1.3841122485902230e+04/np.sqrt(mu)*np.sqrt(eV)", "def total_potential_energy(R,M,G):\r\n U = 0\r\n N = R.shape[0] \r\n \r\n for n in range(N):\r\n for nn in range(n+1,N):\r\n U = U - G*M[n]*M[nn] / util.enod(R[n,:],R[nn,:])\r\n \r\n return U", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def eVatom(matID, keV, mJ, rms_mm, density=None):\n if density == None:\n density = defaultDensity(matID)\n attL = attenuationLength(matID, keV, density)\n EdensityJcm3 = mJ/1000 / (2 * np.pi * attL*u['cm'] * (rms_mm*0.1)**2)\n atomVolcm3 = atomWeight(matID) / c['NA'] / density\n return EdensityJcm3 * atomVolcm3 / 1.6e-19" ]
[ "0.70589775", "0.677859", "0.6765921", "0.67600095", "0.6704203", "0.6699837", "0.6672238", "0.664315", "0.66347003", "0.66338253", "0.6538472", "0.6527403", "0.6355422", "0.63233757", "0.63145655", "0.6307296", "0.62996507", "0.6297362", "0.62903994", "0.62738526", "0.62570906", "0.62540823", "0.62538445", "0.62231684", "0.621786", "0.62130606", "0.6209621", "0.6199065", "0.61919296", "0.6178806" ]
0.68466586
1
Return the material drill speed (mm/s) based on vaporization heat
def drillSpeed(matID, W, FWHM_mm):
    vaporH = { # kJ/mol
        # spec heat from room temperature to melting + latent heat of fusion + spec heat from melting to boiling + latent heat of vaporization
        # refer https://webbook.nist.gov/chemistry/ for heat capacity, this tool also has some data in specificHeatParams
        'Cu': 29.7 + 13.1 + 48.8 + 300,
        'Fe': 49.5 + 13.8 + 60.9 + 340,
        'W' : 118.4 + 46.9 + 82.5 + 824,
        'Mo': 89.9 + 37.5 + 71.9 + 598,
        'Al': 17.98 + 10.7 + 0.03175*1857 + 294
    }
    if matID not in vaporH.keys():
        raise ValueError(f'No vaporization data for {matID}: available in {vaporH.keys()}')

    mol_mmD = 2 * np.pi * (FWHM_mm/2.355)**2 / 1000 * defaultDensity(matID) / molarMass(matID)
    return W / (mol_mmD * vaporH[matID] * 1000)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second", "def drillTime(matID, thickness_mm, W, FWHM_mm):\n return thickness_mm / drillSpeed(matID, W, FWHM_mm)", "def molar_mass_dry_air():\n return 28.9647", "def get_speed(self):\n return self.get_par(\"slew_speed\")", "def get_speed(vehicle):\n vel = vehicle.get_velocity()\n\n return 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)", "def thermal_velocity(charge, temperature, mass):\n return np.sqrt(2*abs(charge)*temperature/mass)", "def lightSpeed():\n return const.c.value", "def get_velocity(self):\n return self.momentum/self.mass", "def cuttingSpeed(self, diameter, rpm):\n return (math.pi * diameter * rpm)/12", "def lightspeed(self):\n return self._lightspeed", "def wheel_rpm_to_speed(r):\n return r * wheel_circumference / 120.0", "def get_speed(self, hero):\n vel = hero.get_velocity()\n return 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)", "def mTV(self):\n distance = abs(self.vertPosT - self.vertPosW) # distance between htp and vortex shred plane,\n # approximated with the wing root chordplane\n return distance / (self.spanW / 2)", "def get_cmd_velocity(self):\n return self.gripper_io.get_signal_value(\"speed_mps\")", "def motorSpeed(self, speedRPM_l, speedRPM_r):\n\n self.motors__Direction(speedRPM_l, speedRPM_r)\n\n speedRPM_l = abs(speedRPM_l)\n speedRPM_r = abs(speedRPM_r)\n\n speedRPM_l = self.constrainSpeed(speedRPM_l)\n speedRPM_r = self.constrainSpeed(speedRPM_r)\n\n# Left motor\n pwmDuration = 4095.0 * speedRPM_l / self.motorMaxRPM\n# print(\"MuleBot.motorSpeed Duration left float: \", pwmDuration)\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration left int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorLeftMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationLeft = pwmDuration\n\n# Right motor\n #Adjust for right motor being faster\n pwmDuration = 4095.0 * speedRPM_r / self.motorMaxRPM\n pwmDuration = pwmDuration * 9727 / 10000 # 98.519113 percent\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration right int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorRightMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationRight = pwmDuration", "def get_custom_speed(self):\n return self._custom_speed", "def maTail(self):\n return self.maCruise * sqrt(self.speedRatio)", "def get_speed(self):\n raise NotImplementedError", "def get_speed(self):\n raise NotImplementedError", "def speed(self) -> int:", "def speed(self) -> int:", "def get_speed(self):\n return self._speed", "def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed", "def speed(self):\n 
self.convert_window(\"Speed\", \"meters/second\", [\"Mach number\", \"Nm/24hr\", \"centimeters/minute\", \"centimeters/second\", \"feet/hour\", \"feet/minute\", \"feet/second\", \"inches/minute\", \"inches/second\", \"kilometers/hour\", \"kilometers/second\", \"knots\", \"meters/hour\", \"meters/minute\", \"meters/second\", \"miles/hour\", \"miles/minute\", \"miles/second\", \"nautical miles/hour\", \"speed of light\", \"speed of sound\", \"yards/hour\", \"yards/minute\", \"yards/second\"])", "def estimated_speed(self):\n return self._estimates[3].item(0)", "def speed(self) -> float:\n return linalg.norm(self.velocity)", "def getMotorSpeed(self):\n cmd = 'E'\n vel = [-1,-1]\n out = self.getData(cmd)\n out = str(out, 'utf-8')\n if self.debug:\n print(out)\n if out[0] == 'e':\n isStart = False\n j = 0\n for i in range(len(out)):\n if isStart:\n if out[i] == ',':\n vel[j] = int(data)\n j = j + 1\n isStart = False\n else:\n data=data+out[i]\n if out[i] == ',':\n isStart = True\n data = ''\n vel[j] = int(data)\n return vel", "def test_velocity(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.velocity[0], 144000.0)", "def wind_speed(self):\n return self.flow_field.wind_speed", "def get_speed(vehicle):\n vel = vehicle.get_velocity()\n velocity = math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)\n\n return np.array([velocity])" ]
[ "0.7335393", "0.7025321", "0.6622663", "0.6536985", "0.65128434", "0.6388492", "0.6367922", "0.6348878", "0.62760735", "0.62732047", "0.62524635", "0.624983", "0.6247133", "0.62403464", "0.6176493", "0.6170497", "0.6143984", "0.6113371", "0.6113371", "0.6112807", "0.6112807", "0.61007565", "0.6046692", "0.60011476", "0.60009265", "0.60002035", "0.5999014", "0.5991123", "0.5989105", "0.5984801" ]
0.73165506
1
Return the material drill time based on vaporization heat
def drillTime(matID, thickness_mm, W, FWHM_mm):
    return thickness_mm / drillSpeed(matID, W, FWHM_mm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_specific_heat() -> float:\n return 1006.0", "def drillSpeed(matID, W, FWHM_mm):\n vaporH = { # kJ/mol\n # spec heat from room temperature to melting + latent heat of fusion + spec heat from melting to boiling + latent heat of vaporization\n # refer https://webbook.nist.gov/chemistry/ for heat capacity, this tool also has some data in specificHeatParams\n 'Cu': 29.7 + 13.1 + 48.8 + 300,\n 'Fe': 49.5 + 13.8 + 60.9 + 340,\n 'W' : 118.4 + 46.9 + 82.5 + 824,\n 'Mo': 89.9 + 37.5 + 71.9 + 598,\n 'Al': 17.98 + 10.7 + 0.03175*1857 + 294\n }\n if matID not in vaporH.keys():\n raise ValueError(f'No vaporization data for {matID}: available in {vaporH.keys()}')\n \n mol_mmD = 2 * np.pi * (FWHM_mm/2.355)**2 / 1000 * defaultDensity(matID) / molarMass(matID)\n return W / (mol_mmD * vaporH[matID] * 1000)", "def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second", "def heat(self, delta_temp):\n return self.heat_capacity * self.mass * delta_temp", "def get_duration_heat(hobo, start, stop):\n alltimes = []\n heat = 0\n numsecs = 0\n for i, t in enumerate(hobo[\"Temp\"][start:stop]):\n if t > TEMP_FLOOR:\n alltimes.append(hobo[\"Time\"][start:stop][i])\n heat += 1/60.0 * t\n numsecs+=1\n if len(alltimes) < 1 : return (timedelta(0),0,0)\n begin = min(alltimes)\n end = max(alltimes)\n return(end-begin,numsecs, heat)", "def method_compute_timestep(self):\n\n myg = self.cc_data.grid\n\n cfl = self.rp.get_param(\"driver.cfl\")\n\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n # the timestep is min(dx/|u|, dy|v|)\n xtmp = ytmp = 1.e33\n if not abs(u).max() == 0:\n xtmp = myg.dx/abs(u.v()).max()\n if not abs(v).max() == 0:\n ytmp = myg.dy/abs(v.v()).max()\n\n dt = cfl*min(xtmp, ytmp)\n\n # We need an alternate timestep that accounts for buoyancy, to\n # handle the case where the velocity is initially zero.\n rho = self.cc_data.get_var(\"density\")\n rho0 = self.base[\"rho0\"]\n rhoprime = self.make_prime(rho, rho0)\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n\n F_buoy = (abs(rhoprime*g).v()/rho.v()).max()\n\n dt_buoy = np.sqrt(2.0*myg.dx/F_buoy)\n\n self.dt = min(dt, dt_buoy)\n if self.verbose > 0:\n print(f\"timestep is {dt}\")", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def heat_vaporization_func(ts):\n heat_vaporization = np.copy(ts).astype(np.float64)\n heat_vaporization -= 273.15\n heat_vaporization *= -0.00236\n heat_vaporization += 2.501\n heat_vaporization *= 1E6\n return heat_vaporization.astype(np.float32)", "def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt", "def Temp(t):\n return 20 # Need to link to data", "def molar_mass_dry_air():\n return 28.9647", "def 
conduct_heat(self, delta_time, external_power):\n\t\tself.temperature_container = self.temperature_container+self.area*external_power*delta_time/(self.heat_capacity_container*self.mass_container)#https://en.wikipedia.org/wiki/Heat_capacity\n\t\t\n\t\tinternal_power = 0.591*(self.temperature_container-self.temperature)/0.01#No idea of this is right. Mainly the devides by its length bit. https://en.wikipedia.org/wiki/Thermal_conduction#Fourier's_law\n\t\t\n\t\tif (self.heat_capacity*self.mass())!=0:\n\t\t\tself.temperature = self.temperature+internal_power*delta_time/(self.heat_capacity*self.mass())\n\t\t\t#self.temperature_container=self.temperature_container-internal_power*delta_time/(self.heat_capacity_container*self.mass_container)#Als je dit toevoegd lijkt de simulatie niet goed meer te werken dus nog even uitzoeken heo dat zit.", "def t_rh_2_dewT(ds, var):\n ds['dew'] = 243.04 * (np.log(ds[var['rh']] / 100) + ((17.625 * ds[var['temp']]) / (243.04 + ds[var['temp']])))/\\\n (17.625-np.log(ds[var['rh']] / 100) - ((17.625 * ds[var['temp']]) / (243.04 + ds[var['temp']])))\n return ds", "def _calculate_heat_duration(self, dur):\n factor = 0\n\n if dur >= 0xFC:\n durval = 0xFF #Max duration\n else:\n while dur > 0x3F:\n dur = dur / 4\n factor += 1\n durval = dur + (factor * 64)\n\n return durval", "def mv_delay(self):\n return self._TERRAINS[self.terrain]['delay']", "def manipulate_heat_data(self): \n self.exh.T_array = ( 0.5 * (self.exh.T_inlet_array +\n self.exh.T_outlet_array) + 273.15)\n self.exh.delta_T_array = ( self.exh.T_inlet_array -\n self.exh.T_outlet_array )\n \n self.cool.delta_T_array = ( self.cool.T_inlet_array -\n self.cool.T_outlet_array )\n self.cool.C = self.cool.mdot * self.cool.c_p", "def compute_windchill(t,v):\n a = 35.74\n b = 0.6215\n c = 35.75\n d = 0.4275\n v16 = v**0.16\n wci = a+(b*t)-(c*v16)+(d*t*v16)\n return wci", "def calc_supply_temp(tr, Q, m, cp, case):\n if m > 0:\n if case == \"DH\":\n ts = tr + Q / (m * cp)\n else:\n ts = tr - Q / (m * cp)\n else:\n ts = 0\n return ts", "def mTV(self):\n distance = abs(self.vertPosT - self.vertPosW) # distance between htp and vortex shred plane,\n # approximated with the wing root chordplane\n return distance / (self.spanW / 2)", "def time(self) -> int:\n return self.__droneTime", "def time(self) -> float:\n return self.sim_scene.data.time", "def drift_time(conversion_depth):\n effective_depth = maximum(field_start, conversion_depth)\n #Begin black box - trust me, you don't want to know how this works!\n return ((thickness - effective_depth) / saturation_velocity + square(thickness) *\n log((1 + voltage_ratio) / (2 * effective_depth / thickness + voltage_ratio -\n 1)) / (2 * low_field_mobility * depletion_voltage) + maximum((field_start -\n conversion_depth) * saturation_velocity, 0))\n #End black box", "def getDustTemperature(grid=None, ppar=None):\n op = dustopac.radmc3dDustOpac()\n dinfo = op.readDustInfo()\n ngs = len(dinfo['gsize'])\n\n tdust = np.zeros([grid.nx, grid.ny, grid.nz, ngs], dtype=np.float64)\n tgas = getGasTemperature(grid=grid, ppar=ppar)\n for ig in range(ngs):\n tdust[:,:,:,ig] = tgas\n\n return tdust", "def timeStep(self):\n return self.params['h']", "def min_temp(self):\n return self.atag.dhw_min_temp", "def _get_detection_time_multiplier(self):\n return self.__detection_time_multiplier", "def color_temp(self) -> int:\n new_range = self._tuya_temp_range()\n tuya_color_temp = self.tuya_device.status.get(self.dp_code_temp, 0)\n return (\n self.max_mireds\n - self.remap(\n tuya_color_temp,\n 
new_range[0],\n new_range[1],\n self.min_mireds,\n self.max_mireds,\n )\n + self.min_mireds\n )", "def plc_temp(coil_df):", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def time_step(self):\n\n rho_rel = np.abs(self.rho_dt / self.rho)\n rho_rel_max = np.max(rho_rel)\n e_rel = np.abs(self.e_dt / self.e)\n e_rel_max = np.max(e_rel)\n x_rel = np.abs(self.u / self.dx)\n x_rel_max = np.max(x_rel)\n y_rel = np.abs(self.w / self.dy)\n y_rel_max = np.max(y_rel)\n rel = [rho_rel_max, e_rel_max, x_rel_max, y_rel_max]\n delta = np.max(np.abs(rel))\n\n if 0.1 <= delta <= 1e3:\n self.dt = self.p / delta\n else:\n self.dt = self.p" ]
[ "0.62291473", "0.6129261", "0.58999515", "0.5889212", "0.5879499", "0.58049667", "0.57781595", "0.5736014", "0.5695776", "0.56671214", "0.56644464", "0.5662342", "0.5658931", "0.56068385", "0.55981326", "0.5557203", "0.55318046", "0.55170333", "0.550636", "0.5491459", "0.54890627", "0.5473628", "0.5473114", "0.54684335", "0.54599744", "0.54551935", "0.54512936", "0.5425547", "0.54206157", "0.54194367" ]
0.6740698
0
Cut spectrum to a given energy range; return [0,0] if out of range
def spectrum_cut(spectrum, eVrange=(0.0, 0.0)):
    if eVrange[1] == 0.0:
        return spectrum
    else:
        if spectrum[-1,0] <= eVrange[0] or spectrum[0,0] >= eVrange[1]:
            return np.array([[0, 0]], dtype=np.float)
        else:
            idx1 = np.argmax(spectrum[:,0] >= eVrange[0])
            idx2 = np.argmax(spectrum[:,0] > eVrange[1])
            if spectrum[0,0] >= eVrange[0]:
                idx1 = 0
            if spectrum[-1,0] <= eVrange[1]:
                idx2 = -1
            return spectrum[idx1:idx2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cut_spectrum(input_spectrum, desired_frequency_range):\n channels_ip = []\n for ip in input_spectrum.GetChannels():\n channel_ip = []\n channel_op = []\n for n, i in enumerate(ip):\n if n > desired_frequency_range[0] / input_spectrum.GetResolution() and n < desired_frequency_range[1] / \\\n input_spectrum.GetResolution():\n channel_ip.append(i)\n else:\n channel_ip.append(0.0)\n channel_op.append(0.0)\n channels_ip.append(tuple(channel_ip))\n input_spectrum_modified = sumpf.Spectrum(channels=tuple(channels_ip), resolution=input_spectrum.GetResolution(),\n labels=input_spectrum.GetLabels())\n return input_spectrum_modified", "def range_flux(self):\n return self.max_flux - self.min_flux", "def get_spectra_range(_filename, delay_us, source_to_detector_cm, range_min, range_max, time_lamda_ev_axis='eV'):\n df_spectra = pd.read_csv(_filename, sep='\\t', header=None)\n time_array = (np.array(df_spectra[0]))\n # flux_array = (np.array(df_spectra[1]))\n if time_lamda_ev_axis == 'lamda':\n lamda_array = time2lamda(time_array, delay_us, source_to_detector_cm)\n return lamda_array\n if time_lamda_ev_axis == 'eV':\n ev_array = time2ev(time_array, delay_us, source_to_detector_cm)\n ev_array = ev_array[range_min:range_max]\n ev_array = ev_array[::-1] # Flip array from descending to normal\n return ev_array\n if time_lamda_ev_axis == 'time':\n time_array = time_array[range_min:range_max]\n return time_array", "def range_spectrum_filter(self, center, width, k=3):\n fshift = _np.ones(self.shape[0])\n fshift[1::2] = -1\n slc_filter = _np.zeros(self.shape[0] // 2 + 1) * 0\n filter_slice = slice(center - width // 2, center + width // 2)\n slc_filter[filter_slice] = _sig.kaiser(width, k)\n raw_filter = _np.hstack([0, _np.fft.irfft(slc_filter) * fshift[1:]])\n return slc_filter", "def geneffcut(energy, array, cutvals=hads, bins=BINS):\n binning = np.digitize(energy, bins) - 1\n binning[binning < 0] = 0.\n binning[binning >= len(bins)-1] = 0.\n hadeffcut = np.zeros(len(energy), dtype=bool)\n for i, cutval in enumerate(cutvals):\n binmask = binning == i\n hadeffcut[binmask] = array[binmask] < cutval\n binning = np.digitize(energy, bins) - 1\n binning[binning < 0] = -1\n binning[binning >= len(bins)-1] = -1\n hadeffcut[binning == -1] = 0\n\n return hadeffcut", "def substract_given_gaussian(wavelength, spectrum, centre, peak=0, sigma=0, flux=0, search_peak=False, allow_absorptions = False,\n lowlow= 20, lowhigh=10, highlow=10, highhigh = 20, \n lmin=0, lmax=0, fmin=0, fmax=0, plot=True, fcal=False, verbose = True, warnings=True): \n do_it = False\n # Check that we have the numbers!\n if peak != 0 and sigma != 0 : do_it = True\n\n if peak == 0 and flux != 0 and sigma != 0:\n #flux = peak * sigma * np.sqrt(2*np.pi)\n peak = flux / (sigma * np.sqrt(2*np.pi))\n do_it = True \n\n if sigma == 0 and flux != 0 and peak != 0 :\n #flux = peak * sigma * np.sqrt(2*np.pi)\n sigma = flux / (peak * np.sqrt(2*np.pi)) \n do_it = True \n \n if flux == 0 and sigma != 0 and peak != 0 :\n flux = peak * sigma * np.sqrt(2*np.pi)\n do_it = True\n\n if sigma != 0 and search_peak == True: do_it = True \n\n if do_it == False:\n print(\"> Error! We need data to proceed! Give at least two of [peak, sigma, flux], or sigma and force peak to f[centre]\")\n s_s = spectrum\n else:\n # Setup wavelength limits\n if lmin == 0 :\n lmin = centre-65. 
# By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = centre+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((spectrum[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to centre\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > centre-lowlow and w_spec[i] < centre-lowhigh) or (w_spec[i] > centre+highlow and w_spec[i] < centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > centre-lowlow and w_spec[i] < centre-lowhigh) or (w_spec[i] > centre+highlow and w_spec[i] < centre+highhigh) ) \n \n # Linear Fit to continuum \n try: \n mm,bb = np.polyfit(w_cont, f_cont, 1)\n except Exception:\n bb = np.nanmedian(spectrum)\n mm = 0.\n if verbose or warnings: \n print(\" WARNING! Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n # c_cont = mm*np.array(w_cont)+bb \n # rms continuum\n # rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n if search_peak:\n # Search for index here w_spec(index) closest to line\n try:\n min_w = np.abs(np.array(w_spec)-centre)\n mini = np.nanmin(min_w)\n peak = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n flux = peak * sigma * np.sqrt(2*np.pi) \n if verbose: print(\" Using peak as f[\",np.round(centre,2),\"] = \",np.round(peak,2),\" and sigma = \", np.round(sigma,2), \" flux = \",np.round(flux,2))\n except Exception:\n if verbose or warnings: print(\" Error trying to get the peak as requested wavelength is \",np.round(centre,2),\"! Ignoring this fit!\")\n peak = 0.\n flux = -0.0001\n \n no_substract = False\n if flux < 0:\n if allow_absorptions == False:\n if np.isnan(centre) == False:\n if verbose or warnings : print(\" WARNING! This is an ABSORPTION Gaussian! 
As requested, this Gaussian is NOT substracted!\")\n no_substract = True\n if no_substract == False: \n if verbose: print(\" Substracting Gaussian at {:7.1f} with peak ={:10.4f} sigma ={:6.2f} and flux ={:9.4f}\".format(centre, peak,sigma,flux))\n \n gaussian_fit = gauss(w_spec, centre, peak, sigma)\n \n \n index=0\n s_s=np.zeros_like(spectrum)\n for wave in range(len(wavelength)):\n s_s[wave]=spectrum[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-gaussian_fit[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-gaussian_fit[index]\n index=index+1\n if plot: \n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at line\n plt.axvline(x=centre, color='k', linestyle='-', alpha=0.8)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(centre+highlow, centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(centre-lowlow, centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical lines to emission line\n #plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n #plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n #plt.plot(w_spec, residuals, 'k')\n #plt.title('Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show() \n plt.close()\n \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,spectrum, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n else:\n s_s = spectrum\n return s_s", "def trim_spectrum(freqs, power_spectra, f_range):\n\n # Create mask to index only requested frequencies\n f_mask = np.logical_and(freqs >= f_range[0], freqs <= f_range[1])\n\n # Restrict freqs & psd to requested range. The if/else is to cover both 1d or 2d arrays\n freqs_ext = freqs[f_mask]\n power_spectra_ext = power_spectra[f_mask] if power_spectra.ndim == 1 \\\n else power_spectra[:, f_mask]\n\n return freqs_ext, power_spectra_ext", "def trim_spectrum(freqs, power_spectra, f_range):\n\n # Create mask to index only requested frequencies\n f_mask = np.logical_and(freqs >= f_range[0], freqs <= f_range[1])\n\n # Restrict freqs & psd to requested range. 
The if/else is to cover both 1d or 2d arrays\n freqs_ext = freqs[f_mask]\n power_spectra_ext = power_spectra[f_mask] if power_spectra.ndim == 1 \\\n else power_spectra[:, f_mask]\n\n return freqs_ext, power_spectra_ext", "def get_min_cut(img, mask = None):\n \n global use_forward\n h, w = img.shape[:2]\n\n calc_energy = forward_energy if use_forward else backward_energy\n\n energy_map = calc_energy(img)\n\n if( mask is not None ):\n energy_map[np.where(mask >= 10)] = -100000000000.0\n\n former = np.zeros_like(energy_map,dtype=np.int)\n for i in range(1,h):\n for j in range(0,w):\n place = np.argmin(energy_map[i-1, max(0,j-1):j+2])\n former[i,j] = max(0,j-1)+place\n energy_map[i,j] += energy_map[i-1,max(0,j-1)+place]\n index = np.zeros(h,dtype = np.int)\n last = np.argmin(energy_map[-1])\n fast = np.ones((h,w),dtype= np.bool)\n for i in range(h-1,-1,-1):\n index[i] = last\n fast[i,last] = False\n last = former[i,last]\n\n return index, fast", "def sub_spectrum(self, start_w: float, stop_w: float):\n self.__bounds_check(*[start_w, stop_w])\n start_ind = np.where(start_w <= self.spectrum[:, 0])[0][0]\n stop_ind = np.where(self.spectrum[:, 0] <= stop_w)[0][-1] + 1\n subspec = self.spectrum[start_ind:stop_ind, :].copy()\n return subspec", "def cutSec(ppm, X, start, stop, featureMask):\n\tflip=0\n\tif ppm[0]>ppm[-1]:\n\t\tflip=1\n\t\tppm = ppm[::-1]\n\t\tX = X[:, ::-1]\n \n #find first entry in ppm with >='start' valu\n\tstart = (ppm>=start).nonzero()\n\tstart = start[0][0]#first entry\n\tstop = (ppm<=stop).nonzero()\n\tstop = stop[0][-1]#last entry\n\n#currently setting featureMask will get rid of peaks in start:stop region BUT it also marks as excluded so have removed as inaccurately marking for exclusion when all we want to do is remove from intensityData not mark as exluded\n\ttry:\n\t\tfeatureMask[0,start:stop]=False # this may only occur on unit test data, not sure need to check but either way was causing issue\n\texcept:\n\t\tfeatureMask[start:stop]=False\n\tif flip==1:\n\t\tppm = ppm[::-1]\n\t\tX = X[:, ::-1]\n\treturn ppm, X, featureMask\n\tpass", "def sanitize_energies(full_us, lamb_idx, cutoff=10000):\n ref_us = np.expand_dims(full_us[:, lamb_idx], axis=1)\n abs_us = np.abs(full_us - ref_us)\n return np.where(abs_us < cutoff, full_us, np.inf)", "def find_cut(events, rates, obstime, feature, low_cut, high_cut, gamma_efficiency):\n\n if events.shape[0] == 0:\n\n if feature == \"gammaness\":\n return low_cut\n else:\n return high_cut\n\n tol = 1000\n\n if feature == \"gammaness\":\n lookfor_cut = high_cut\n alternative_cut = low_cut\n else:\n lookfor_cut = low_cut\n alternative_cut = high_cut\n\n while tol > 1e-6:\n midpoint = (lookfor_cut + alternative_cut) / 2.0\n\n if samesign(diff_events_after_cut(events, rates, obstime, feature, lookfor_cut, gamma_efficiency),\n diff_events_after_cut(events, rates, obstime, feature, midpoint, gamma_efficiency)):\n lookfor_cut = midpoint\n else:\n alternative_cut = midpoint\n\n tol = abs(alternative_cut - lookfor_cut)\n return midpoint", "def FilterByRange(X, rangeCut=0.4):\n Rg = X.iloc[:, X.columns.get_level_values(1) == \"ptp\"]\n Xidx = np.all(Rg.values <= rangeCut, axis=1)\n return X.iloc[Xidx, :]", "def remove_distance_extremes(scan, low, high):\n scan.samples[:] = [sample for sample in scan.samples if (\n sample.distance >= low and sample.distance <= high)]", "def cut( self, i_start, i_stop ):\n # create two series of indices, combine them and remove them from the data cube\n beginning = np.arange( i_start, dtype=int )\n end = np.arange( 
i_stop, self.n_steps, dtype=int )\n self._remove_steps( np.concatenate([beginning,end]).tolist() )", "def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]", "def ex_crange(data):\n center = minv = maxv = spread = 0\n step = 1\n try:\n center = int(data[0])\n spread = int(data[1])\n if len(data) > 2:\n step = int(data[2])\n minv = center - spread/2\n maxv = center + spread/2\n except ValueError:\n pass\n if step == 0:\n step = 1\n if minv > maxv:\n minv, maxv = maxv, minv\n rv = [center]\n v = center - step\n while minv <= v <= maxv:\n rv.insert(0, v)\n v -= step\n v = center + step\n while minv <= v <= maxv:\n rv.append(v)\n v += step\n return rv", "def getPeakDimFullShiftRange(peakDim):\n\n\n dataDimRef = peakDim.dataDimRef\n dataDim = peakDim.dataDim\n if not dataDimRef:\n values = dataDim.pointValues\n return [ min(values), max(values) ]\n \n expDimRef = dataDim.expDim.findFirstExpDimRef()\n shiftList = expDimRef.expDim.experiment.shiftList\n unit = shiftList.unit\n\n if expDimRef.minAliasedFreq is None:\n if unit == 'point':\n minShift = dataDim.numPointsOrig\n else:\n minShift = unit_converter[('point',unit)](dataDim.numPointsOrig,dataDimRef)\n\n else:\n minShift = expDimRef.minAliasedFreq\n\n if expDimRef.maxAliasedFreq is None:\n if unit == 'point':\n maxShift = 0\n else:\n maxShift = unit_converter[('point',unit)](0,dataDimRef)\n\n else:\n maxShift = expDimRef.maxAliasedFreq\n \n shiftRange = [minShift,maxShift]\n shiftRange.sort()\n \n return shiftRange", "def clamp(img, bins, mask=None, sigma=0):\n data = smooth(img, sigma)\n return clamp_array(data, bins, mask=mask)", "def low_cut_filter(x, fs, cutoff=70):\n nyquist = fs // 2\n norm_cutoff = cutoff / nyquist\n\n # low cut filter\n fil = firwin(255, norm_cutoff, pass_zero=False)\n lcf_x = lfilter(fil, 1, x)\n\n return lcf_x", "def data_range(x):\n return max(x)-min(x)", "def _standardize_cutoff(cutoff):\n cutoff = np.asarray(cutoff)\n cutoff[0] = max(0., cutoff[0])\n cutoff[1] = min(1., cutoff[1])\n cutoff[0] = np.min([cutoff[0], 0.09])\n cutoff[1] = np.max([cutoff[1], 0.91])\n return cutoff", "def cleaned(self, start_threshold=0.01, end_threshold=0.25, shifted=True):\n start_i, end_i = None, None\n\n max_i = np.nanargmax(self.ys)\n max_y = self.ys[max_i]\n\n if start_threshold is not None:\n # includes the value before threshold is met\n for i, y in enumerate(self.ys[1:]):\n if y > max_y*start_threshold:\n start_i = i\n break\n\n if end_threshold is not None:\n for i, y in enumerate(self.ys[max_i:], start=max_i):\n if y < max_y*end_threshold:\n end_i = i\n break\n\n return self.cropped_index(start_i, end_i, shifted)", "def get_cut_dataframe(start, end):\n data = get_dataframe(start, end)\n nrow = data.shape[0]\n data.loc[0, 'from'] = start\n data.loc[0, 'delta'] = data.loc[0, 'to'] - start\n if data.loc[nrow-1, 'to'] > end:\n data.loc[nrow-1, 'to'] = end\n data.loc[nrow-1, 'delta'] = end - data.loc[nrow-1, 'from']\n return data", "def wavelength_bins(width=.1, start=700, stop=1500, energy=True):\n if not energy:\n return np.linspace(start, stop, int((stop - start) / width) + 1)\n\n h = astropy.constants.h.to('eV s').value\n c = astropy.constants.c.to('m/s').value\n const = h * c * 1e9\n # Calculate upper and lower energy limits from wavelengths, note that start and stop switch when going to energy\n e_stop = const / start\n e_start = const / stop\n n = int((e_stop - e_start) / width)\n # Construct energy bin edges (reversed) and convert back to wavelength\n return const / 
np.linspace(e_stop, e_start, n + 1)", "def clip_range(x, xlim):\n return min([max([x, xlim[0]]), xlim[1]])", "def calculateenergy_betweenfreq_freqdomain(input_signal_or_spectrum, desired_frequency_range):\n if isinstance(input_signal_or_spectrum, (sumpf.Signal)):\n ip = sumpf.modules.FourierTransform(signal=input_signal_or_spectrum).GetSpectrum()\n else:\n ip = input_signal_or_spectrum\n spec = cut_spectrum(ip, desired_frequency_range)\n energy = calculateenergy_freqdomain(spec)\n return energy", "def trim(self, start_time, end_time):\n\n # find indices of the times in self.times closest to min_t and max_t\n lowest_index = np.abs(self.times - start_time).argmin()\n highest_index = np.abs(self.times - end_time).argmin()\n\n # take slices of the spectrogram and spec_freq that fall within desired range\n return self.__class__(\n self.spectrogram[:, lowest_index : highest_index + 1],\n frequencies=self.frequencies,\n times=self.times[lowest_index : highest_index + 1],\n decibel_limits=self.decibel_limits,\n window_samples=self.window_samples,\n overlap_samples=self.overlap_samples,\n window_type=self.window_type,\n audio_sample_rate=self.audio_sample_rate,\n scaling=self.scaling,\n )", "def getPrescaleFromCut(cut):\n sign = -1 if cut<0 else 1\n ucut = abs(cut)\n return (sign*0xFFFFFF ) / float( 0x1000000 - ucut )" ]
[ "0.6825761", "0.5779064", "0.5748746", "0.5698457", "0.56758094", "0.5662627", "0.56557333", "0.56557333", "0.56524175", "0.5537704", "0.5531709", "0.55142623", "0.54934263", "0.54912776", "0.54783106", "0.5407424", "0.5381168", "0.5365206", "0.53605443", "0.5358346", "0.5313667", "0.5292912", "0.5284035", "0.52774125", "0.5255929", "0.524961", "0.52395165", "0.523372", "0.5217437", "0.52151805" ]
0.791948
0
Computes the Bragg angle (deg) of the specified material, reflection and photon energy
def BraggAngle(ID,hkl,E=None):
    E = eV(E)
    d = dSpace(ID,hkl)
    theta = asind(lam(E)/2/d)
    return theta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))", "def angle(self) -> float:\n ...", "def getAngle(p1, p2, p3):\n\tv1 = p1 - p2\n\tv2 = p3 - p2\n\tmag = la.norm(v1) * la.norm(v2)\n\tc = np.dot(v1, v2) / mag\n\tcross = np.cross(v1,v2)\n\ts = la.norm(cross)/mag\n\tatang = math.atan2(s,c)\n\tang = atang * 180 / math.pi\n\treturn ang", "def angle(z):", "def getEdgeAngle():\n '''\n returns angle a\n a\n ◿\n b c\n '''\n ANGLE_OFFSET = 8 # How far off the angle measurements are in degrees.\n THRESHOLD = 220 # How much light must be reflected to 'notice' the desk.\n angle = 0\n while angle < panTilt.TLT_RANGE:\n angle += 1\n panTilt.tilt(int(angle))\n deskDetected = ir.readWithDelay()\n # print \"Angle:\", angle + ANGLE_OFFSET, \", ir reading:\", deskDetected\n if deskDetected > THRESHOLD or angle == panTilt.TLT_RANGE:\n # print \"-----------------------\"\n break # Break out of looking downwards loop\n panTilt.up() # Look up again\n return 90 - angle - ANGLE_OFFSET", "def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360", "def gona(self):\n return GONAngle(dec2gon(self.dec_angle))", "def comp_angle_opening_magnet(self):\n\n if self.W1 > 0:\n Rbo = self.get_Rbo()\n return float(2 * arcsin(self.W1 / (2 * Rbo)))\n else:\n return self.comp_angle_magnet()", "def gona(self):\n return GONAngle(self.gon())", "def gona(self):\n return GONAngle(self.gon())", "def gona(self):\n return GONAngle(self.gon())", "def calculate_attitude_angle(self):\n return np.arctan(np.pi * (1 - self.eccentricity_ratio ** 2) / (4 * self.eccentricity_ratio))", "def calculate_angmom(partdict, eq):\n rho = partdict['rho']\n polflux_norm = rho**2\n\n psia = eq.psiaxis; psiw=eq.psiedge\n if psiw<psia or psiw==0:\n psiw-=psia; psia-=psia; # now stuff set from 0 to something.\n if psiw<0: \n psiw=psiw*-1.;\n\n polflux = polflux_norm*(psiw-psia)+psia\n\n m = partdict['m']\n R = partdict['R']\n vphi = np.copy(partdict['vphi'])\n Z = partdict['Z']\n #vphi *= m*1.6e-27/(Z*1.602e-19*b_param(R)*R)\n canangmom = m*1.66e-27*R*vphi-Z*1.602e-19*polflux\n canangmom = np.array(canangmom)\n return canangmom", "def angle(self):\n return angle(self.force, self.forceXYZ, self.excited_axis,\n self.distance, self.distanceXYZ)", "def angle_with_membrane_normal(self) -> float:\n memb_normal = np.array([0, 0, 1])\n return np.degrees(np.arccos(np.clip(np.dot(self.dir_vec, memb_normal),\n -1.0, 1.0)))", "def angle(self) -> int:", "def _angle(u, v, w, d='+'):\n vu = np.arctan2(u[1] - v[1], u[0] - v[0])\n vw = np.arctan2(w[1] - v[1], w[0] - v[0])\n phi = vw - vu\n if phi < 0:\n phi += 2 * np.pi\n if d == '-':\n phi = 2 * np.pi - phi\n return np.round(phi, 6)", "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def angle_convert(q_norm, energy):\n q_norm = np.array([q for q in q_norm if np.abs((HBAR * SPEED_OF_LIGHT * q) / (2 * energy)) <= 1])\n 
return 2 * (180.0 / np.pi) * np.arcsin((HBAR * SPEED_OF_LIGHT * q_norm) / (2 * energy))", "def get_angle_and_body_vector(moments):\n body_cov = np.array( [ [moments['mu20'], moments['mu11']], [moments['mu11'], moments['mu02'] ]])\n eig_vals, eig_vecs = np.linalg.eigh(body_cov)\n max_eig_ind = np.argmax(eig_vals**2)\n max_eig_vec = eig_vecs[:,max_eig_ind]\n angle = np.arctan2(max_eig_vec[1], max_eig_vec[0])\n return angle, max_eig_vec", "def get_angle_and_body_vector(moments):\n body_cov = np.array( [ [moments['mu20'], moments['mu11']], [moments['mu11'], moments['mu02'] ]])\n eig_vals, eig_vecs = np.linalg.eigh(body_cov)\n max_eig_ind = np.argmax(eig_vals**2)\n max_eig_vec = eig_vecs[:,max_eig_ind]\n angle = np.arctan2(max_eig_vec[1], max_eig_vec[0])\n return angle, max_eig_vec", "def angle(self, dangle_deg: float) -> None:\n ...", "def find_angle(p1, p2, p3):\n\n BAx = p1[0] - p2[0]\n BAy = p1[1] - p2[1]\n\n BCx = p3[0] - p2[0]\n BCy = p3[1] - p2[1]\n\n a = [BAx, BAy]\n b = [BCx, BCy]\n a_mag = np.linalg.norm(a)\n b_mag = np.linalg.norm(b)\n\n theta = np.arccos(np.dot(a, b) / (a_mag * b_mag))\n\n return math.degrees(theta)", "def angle(self):\n return 0", "def compute_hydration_energy(molecule, parameters, platform_name=\"Reference\"):\n\n platform = openmm.Platform.getPlatformByName(platform_name)\n\n # Create OpenMM System.\n system = openmm.System()\n for atom in molecule.GetAtoms():\n mass = OEGetDefaultMass(atom.GetAtomicNum())\n system.addParticle(mass * units.amu)\n\n # Add GBVI term\n # gbvi_force = openmm.GBVISoftcoreForce()\n gbvi_force = openmm.GBVIForce() \n gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff\n gbvi_force.setSoluteDielectric(1)\n gbvi_force.setSolventDielectric(78)\n \n # Use scaling method.\n # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline)\n # gbvi_force.setQuinticLowerLimitFactor(0.75)\n # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers)\n \n # Build indexable list of atoms.\n atoms = [atom for atom in molecule.GetAtoms()] \n \n # Assign GB/VI parameters.\n for atom in molecule.GetAtoms(): \n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n charge = atom.GetPartialCharge() * units.elementary_charge\n try:\n radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms\n gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole\n except Exception, exception:\n print \"Cannot find parameters for atomtype '%s' in molecule '%s'\" % (atomtype, molecule.GetTitle())\n print parameters.keys()\n raise exception\n \n # gamma *= -1.0 # DEBUG\n lambda_ = 1.0 # fully interacting\n # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce\n gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce\n \n # Add bonds.\n for bond in molecule.GetBonds():\n # Get atom indices.\n iatom = bond.GetBgnIdx()\n jatom = bond.GetEndIdx()\n # Get bond length.\n (xi, yi, zi) = molecule.GetCoords(atoms[iatom])\n (xj, yj, zj) = molecule.GetCoords(atoms[jatom])\n distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms\n # Identify bonded atoms to GBVI.\n gbvi_force.addBond(iatom, jatom, distance)\n\n # Add the force to the system.\n system.addForce(gbvi_force)\n \n # Build coordinate array.\n natoms = len(atoms)\n coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms)\n for (index,atom) in enumerate(atoms):\n (x,y,z) = molecule.GetCoords(atom)\n coordinates[index,:] = 
units.Quantity(numpy.array([x,y,z]),units.angstroms) \n \n # Create OpenMM Context.\n timestep = 1.0 * units.femtosecond # arbitrary\n integrator = openmm.VerletIntegrator(timestep)\n context = openmm.Context(system, integrator, platform)\n\n # Set the coordinates.\n context.setPositions(coordinates)\n \n # Get the energy\n state = context.getState(getEnergy=True)\n energy = state.getPotentialEnergy() / units.kilocalories_per_mole\n if numpy.isnan(energy):\n energy = +1e6;\n\n return energy", "def _joint_angle_control(self):\n\n error = self.target_pos - self.robot_arm_pos\n return self._pd_control(error) + self.torque", "def LED_angle():\n right = np.transpose(vision.pqr_l)\n print(right)\n if not right.size:\n return\n else:\n angle = np.arctan2(right[0, 1], right[0, 0]) * 180 / pi\n if angle > -15:\n leds.off()\n else:\n leds.on()", "def Ag():\n # return load_material(miepy.__path__[0] + \"/materials/ag.npy\")\n\n wp = 9.01\n sig = [1.01889808, 0.62834151]\n f = [0,5.05635462]\n gam = [0.01241231, 0.54965831]\n wav = np.linspace(300,1100,1000)\n return drude_lorentz(wp,sig,f,gam,wav)", "def mag_ang(self, x):\n return 20*np.log10(np.abs(x)),np.arctan2(x.imag, x.real)*180/np.pi", "def blazeAngle(inc,wave,m,d):\n psi = blazeYaw(inc,wave,m,d)\n beta1 = cos(inc)*cos(psi)\n alpha1 = cos(inc)*sin(psi)-m*wave/d\n return np.arcsin(alpha1/cos(np.arcsin(beta1)))" ]
[ "0.6266474", "0.58636326", "0.581911", "0.579827", "0.57421386", "0.5730155", "0.57119465", "0.56873935", "0.5686968", "0.5686968", "0.5686968", "0.56705695", "0.56380606", "0.55969983", "0.55962616", "0.5550515", "0.5547123", "0.5540648", "0.55189204", "0.551004", "0.551004", "0.5481405", "0.54766595", "0.54633904", "0.5445992", "0.5424581", "0.5420115", "0.54163164", "0.5408278", "0.54072636" ]
0.60041696
1
Computes the structure factor
def StructureFactor(ID,f,hkl,z=None):
    ID=goodID(ID)
    i=complex(0,1)
    h=hkl[0]
    k=hkl[1]
    l=hkl[2]
    L=latticeType[ID]
    if L=='fcc':
        F=f*(1+np.exp(-i*np.pi*(k+l))+np.exp(-i*np.pi*(h+l))+np.exp(-i*np.pi*(h+k)))
    elif L=='bcc':
        F=f*(1+np.exp(-i*np.pi*(h+k+l)))
    elif L=='cubic':
        F=f
    elif L=='diamond':
        F=f*(1+np.exp(-i*np.pi*(k+l))+np.exp(-i*np.pi*(h+l))+np.exp(-i*np.pi*(h+k)))*(1+np.exp(-i*2*np.pi*(h/4.0+k/4.0+l/4.0)))
    # elif L=='rhomb':
    #     z=latticeParamRhomb[ID]
    #     F=f*(1+np.exp(2*i*np.pi*(h+k+l)*z))
    elif L=='tetr':
        F=f
    elif L=='hcp':
        F=f*(1+np.exp(2*i*np.pi*(h/3.0+2*k/3.0+l/2.0)))
    else:
        raise Exception(f'Unrecognized L: {L}')
    return F
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_factors():", "def get_structure_factor(\n scalar_field: ScalarField,\n smoothing: Union[None, float, str] = \"auto\",\n wave_numbers: Union[Sequence[float], str] = \"auto\",\n add_zero: bool = False,\n) -> Tuple[np.ndarray, np.ndarray]:\n logger = logging.getLogger(__name__)\n\n if not isinstance(scalar_field, ScalarField):\n raise TypeError(\n \"Length scales can only be calculated for scalar \"\n f\"fields, not {scalar_field.__class__.__name__}\"\n )\n\n grid = scalar_field.grid\n if not isinstance(grid, CartesianGridBase):\n raise NotImplementedError(\n \"Structure factor can currently only be calculated for Cartesian grids\"\n )\n if not all(grid.periodic):\n logger.warning(\n \"Structure factor calculation assumes periodic boundary \"\n \"conditions, but not all grid dimensions are periodic\"\n )\n\n # do the n-dimensional Fourier transform and calculate the structure factor\n f1 = np_fftn(scalar_field.data, norm=\"ortho\").flat[1:]\n flat_data = scalar_field.data.flat\n sf = np.abs(f1) ** 2 / np.dot(flat_data, flat_data)\n\n # an alternative calculation of the structure factor is\n # f2 = np_ifftn(scalar_field.data, norm='ortho').flat[1:]\n # sf = (f1 * f2).real\n # sf /= (scalar_field.data**2).sum()\n # but since this involves two FFT, it is probably slower\n\n # determine the (squared) components of the wave vectors\n k2s = [\n np.fft.fftfreq(grid.shape[i], d=grid.discretization[i]) ** 2\n for i in range(grid.dim)\n ]\n # calculate the magnitude\n k_mag = np.sqrt(reduce(np.add.outer, k2s)).flat[1:]\n\n no_wavenumbers = wave_numbers is None or (\n isinstance(wave_numbers, str) and wave_numbers == \"auto\"\n )\n\n if smoothing is not None and smoothing != \"none\":\n # construct the smoothed function of the structure factor\n if smoothing == \"auto\":\n smoothing = k_mag.max() / 128\n smoothing = float(smoothing) # type: ignore\n sf_smooth = SmoothData1D(k_mag, sf, sigma=smoothing)\n\n if no_wavenumbers:\n # determine the wave numbers at which to evaluate it\n k_min = 2 / grid.cuboid.size.max()\n k_max = k_mag.max()\n k_mag = np.linspace(k_min, k_max, 128)\n\n else:\n k_mag = np.array(wave_numbers)\n\n # obtain the smoothed values at these points\n sf = sf_smooth(k_mag)\n\n elif not no_wavenumbers:\n logger.warning(\n \"Argument `wave_numbers` is only used when `smoothing` is enabled.\"\n )\n\n if add_zero:\n sf = np.r_[1, sf]\n k_mag = np.r_[0, k_mag]\n\n return k_mag, sf", "def structure_factor(trj, Q_range=(0.5, 50), n_points=1000, framewise_rdf=False, weighting_factor='fz'):\n if weighting_factor not in ['fz']:\n raise ValueError('Invalid weighting_factor `{}` is given.'\n ' The only weighting_factor currently supported is `fz`.'.format(\n weighting_factor))\n\n rho = np.mean(trj.n_atoms / trj.unitcell_volumes)\n L = np.min(trj.unitcell_lengths)\n\n top = trj.topology\n elements = set([a.element for a in top.atoms])\n\n compositions = dict()\n form_factors = dict()\n rdfs = dict()\n\n Q = np.logspace(np.log10(Q_range[0]),\n np.log10(Q_range[1]),\n num=n_points)\n S = np.zeros(shape=(len(Q)))\n\n for elem in elements:\n compositions[elem.symbol] = len(top.select('element {}'.format(elem.symbol)))/trj.n_atoms\n form_factors[elem.symbol] = elem.atomic_number\n\n for i, q in enumerate(Q):\n num = 0\n denom = 0\n\n for elem in elements:\n denom += compositions[elem.symbol] * form_factors[elem.symbol]\n\n for (elem1, elem2) in it.product(elements, repeat=2):\n e1 = elem1.symbol\n e2 = elem2.symbol\n\n f_a = form_factors[e1]\n f_b = form_factors[e2]\n\n x_a = 
compositions[e1]\n x_b = compositions[e2]\n \n try:\n g_r = rdfs['{0}{1}'.format(e1, e2)]\n except KeyError:\n pairs = top.select_pairs(selection1='element {}'.format(e1),\n selection2='element {}'.format(e2))\n if framewise_rdf:\n r, g_r = rdf_by_frame(trj,\n pairs=pairs,\n r_range=(0, L / 2),\n bin_width=0.001)\n else:\n r, g_r = md.compute_rdf(trj,\n pairs=pairs,\n r_range=(0, L / 2),\n bin_width=0.001)\n rdfs['{0}{1}'.format(e1, e2)] = g_r\n integral = simps(r ** 2 * (g_r - 1) * np.sin(q * r) / (q * r), r)\n\n if weighting_factor == 'fz':\n pre_factor = 4 * np.pi * rho\n partial_sq = (integral*pre_factor) + 1\n num += (x_a*f_a*x_b*f_b) * (partial_sq)\n S[i] = (num/(denom**2))\n return Q, S", "def calculate_217f_part_stress(**attributes): # pylint: disable=R0912, R0914\n _dic_ref_temp = {\n 1: 343.0,\n 2: {\n 1: 343.0,\n 2: 343.0,\n 3: 398.0,\n 4: 398.0\n },\n 3: 298.0,\n 5: 398.0,\n 6: 298.0,\n 7: 298.0,\n 9: 358.0,\n 10: 358.0,\n 11: 313.0,\n 12: 298.0,\n 13: 358.0,\n 14: 343.0,\n 15: 343.0\n }\n _dic_factors = {\n 1: [4.5E-9, 12.0, 1.0, 0.6, 1.0, 1.0],\n 2: {\n 1: [3.25E-4, 1.0, 3.0, 1.0, 1.0, 1.0],\n 2: [3.25E-4, 1.0, 3.0, 1.0, 1.0, 1.0],\n 3: [5.0E-5, 3.5, 1.0, 1.0, 1.0, 1.0],\n 4: [5.0E-5, 3.5, 1.0, 1.0, 1.0, 1.0]\n },\n 3: [7.33E-3, 0.202, 2.6, 1.45, 0.89, 1.3],\n 5: [0.0031, 1.0, 10.0, 1.0, 1.0, 1.5],\n 6: [0.00148, 1.0, 2.0, 0.5, 1.0, 1.0],\n 7: [0.00015, 2.64, 1.0, 0.466, 1.0, 1.0],\n 8: [0.021, 0.065, 0.105, 0.0, 0.0, 0.0],\n 9: [0.0062, 1.0, 5.0, 1.0, 1.0, 1.0],\n 10: [0.0735, 1.03, 4.45, 2.74, 3.51, 1.0],\n 11: [0.0398, 0.514, 5.28, 1.44, 4.46, 1.0],\n 12: [0.0481, 0.334, 4.66, 1.47, 2.83, 1.0],\n 13: [0.019, 0.445, 7.3, 2.69, 2.46, 1.0],\n 14: [0.0246, 0.459, 9.3, 2.32, 5.3, 1.0],\n 15: [0.018, 1.0, 7.4, 2.55, 3.6, 1.0]\n }\n _dic_piQ = {\n 1: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 2: [0.03, 0.1, 0.3, 1.0, 5.0, 5.0, 15.0],\n 3: [1.0, 3.0],\n 4: [1.0, 3.0],\n 5: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 6: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 7: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 8: [1.0, 15.0],\n 9: [0.02, 0.06, 0.2, 0.6, 3.0, 10.0],\n 10: [2.5, 5.0],\n 11: [2.0, 4.0],\n 12: [2.0, 4.0],\n 13: [0.02, 0.06, 0.2, 0.6, 3.0, 10.0],\n 14: [2.5, 5.0],\n 15: [2.0, 4.0]\n }\n _dic_piE = {\n 1: [\n 1.0, 3.0, 8.0, 5.0, 13.0, 4.0, 5.0, 7.0, 11.0, 19.0, 0.5, 11.0,\n 27.0, 490.0\n ],\n 2: [\n 1.0, 2.0, 8.0, 4.0, 14.0, 4.0, 8.0, 10.0, 18.0, 19.0, 0.2, 10.0,\n 28.0, 510.0\n ],\n 3: [\n 1.0, 2.0, 10.0, 5.0, 17.0, 6.0, 8.0, 14.0, 18.0, 25.0, 0.5, 14.0,\n 36.0, 660.0\n ],\n 4: [\n 1.0, 2.0, 10.0, 5.0, 17.0, 6.0, 8.0, 14.0, 18.0, 25.0, 0.5, 14.0,\n 36.0, 660.0\n ],\n 5: [\n 1.0, 2.0, 11.0, 5.0, 18.0, 15.0, 18.0, 28.0, 35.0, 27.0, 0.8, 14.0,\n 38.0, 610.0\n ],\n 6: [\n 1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.3, 13.0,\n 34.0, 610.0\n ],\n 7: [\n 1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.5, 13.0,\n 34.0, 610.0\n ],\n 8: [\n 1.0, 5.0, 21.0, 11.0, 24.0, 11.0, 30.0, 16.0, 42.0, 37.0, 0.5,\n 20.0, 53.0, 950.0\n ],\n 9: [\n 1.0, 2.0, 12.0, 6.0, 20.0, 5.0, 8.0, 9.0, 15.0, 33.0, 0.5, 18.0,\n 48.0, 870.0\n ],\n 10: [\n 1.0, 2.0, 18.0, 8.0, 30.0, 8.0, 12.0, 13.0, 18.0, 53.0, 0.5, 29.0,\n 76.0, 1400.0\n ],\n 11: [\n 1.0, 2.0, 16.0, 7.0, 28.0, 8.0, 12.0, 0.0, 0.0, 38.0, 0.5, 0.0,\n 0.0, 0.0\n ],\n 12: [\n 1.0, 3.0, 16.0, 7.0, 28.0, 8.0, 12.0, 0.0, 0.0, 38.0, 0.5, 0.0,\n 0.0, 0.0\n ],\n 13: [\n 1.0, 3.0, 14.0, 6.0, 24.0, 5.0, 7.0, 12.0, 18.0, 39.0, 0.5, 22.0,\n 57.0, 1000.0\n ],\n 14: [\n 1.0, 2.0, 19.0, 8.0, 29.0, 40.0, 65.0, 48.0, 78.0, 46.0, 0.5, 25.0,\n 66.0, 1200.0\n ],\n 15: [\n 
1.0, 3.0, 14.0, 7.0, 24.0, 6.0, 12.0, 20.0, 30.0, 39.0, 0.5, 22.0,\n 57.0, 1000.0\n ]\n }\n # Resistance factor (piR) dictionary of values. The key is the\n # subcategory ID. The index in the returned list is the resistance range\n # breakpoint (breakpoint values are in _lst_breakpoints below). For\n # subcategory ID 6 and 7, the specification ID selects the correct set of\n # lists, then the style ID selects the proper list of piR values and then\n # the resistance range breakpoint is used to select\n _dic_piR = {\n 1: [1.0, 1.1, 1.6, 2.5],\n 2: [1.0, 1.1, 1.6, 2.5],\n 3: [1.0, 1.2, 1.3, 3.5],\n 5: [1.0, 1.7, 3.0, 5.0],\n 6: [[[1.0, 1.0, 1.2, 1.2, 1.6, 1.6, 1.6,\n 0.0], [1.0, 1.0, 1.0, 1.2, 1.6, 1.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.0, 1.2, 1.2, 1.2,\n 1.6], [1.0, 1.2, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0],\n [1.0, 1.6, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.1, 1.2, 1.2, 1.6, 0.0, 0.0],\n [1.0, 1.0, 1.4, 0.0, 0.0, 0.0, 0.0, 0.0]],\n [[1.0, 1.0, 1.0, 1.0, 1.2, 1.6], [1.0, 1.0, 1.0, 1.2, 1.6, 0.0],\n [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.0, 2.0, 0.0, 0.0], [\n 1.0, 1.0, 1.0, 2.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 2.0, 0.0, 0.0], [1.0, 1.2, 1.4, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.6, 0.0, 0.0, 0.0], [1.0, 1.0, 1.2, 2.0, 0.0, 0.0], [\n 1.0, 1.0, 1.2, 1.6, 0.0, 0.0\n ], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [1.0, 1.0, 1.0, 1.2, 0.0, 0.0],\n [1.0, 1.0, 1.4, 0.0, 0.0, 0.0], [1.0, 1.2, 1.6, 0.0, 0.0, 0.0], [\n 1.0, 1.0, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0],\n [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [\n 1.0, 1.0, 1.2, 1.5, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 1.6, 0.0],\n [1.0, 1.0, 1.0, 1.4, 1.6, 2.0], [1.0, 1.0, 1.0, 1.4, 1.6, 2.0], [\n 1.0, 1.0, 1.4, 2.4, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 2.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 0.0, 0.0, 0.0, 0.0], [\n 1.0, 1.2, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 1.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.6, 0.0, 0.0], [\n 1.0, 1.0, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.2, 1.5, 0.0, 0.0,\n 0.0], [1.0, 1.2, 0.0, 0.0, 0.0, 0.0]]],\n 7: [[[1.0, 1.2, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.2, 1.6, 0.0, 0.0],\n [1.0, 1.0, 1.2, 1.2, 1.6, 0.0], [1.0, 1.0, 1.0, 1.1, 1.2, 1.6],\n [1.0, 1.0, 1.0, 1.0, 1.2, 1.6], [1.0, 1.0, 1.0, 1.0, 1.2, 1.6]],\n [[1.0, 1.2, 1.6, 0.0, 0.0, 0.0], [1.0, 1.2, 1.6, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.1, 1.2, 1.4, 0.0],\n [1.0, 1.0, 1.0, 1.2, 1.6, 0.0], [1.0, 1.0, 1.0, 1.1, 1.4, 0.0]]],\n 9: [1.0, 1.4, 2.0],\n 10: [1.0, 1.1, 1.4, 2.0, 2.5, 3.5],\n 11: [1.0, 1.4, 2.0],\n 12: [1.0, 1.4, 2.0],\n 13: [1.0, 1.1, 1.2, 1.4, 1.8],\n 14: [1.0, 1.1, 1.2, 1.4, 1.8],\n 15: [1.0, 1.1, 1.2, 1.4, 1.8]\n }\n # Dictionary containing the number of element breakpoints for determining\n # the resistance factor list to use.\n _dic_breakpoints = {\n 1: [1.0E5, 1.0E6, 1.0E7],\n 2: [1.0E5, 1.0E6, 1.0E7],\n 3: [100.0, 1.0E5, 1.0E6],\n 5: [1.0E4, 1.0E5, 1.0E6],\n 6: [[500.0, 1.0E3, 5.0E3, 7.5E3, 1.0E4, 1.5E4, 2.0E4],\n [100.0, 1.0E3, 1.0E4, 1.0E5, 1.5E5, 2.0E5]],\n 7: [500.0, 1.0E3, 5.0E3, 1.0E4, 2.0E4],\n 9: [2.0E3, 5.0E3],\n 10: [1.0E4, 2.0E4, 5.0E4, 1.0E5, 2.0E5],\n 11: [2.0E3, 5.0E3],\n 12: [2.0E3, 5.0E3],\n 13: [5.0E4, 1.0E5, 2.0E5, 5.0E5],\n 14: [5.0E4, 1.0E5, 2.0E5, 5.0E5],\n 15: [1.0E4, 5.0E4, 2.0E5, 1.0E6]\n }\n _dic_piV = {\n 9: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 10: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 11: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 
12: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 13: [1.0, 1.05, 1.2],\n 14: [1.0, 1.05, 1.2],\n 15: [1.0, 1.05, 1.2]\n }\n _dic_piC = {10: [2.0, 1.0, 3.0, 1.5], 12: [2.0, 1.0]}\n _msg = ''\n\n # Calculate the base hazard rate.\n if attributes['subcategory_id'] == 2:\n _ref_temp = _dic_ref_temp[attributes['subcategory_id']][attributes[\n 'specification_id']]\n _f0 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][0]\n _f1 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][1]\n _f2 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][2]\n _f3 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][3]\n _f4 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][4]\n _f5 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][5]\n elif attributes['subcategory_id'] not in [4, 8]:\n _ref_temp = _dic_ref_temp[attributes['subcategory_id']]\n _f0 = _dic_factors[attributes['subcategory_id']][0]\n _f1 = _dic_factors[attributes['subcategory_id']][1]\n _f2 = _dic_factors[attributes['subcategory_id']][2]\n _f3 = _dic_factors[attributes['subcategory_id']][3]\n _f4 = _dic_factors[attributes['subcategory_id']][4]\n _f5 = _dic_factors[attributes['subcategory_id']][5]\n\n if attributes['subcategory_id'] == 4:\n attributes['lambda_b'] = 0.00006\n elif attributes['subcategory_id'] == 8:\n attributes['lambda_b'] = _dic_factors[attributes['subcategory_id']][\n attributes['type_id'] - 1]\n else:\n attributes['lambda_b'] = _f0 * exp(_f1 * (\n (attributes['temperature_active'] + 273.0) /\n _ref_temp))**_f2 * exp(((attributes['power_ratio'] / _f3) * (\n (attributes['temperature_active'] + 273.0) / 273.0)**_f4)**_f5)\n\n if attributes['lambda_b'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: Base hazard rate is 0.0 when ' \\\n 'calculating resistor, hardware ID: ' \\\n '{0:d}'.format(attributes['hardware_id'])\n\n # Calculate the resistance factor (piR).\n if attributes['subcategory_id'] not in [4, 8]:\n _index = -1\n if attributes['subcategory_id'] == 6:\n _breaks = _dic_breakpoints[attributes['subcategory_id']][\n attributes['specification_id'] - 1]\n else:\n _breaks = _dic_breakpoints[attributes['subcategory_id']]\n\n for _index, _value in enumerate(_breaks):\n _diff = _value - attributes['n_elements']\n if len(_breaks) == 1 and _diff < 0:\n break\n elif _diff >= 0:\n break\n\n if attributes['subcategory_id'] in [6, 7]:\n attributes['piR'] = _dic_piR[attributes['subcategory_id']][\n attributes['specification_id'] - 1][attributes['family_id'] -\n 1][_index + 1]\n elif attributes['subcategory_id'] not in [4, 8]:\n attributes['piR'] = _dic_piR[attributes['subcategory_id']][_index +\n 1]\n\n # Determine the quality factor (piQ).\n attributes['piQ'] = _dic_piQ[attributes['subcategory_id']][\n attributes['quality_id'] - 1]\n\n if attributes['piQ'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piQ is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}'.format(attributes['hardware_id'])\n\n # Determine the environmental factor (piE).\n attributes['piE'] = _dic_piE[attributes['subcategory_id']][\n attributes['environment_active_id'] - 1]\n\n if attributes['piE'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piE is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}'.format(attributes['hardware_id'])\n\n # Calculate the temperature factor (piT).\n if attributes['subcategory_id'] == 4:\n attributes['temperature_case'] = (attributes['temperature_active'] +\n 55.0 * 
attributes['power_ratio'])\n attributes['piT'] = exp(-4056.0 * (\n (1.0 / (attributes['temperature_case'] + 273.0)) - 1.0 / 298.0))\n\n # Calculate the taps factor (piTAPS).\n if attributes['subcategory_id'] in [9, 10, 11, 12, 13, 14, 15]:\n attributes['piTAPS'] = (attributes['n_elements']**1.5 / 25.0) + 0.792\n\n # Calculate the voltage factor (piV).\n if attributes['subcategory_id'] > 8:\n _index = -1\n if attributes['subcategory_id'] in [9, 10, 11, 12]:\n _breaks = [0.1, 0.2, 0.6, 0.7, 0.8, 0.9]\n elif attributes['subcategory_id'] in [13, 14, 15]:\n _breaks = [0.8, 0.9]\n for _index, _value in enumerate(_breaks):\n _diff = _value - attributes['voltage_ratio']\n if len(_breaks) == 1 and _diff < 0.0:\n break\n elif _index == 0 and _diff >= 0.0:\n break\n elif _diff >= 0:\n break\n attributes['piV'] = _dic_piV[attributes['subcategory_id']][_index]\n\n # Determine the consruction class factor (piC).\n if attributes['subcategory_id'] in [10, 12]:\n attributes['piC'] = _dic_piC[attributes['subcategory_id']][\n attributes['construction_id'] - 1]\n\n # Calculate the active hazard rate.\n attributes['hazard_rate_active'] = (\n attributes['lambda_b'] * attributes['piQ'] * attributes['piE'])\n if attributes['subcategory_id'] == 4:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piT'] *\n attributes['n_elements'])\n elif attributes['subcategory_id'] in [9, 11, 13, 14, 15]:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piTAPS'] *\n attributes['piR'] * attributes['piV'])\n elif attributes['subcategory_id'] in [10, 12]:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piTAPS'] *\n attributes['piC'] * attributes['piR'] * attributes['piV'])\n elif attributes['subcategory_id'] != 8:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piR'])\n\n return attributes, _msg", "def structure(self):\n if self.abelian:\n return self.abelian\n\n # step 1. find order E/F_p.\n simplified = self.simple()\n N = simplified.order()\n if prime.primeq(N):\n return (1, N)\n\n # step 2. decompose N.\n r = gcd.gcd(simplified.ch - 1, N)\n _log.debug(\"r = %d, N = %d\" % (r, N))\n r_factor = factor_methods.factor(r)\n N0 = r\n N1, N2 = 1, N\n for p, _ in r_factor:\n k, N2 = arith1.vp(N2, p=p)\n N1 *= p**k\n \n while 1:\n P1 = self.infpoint\n while P1 == self.infpoint:\n P1 = simplified.point()\n P2 = self.infpoint\n while P2 == self.infpoint:\n P2 = simplified.point()\n P1, P2 = simplified.mul(N2, P1), simplified.mul(N2, P2)\n s = simplified.pointorder(P1, r, r_factor)\n t = simplified.pointorder(P2, r, r_factor)\n m = gcd.lcm(s, t)\n if m > 1:\n e = simplified.WeilPairing(m, P1, P2)\n if e != self.basefield.one:\n d = e.order()\n else:\n d = 1\n if m*d == N1:\n _log.debug(\"N1 = %d\" % N1)\n _log.debug(\"P1 = %s (pointorder=%d)\" % (P1, s))\n _log.debug(\"P2 = %s (pointorder=%d)\" % (P2, t))\n assert (not (N//d) % d), d\n self.abelian = (d, N//d)\n return self.abelian", "def scale(structure):\n from numpy.linalg import det\n if \"O\" in [atom.type for atom in structure]: spvol = 8.5**3/4e0\n elif \"Se\" in [atom.type for atom in structure]: spvol = 9.5**3/4e0\n elif \"Te\" in [atom.type for atom in structure]: spvol = 10.5**3/4e0\n else: raise ValueError(\"unknown atom.type: %s\" % (atom.type,))\n\n nfu = float(len(structure)/7)*0.5 # 0.5 because 2 f.u. 
in spinel unit-cell.\n vol = det(structure.cell)\n return (nfu * spvol / vol)**(1e0/3e0)", "def _reduced_mass(structure) -> float:\n reduced_comp = structure.composition.reduced_composition\n num_elems = len(reduced_comp.elements)\n elem_dict = reduced_comp.get_el_amt_dict()\n\n denominator = (num_elems - 1) * reduced_comp.num_atoms\n\n all_pairs = combinations(elem_dict.items(), 2)\n mass_sum = 0\n\n for pair in all_pairs:\n m_i = Composition(pair[0][0]).weight\n m_j = Composition(pair[1][0]).weight\n alpha_i = pair[0][1]\n alpha_j = pair[1][1]\n\n mass_sum += (alpha_i + alpha_j) * (m_i * m_j) / (m_i + m_j) # type: ignore\n\n reduced_mass = (1 / denominator) * mass_sum\n\n return reduced_mass", "def factorize(x):\n pass", "def factor(P):\n cd = P[-1]\n if P.deg == 0:\n return (cd, defaultdict(int))\n P = P * (1 / cd)\n return (cd, P.factor_unit())", "def factor(self):\r\n\t\t\r\n\t\t# get gcf\r\n\t\tg = self.extract()\r\n\t\t\r\n\t\t# invert and multiply\r\n\t\tv = g.invert()\r\n\t\tf = self.multiply(v)\r\n\t\t\r\n\t\treturn f,g", "def getStructureType(entry, seed_index=0, supercell = 2,return_SS=False):\n \n \n # Entry is a list of the necessary components of the TSA. \n # Makes it easier to parallelize\n structure, tol, mp_id = entry\n norm_tol=tol-1\n\n s = copy.deepcopy(structure)\n heterogeneous = False\n heterogeneous_SS = False\n\n # Distance matrix (rowA, columnB) shows distance between\n # atoms A and B, taking PBCs into account.\n \n binary_matrix = getDistMat(s,norm_tol)\n cluster = buildNetwork(binary_matrix,seed_index)\n\n compo = Composition.from_dict(Counter([s[l].specie.name for \n l in list(cluster)]))\n if compo.reduced_formula != s.composition.reduced_formula:\n # i.e. the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n heterogeneous = True\n og_cluster = set()\n og_cluster.update(cluster)\n old_cluster_size = len(cluster)\n # Increase structure to determine dimensionality\n\n s = copy.deepcopy(structure)\n s.make_supercell(supercell)\n seed_index*=supercell**3\n\n\n\n binary_matrix = getDistMat(s,norm_tol)\n cluster = buildNetwork(binary_matrix,seed_index)\n\n\n if cluster!=set():\n new_cluster_size = len(cluster)\n # Get ratio of original and final cluster lengths\n scale = new_cluster_size/old_cluster_size\n compo = Composition.from_dict(Counter([s[l].specie.name for l in\n list(cluster)]))\n if compo.reduced_formula != s.composition.reduced_formula:\n # i.e. 
the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n heterogeneous_SS = True\n motiif = getDim(scale,supercell)\n if heterogeneous or heterogeneous_SS:\n motiif += \"_heter\"\n\n if return_SS:\n return [motiif,mp_id,tol,compo.reduced_formula,list(cluster)]\n else:\n return [motiif,mp_id,tol,compo.reduced_formula,list(og_cluster)]", "def structure_cost(num_modules):\n\n total_installed_power = num_modules * PTC_POWER_MODULE * kW_to_W\n\n return STRUCTURE_PRICE * total_installed_power", "def nfactors(self):\n return self.L.nnz", "def class_size(self):\n if not self.is_mutation_finite():\n return infinity\n\n # type A (finite and affine)\n if self._letter == 'A':\n # the formula is taken from Torkildsen - Counting\n # cluster-tilted algebras of type A\n if self.is_finite():\n n = self._rank\n a = binomial( 2*(n+1), n+1 ) // (n+2)\n if n % 2 == 1:\n a += binomial( n+1, (n+1)//2 )\n if n % 3 == 0:\n a += 2 * binomial( 2*n//3, n//3 )\n return a // (n+3)\n # the formula is taken from Bastian, Prellberg, Rubey, Stump\n elif self.is_affine():\n i,j = self._bi_rank\n i = ZZ(i)\n j = ZZ(j)\n n = i+j\n f = Euler_Phi()\n if i == j:\n return ( binomial( 2*i,i ) +\n sum( f(k) * binomial(2*i//k,i//k)**2\n for k in [k for k in i.divisors()\n if k in j.divisors()] ) // n ) // 4\n else:\n return sum( f(k) * binomial(2*i//k,i//k) *\n binomial(2*j//k,j//k)\n for k in [k for k in i.divisors()\n if k in j.divisors()] ) // ( 2 * n )\n\n # types B and C (finite and affine)\n elif self._letter in ['B', 'C']:\n # this formula is proven but nowhere published correctness\n # is clear enough that I don't think a warning is needed\n if self.is_finite():\n n = self._rank\n return binomial(2 * n, n) // (n + 1)\n\n elif self._letter in ['BB','CC']:\n # these two formulas are not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 1\n if n%2==1:\n return binomial( 2*n-1, n-1 )\n else:\n return binomial( 2*n-1, n-1 ) + binomial( n-1, n//2 -1 )\n\n # type BC (affine)\n elif self._letter == 'BC':\n # this formula is not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 1\n return binomial( 2*n, n )\n\n # types BD and CD (affine)\n elif self._letter in ['BD','CD']:\n # this formula is not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 2\n return 2*binomial( 2*n, n )\n\n # type D (finite and affine)\n elif self._letter == 'D':\n # the formula is taken from Bastian, Prellberg, Rubey, Stump\n if self.is_finite():\n if self._rank == 4:\n return 6\n else:\n f = Euler_Phi()\n n = ZZ(self._rank)\n return sum( f( n//k ) * binomial( 2*k, k )\n for k in n.divisors() ) // (2*n)\n # this formula is not yet proven\n elif self.is_affine():\n n = self._rank - 3\n if n == 2:\n return 9\n else:\n print(Warning (\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if n%2==1:\n return 2*binomial(2*n,n)\n else:\n return 2*binomial(2*n,n) + binomial(n, n//2)\n\n # the exceptional types are hard-coded\n # type E (finite, affine and elliptic)\n elif self._letter == 'E':\n if self.is_finite():\n if self._rank == 6:\n return 67\n elif self._rank == 7:\n return 
416\n elif self._rank == 8:\n return 1574\n elif self.is_affine():\n if self._rank == 7:\n return 132\n elif self._rank == 8:\n return 1080\n elif self._rank == 9:\n return 7560\n elif self.is_elliptic():\n if self._rank == 8:\n return 49\n elif self._rank == 9:\n return 506\n elif self._rank == 10:\n return 5739\n\n # type F\n elif self._letter == 'F':\n if self.is_finite():\n return 15\n elif self.is_affine():\n return 60\n elif self.is_elliptic():\n if self._twist == [1,2]:\n return 90\n if self._twist == [1,1] or self._twist == [2,2]:\n return 35\n\n # type G\n elif self._letter == 'G':\n if self.is_finite():\n return 2\n elif self.is_affine():\n return 6\n elif self.is_elliptic():\n if self._twist == [1,3]:\n return 7\n if self._twist == [1,1] or self._twist == [3,3]:\n return 2\n\n # type X\n elif self._letter == 'X':\n if self._rank == 6:\n return 5\n elif self._rank == 7:\n return 2\n\n # otherwise the size is returned to be unknown\n else:\n print(\"Size unknown\")\n return NotImplemented", "def calculate_structure_function(self, cn_squared_profile):\n \n self.cn_squared = cn_squared_profile\n self.structure_function = self.cn_squared*(self.r0**(2/3))\n return structure_function", "def _structure_factor_wave_number(\n rdf: freud.density.RDF, wave_number: float, num_particles: int\n):\n dr = rdf.R[1] - rdf.R[0]\n integral = dr * np.sum((rdf.RDF - 1) * rdf.R * np.sin(wave_number * rdf.R))\n density = num_particles / rdf.box.volume\n return 1 + 4 * np.pi * density / wave_number * integral", "def test_str_fac():\n structure = Material(input)\n assert (np.abs(structure.calc_nuc_str_fac((2., 0., 0.))) ** 2 - 1702170.4663405998 < 1e-6)\n assert (np.abs(structure.calc_nuc_str_fac((2, 0, 0))) ** 2 - 1702170.4663405998 < 1e-6)\n assert (np.abs(structure.calc_nuc_str_fac((0, 2., 0))) ** 2 - 1702170.4663405998 < 1e-6)\n assert (np.abs(structure.calc_nuc_str_fac((0, 2, 0))) ** 2 - 1702170.4663405998 < 1e-6)\n\n ndarray_example = np.linspace(0.5, 1.5, 21)\n assert (np.sum(abs(structure.calc_nuc_str_fac((ndarray_example, 0, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, ndarray_example, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, 0, ndarray_example))) ** 2) - 16831011.814390473 < 1e-6)\n assert (\n np.sum(abs(structure.calc_nuc_str_fac((ndarray_example, ndarray_example, 0))) ** 2) - 10616602.544519115 < 1e-6)\n\n list_example = list(ndarray_example)\n assert (np.sum(abs(structure.calc_nuc_str_fac((list_example, 0, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, list_example, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, 0, list_example))) ** 2) - 16831011.814390473 < 1e-6)\n\n tuple_example = tuple(ndarray_example)\n assert (np.sum(abs(structure.calc_nuc_str_fac((tuple_example, 0, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, tuple_example, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, 0, tuple_example))) ** 2) - 16831011.814390473 < 1e-6)", "def test_total_scattering_cross_section():\n structure = Material(input)\n assert (structure.total_scattering_cross_section == 31.880000000000003)", "def _compute_factors(roots, multiplicity, include_powers=False):\n current = cupy.array([1])\n suffixes = [current]\n for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):\n monomial = cupy.r_[1, -pole]\n for _ in 
range(int(mult)):\n current = cupy.polymul(current, monomial)\n suffixes.append(current)\n suffixes = suffixes[::-1]\n\n factors = []\n current = cupy.array([1])\n for pole, mult, suffix in zip(roots, multiplicity, suffixes):\n monomial = cupy.r_[1, -pole]\n block = []\n for i in range(int(mult)):\n if i == 0 or include_powers:\n block.append(cupy.polymul(current, suffix))\n current = cupy.polymul(current, monomial)\n factors.extend(reversed(block))\n\n return factors, current", "def structure_to_abivars(self):\n types_of_specie = self.types_of_specie\n natom = self.num_sites\n\n znucl_type = [specie.number for specie in types_of_specie]\n\n # znucl_atoms = self.atomic_numbers\n\n typat = numpy.zeros(natom, numpy.int)\n for (atm_idx, site) in enumerate(self):\n typat[atm_idx] = types_of_specie.index(site.specie) + 1\n\n rprim = ArrayWithUnit(self.lattice.matrix, \"ang\").to(\"bohr\")\n xred = numpy.reshape([site.frac_coords for site in self], (-1, 3))\n\n # Set small values to zero. This usually happens when the CIF file\n # does not give structure parameters with enough digits.\n # rprim = np.where(np.abs(rprim) > 1e-8, rprim, 0.0)\n # xred = np.where(np.abs(xred) > 1e-8, xred, 0.0)\n\n d = dict(\n natom=natom,\n ntypat=len(types_of_specie),\n typat=typat,\n xred=xred,\n znucl=znucl_type)\n\n d.update(dict(\n acell=3 * [1.0],\n rprim=rprim))\n\n # d.update(dict(\n # acell=3 * [1.0],\n # angdeg))\n\n return d", "def calculate_components(self, parts):\n target = {}\n for part in parts:\n rank = part[0]\n\n try:\n face = part[1]\n except IndexError:\n face = '*'\n\n try:\n target[rank][face] += 1\n except KeyError:\n if rank not in target:\n target[rank] = {}\n target[rank][face] = 1\n\n return target", "def calculate_scaling_factors(blk):\n\n def cs(blk2):\n \"\"\"Recursive function for to do subblocks first\"\"\"\n for b in blk2.component_data_objects(pyo.Block, descend_into=False):\n cs(b)\n if hasattr(blk2, \"calculate_scaling_factors\"):\n blk2.calculate_scaling_factors()\n\n # Call recursive function to run calculate_scaling_factors on blocks from\n # the bottom up.\n cs(blk)\n # If a scale factor is set for an indexed component, propagate it to the\n # component data if a scale factor hasn't already been explicitly set\n propagate_indexed_component_scaling_factors(blk)\n # Use the variable scaling factors to scale the arc constraints.\n scale_arc_constraints(blk)", "def normalize(self, factor):", "def buoyancy_factor(self, fac):\n return fac", "def factor_carga(self):\r\n return self.nelementos() / self.n", "def test_conversion():\r\n f1 = factor([0,1],[2,2],scipy.rand(4))\r\n f2 = factor([1,2],[2,2],scipy.rand(4))\r\n f3 = factor([3],[2],scipy.rand(2))\r\n\r\n F = FactorList([f1,f2,f3])\r\n theta = factors2ExpFam(F)\r\n F2 = expfam2Factors(theta)\r\n ratio = F2.JointDistn().val/ (F.JointDistn().val)\r\n ratio = ratio/ratio[0]\r\n print scipy.allclose(ratio,1)", "def structureFactor(self, int_max=None, nBoxes=None):\n\n if nBoxes == None: nBoxes = np.sqrt(self.N)\n nBoxes = int(nBoxes)\n\n particleDensity = self.nParticleDensity(int_max=int_max, nBoxes=nBoxes)\n\n _S2D = np.array(list(map(\n lambda _rho:\n (lambda FFT: np.real(np.conj(FFT)*FFT))\n (np.fft.fft2(_rho)),\n particleDensity)))/self.N\n\n k2D = np.sqrt(\n (wave_vectors_2D(nBoxes, nBoxes, self.L/nBoxes)**2).sum(axis=-1))\n\n return g2Dto1Dgrid(_S2D.mean(axis=0), k2D)", "def calculate_217f_part_count(**attributes):\n _msg = ''\n\n # Dictionary containing MIL-HDBK-217FN2 parts count base hazard rates.\n # First key is the 
subcategory_id, second key is the specification id. If\n # the resistor subcategory is NOT specification dependent, then the second\n # key will be zero. Current subcategory IDs are:\n #\n # 1. Fixed, Composition (RC, RCR)\n # 2. Fixed, Film (RL, RLR, RN, RNC, RNN, RNR)\n # 3. Fixed, Film, Power (RD)\n # 4. Fixed, Film, Network (RZ)\n # 5. Fixed, Wirewound, Power (RB, RBR)\n # 6. Fixed, Wirewound, Power, Chassis Mounted (RE, RER)\n # 7. Thermistor\n # 8. Variable, Wirewound (RT, RTR)\n # 9. Variable, Wirewound, Precision (RR)\n # 10. Variable, Wirewound, Semiprecision (RA, RK)\n # 11. Variable, Non-Wirewound (RJ, RJR)\n # 12. Variable, Composition (RV)\n # 13. Variable,Non-Wirewound, Film and Precision (RQ, RVC)\n #\n # These keys return a list of base hazard rates. The hazard rate to use is\n # selected from the list depending on the active environment.\n _dic_lambda_b = {\n 1: [\n 0.0005, 0.0022, 0.0071, 0.0037, 0.012, 0.0052, 0.0065, 0.016,\n 0.025, 0.025, 0.00025, 0.0098, 0.035, 0.36\n ],\n 2: {\n 1: [\n 0.0012, 0.0027, 0.011, 0.0054, 0.020, 0.0063, 0.013, 0.018,\n 0.033, 0.030, 0.00025, 0.014, 0.044, 0.69\n ],\n 2: [\n 0.0012, 0.0027, 0.011, 0.0054, 0.020, 0.0063, 0.013, 0.018,\n 0.033, 0.030, 0.00025, 0.014, 0.044, 0.69\n ],\n 3: [\n 0.0014, 0.0031, 0.013, 0.0061, 0.023, 0.0072, 0.014, 0.021,\n 0.038, 0.034, 0.00028, 0.016, 0.050, 0.78\n ],\n 4: [\n 0.0014, 0.0031, 0.013, 0.0061, 0.023, 0.0072, 0.014, 0.021,\n 0.038, 0.034, 0.00028, 0.016, 0.050, 0.78\n ]\n },\n 3: [\n 0.012, 0.025, 0.13, 0.062, 0.21, 0.078, 0.10, 0.19, 0.24, 0.32,\n 0.0060, 0.18, 0.47, 8.2\n ],\n 4: [\n 0.0023, 0.0066, 0.031, 0.013, 0.055, 0.022, 0.043, 0.077, 0.15,\n 0.10, 0.0011, 0.055, 0.15, 1.7\n ],\n 5: [\n 0.0085, 0.018, 0.10, 0.045, 0.16, 0.15, 0.17, 0.30, 0.38, 0.26,\n 0.0068, 0.13, 0.37, 5.4\n ],\n 6: {\n 1: [\n 0.014, 0.031, 0.16, 0.077, 0.26, 0.073, 0.15, 0.19, 0.39, 0.42,\n 0.0042, 0.21, 0.62, 9.4\n ],\n 2: [\n 0.013, 0.028, 0.15, 0.070, 0.24, 0.065, 0.13, 0.18, 0.35, 0.38,\n 0.0038, 0.19, 0.56, 8.6\n ]\n },\n 7: [\n 0.008, 0.18, 0.096, 0.045, 0.15, 0.044, 0.088, 0.12, 0.24, 0.25,\n 0.004, 0.13, 0.37, 5.5\n ],\n 8: [\n 0.065, 0.32, 1.4, 0.71, 1.6, 0.71, 1.9, 1.0, 2.7, 2.4, 0.032, 1.3,\n 3.4, 62.0\n ],\n 9: [\n 0.025, 0.055, 0.35, 0.15, 0.58, 0.16, 0.26, 0.35, 0.58, 1.1, 0.013,\n 0.52, 1.6, 24.0\n ],\n 10: [\n 0.33, 0.73, 7.0, 2.9, 12.0, 3.5, 5.3, 7.1, 9.8, 23.0, 0.16, 11.0,\n 33.0, 510.0\n ],\n 11: [\n 0.15, 0.35, 3.1, 1.2, 5.4, 1.9, 2.8, 0.0, 0.0, 9.0, 0.075, 0.0,\n 0.0, 0.0\n ],\n 12: [\n 0.15, 0.34, 2.9, 1.2, 5.0, 1.6, 2.4, 0.0, 0.0, 7.6, 0.076, 0.0,\n 0.0, 0.0\n ],\n 13: [\n 0.043, 0.15, 0.75, 0.35, 1.3, 0.39, 0.78, 1.8, 2.8, 2.5, 0.21, 1.2,\n 3.7, 49.0\n ],\n 14: [\n 0.05, 0.11, 1.1, 0.45, 1.7, 2.8, 4.6, 4.6, 7.5, 3.3, 0.025, 1.5,\n 4.7, 67.0\n ],\n 15: [\n 0.048, 0.16, 0.76, 0.36, 1.3, 0.36, 0.72, 1.4, 2.2, 2.3, 0.024,\n 1.2, 3.4, 52.0\n ]\n }\n\n # List containing piQ values for parts count method. The list positions\n # corrspond to the following quality levels:\n #\n # 0. Established reliability level S\n # 1. Established reliability level R\n # 2. Established reliability level P\n # 3. Established reliability level M\n # 4. Non-established reliability MIL-SPEC\n # 5. 
Non-established reliability lower\n #\n # The quality_id attribute is used to select the proper value of piQ.\n _lst_piQ = [0.030, 0.10, 0.30, 1.0, 3.0, 10.0]\n\n # Select the base hazard rate.\n try:\n if attributes['subcategory_id'] in [2, 6]:\n _lst_base_hr = _dic_lambda_b[attributes['subcategory_id']][\n attributes['specification_id']]\n else:\n _lst_base_hr = _dic_lambda_b[attributes['subcategory_id']]\n except KeyError:\n _lst_base_hr = [0.0]\n\n try:\n attributes['lambda_b'] = _lst_base_hr[\n attributes['environment_active_id'] - 1]\n except IndexError:\n attributes['lambda_b'] = 0.0\n\n # Select the piQ.\n try:\n attributes['piQ'] = _lst_piQ[attributes['quality_id'] - 1]\n except IndexError:\n attributes['piQ'] = 0.0\n\n # Confirm all inputs are within range. If not, set the message. The\n # hazard rate will be calculated anyway, but will be zero.\n if attributes['lambda_b'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: Base hazard rate is 0.0 when ' \\\n 'calculating resistor, hardware ID: ' \\\n '{0:d}, subcategory ID: {1:d}, specification ID: {2:d}, ' \\\n 'active environment ID: {3:d}, and quality ID: ' \\\n '{4:d}.\\n'.format(attributes['hardware_id'],\n attributes['subcategory_id'],\n attributes['specification_id'],\n attributes['environment_active_id'],\n attributes['quality_id'])\n\n if attributes['piQ'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piQ is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}, quality ID: ' \\\n '{1:d}.'.format(attributes['hardware_id'],\n attributes['quality_id'])\n\n # Calculate the hazard rate.\n attributes['hazard_rate_active'] = (\n attributes['lambda_b'] * attributes['piQ'])\n\n return attributes, _msg", "def divide_microstructure_unit(self,point1,point2,dimensions):\n\t\tnew_sections = []\n\t\tif len(point1) < 4:\n\t\t\tdone = 0.0\n\t\t\tdtwo = 0.0\n\t\telse:\n\t\t\tdone = point1[-1]\n\t\t\tdtwo = point2[-1]\n\t\t\n\t\tp1 = np.array(point1[:3])\n\t\tp2 = np.array(point2[:3])\n\t\tvec = p2-p1\n\t\tdimslength = float(np.sum(dimensions))\n\t\tfor d,dim in enumerate(dimensions[:-1]):\n\t\t\tnearsideproportion = np.sum(dimensions[:d])/dimslength\n\t\t\tfarsideproportion = np.sum(dimensions[:d+1])/dimslength\n\t\t\tnew_sections.append([\t\n\t\t\t\t\t\tlist(np.append(p1+vec*nearsideproportion,done)),\n\t\t\t\t\t\tlist(np.append(((p1+vec*nearsideproportion)+(p1+vec*farsideproportion))/2.0,(done+dtwo)/2.0)),\n\t\t\t\t\t\tlist(np.append(p1+vec*farsideproportion,dtwo))\n\t\t\t\t\t\t])\n\t\t\n\t\tnew_sections.append([\t\n\t\t\t\t\tlist(new_sections[-1][-1]),\n\t\t\t\t\tlist((np.array(new_sections[-1][-1])+np.array(list(point2[:3])+[dtwo]))/2.0),\n\t\t\t\t\tlist(point2[:3])+[dtwo]\n\t\t\t\t\t])\n\t\t\n\t\tif len(dimensions) > 2:\n\t\t\treturn(new_sections,['node','paranode1','paranode2','internode','paranode2','paranode1'][:len(new_sections)])\n\t\t\n\t\telse:\n\t\t\treturn(new_sections,['interbouton','bouton'][:len(new_sections)])", "def structure_to_abivars(structure, **kwargs):\n if not structure.is_ordered:\n raise ValueError(\"\"\"\\\nReceived disordered structure with partial occupancies that cannot be converted into an Abinit input\nPlease use OrderDisorderedStructureTransformation or EnumerateStructureTransformation\nto build an appropriate supercell from partial occupancies or alternatively use the Virtual Crystal Approximation.\"\"\")\n\n types_of_specie = structure.types_of_specie\n natom = structure.num_sites\n\n znucl_type = [specie.number for specie in types_of_specie]\n znucl_atoms = structure.atomic_numbers\n\n typat = np.zeros(natom, 
np.int)\n for atm_idx, site in enumerate(structure):\n typat[atm_idx] = types_of_specie.index(site.specie) + 1\n\n rprim = ArrayWithUnit(structure.lattice.matrix, \"ang\").to(\"bohr\")\n angdeg = structure.lattice.angles\n xred = np.reshape([site.frac_coords for site in structure], (-1, 3))\n\n # Set small values to zero. This usually happens when the CIF file\n # does not give structure parameters with enough digits.\n rprim = np.where(np.abs(rprim) > 1e-8, rprim, 0.0)\n xred = np.where(np.abs(xred) > 1e-8, xred, 0.0)\n\n # Info on atoms.\n d = dict(\n natom=natom,\n ntypat=len(types_of_specie),\n typat=typat,\n znucl=znucl_type,\n xred=xred,\n )\n\n # Add info on the lattice.\n # Should we use (rprim, acell) or (angdeg, acell) to specify the lattice?\n geomode = kwargs.pop(\"geomode\", \"rprim\")\n if geomode == \"automatic\":\n geomode = \"rprim\"\n if structure.lattice.is_hexagonal: # or structure.lattice.is_rhombohedral\n geomode = \"angdeg\"\n angdeg = structure.lattice.angles\n # Here one could polish a bit the numerical values if they are not exact.\n # Note that in pmg the angles are 12, 20, 01 while in Abinit 12, 02, 01\n # One should make sure that the orientation is preserved (see Curtarolo's settings)\n\n if geomode == \"rprim\":\n d.update(\n acell=3 * [1.0],\n rprim=rprim,\n )\n\n elif geomode == \"angdeg\":\n d.update(\n acell=ArrayWithUnit(structure.lattice.abc, \"ang\").to(\"bohr\"),\n angdeg=angdeg,\n )\n else:\n raise ValueError(\"Wrong value for geomode: %s\" % geomode)\n\n return d" ]
[ "0.6150205", "0.56703275", "0.5611402", "0.5582101", "0.5568", "0.55506617", "0.5549934", "0.55357444", "0.5386212", "0.5341239", "0.5322685", "0.53065836", "0.53054816", "0.529132", "0.5288285", "0.52503294", "0.5247208", "0.5234449", "0.5224676", "0.5212955", "0.5189034", "0.5174233", "0.5163635", "0.5162873", "0.51571345", "0.51515436", "0.5150636", "0.5144145", "0.5134218", "0.51208884" ]
0.5964732
1
Computes the Darwin width for a specified crystal reflection (degrees)
def DarwinWidth(ID, hkl, E, T=293): ID = goodID(ID) E = eV(E) theta = BraggAngle(ID,hkl,E) l = lam(E) f = FF(ID,2*theta,E) F = StructureFactor(ID,f,hkl) V = UnitCellVolume(ID) dw=(2*c['eRad']*l**2*np.abs(F))/(np.pi*V*sind(2*theta))/u['rad'] return dw
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def diffusion_width(conversion_depth): #Return value in PIXELS!!!\n return sqrt((drift_time(maximum(conversion_depth, undepleted_thickness)) *\n 2 * k * temp * low_field_mobility / e) + #depleted\n where(conversion_depth < undepleted_thickness,\n square(undepleted_thickness), 0)) / pixel_width #undepleted", "def arclength(c):\n r=c[1][0]\n start=c[1][1]\n end=c[1][2]\n if start == 0 and end == 360:\n return r * pi2\n else:\n start = start % 360.0\n end = end % 360.0\n\n if start > end:\n end = end+360.0\n d = pi2*r\n l = d*(end-start)/360.0\n return l", "def getDiameter(self):\n\n hdr = self.header\n if \"cd1_1\" in hdr:\n self.D = abs(hdr[\"cd1_1\"]) * hdr[\"naxis1\"]\n elif \"cdelt1\" in hdr:\n self.D = abs(hdr[\"cdelt1\"]) * hdr[\"naxis1\"]\n else:\n print(\"Warning: no coordinate information found in input header;\")\n print(\" pupil width assumed to be 6.5 meters\")\n self.D = 6.5", "def calculate_wavelength(period, depth, gravity):\r\n return geometry.gmCalculateWavelength(period, depth, gravity)", "def getWidth(self):\n area = self.getArea()\n length = self.getLength()\n return area / length", "def weight4width(box_width,platformWidth,stairsLength,stepCount,stepWidth):\n if (platformWidth-stairsLength)<0:\n platformWidth = stairsLength + 50 #platform width must larger than stairs length ,the value is 50\n return platformWidth\n else:return platformWidth", "def getWidth(self) -> int:\n ...", "def acW(self):\n return self.fuselageLength * self.posFraction", "def DSS28_beamwidth(freq):\n return 0.54/freq", "def sq_footage(length, width):\n return length * width", "def getWidth(self):\n wsum = 0.0\n for quad in self._quadrilaterals:\n wsum = wsum + get_quad_width(quad)\n mwidth = (wsum / len(self._quadrilaterals)) / 1000.0\n return mwidth", "def cw(w1):\n return (pic_width / float(w)) * w1", "def getWidth(self):\r\n width = 1\r\n if self.orientation == \"h\":\r\n width = self.size\r\n return width", "def getWidth(self):\n caller = self.getMyCaller()\n if caller.startsWith(\"java.\") or caller.startsWith(\"javax.\"):\n return super(Program_Test, self).getWidth()\n else:\n return getCentralRegionSize().width", "def psarclength(x):\n\tdef ds(x):\n\t\tdypsect = derivative(interpolate.splev, x, dx=1e-6, args=(tckps,), order=5)\n\t\treturn np.sqrt(1 + dypsect**2)\n\tquad, err = integrate.quad(ds, ps2Dsorted[0,0], x)\t\n\treturn quad", "def calc_optimal_spacing(sun_properties, tilt_angle, module_length):\n h = module_length * sin(tilt_angle)\n D1 = h / tan(radians(sun_properties.worst_sh))\n D = max(D1 * cos(radians(180 - sun_properties.worst_Az)), D1 * cos(radians(sun_properties.worst_Az - 180)))\n return D", "def __conv_inch(length_mm):\n return length_mm / 25.4", "def perimeter(cnt):\n\treturn cv2.arcLength(cnt, True)", "def get_do_port_width( channel ):\n width= uInt32(0)\n CALL('GetPhysicalChanDOPortWidth', channel, byref(width))\n return width.value", "def get_di_port_width( channel ):\n width = uInt32(0)\n CALL('GetPhysicalChanDIPortWidth', channel, byref(width))\n return width.value", "def get_image_width_in_degrees(image_width=100,viewing_distance=24.0,screen_pixel_size=0.282):\n \n mm_per_inch = 25.4\n degrees_per_image = np.degrees(np.arctan(((image_width*0.5)*screen_pixel_size)/(viewing_distance*mm_per_inch))*2.0)\n return degrees_per_image", "def width(self):\n return (self.norm / max(self.transmit)) * Unit(self.wavelength_unit)", "def molar_mass_dry_air():\n return 28.9647", "def transit_width(r, k, b, P=1):\n\n\treturn P*math.asin(r*math.sqrt( ((1+k)**2-b**2) / (1-b**2*r**2) 
))/math.pi", "def dr_length(self):\n # note: DR chain length is a function of current IR chain state\n return self.chain_length(self.driver.scan_dr)", "def effective_width(self, intrinsic_width, dm, bandwidth, freq):\n a = sqrt(pow(intrinsic_width, 2) + pow((8.3e6 * fabs(dm) * (bandwidth / pow(freq, 3))), 2))\n return a", "def cuttingSpeed(self, diameter, rpm):\n return (math.pi * diameter * rpm)/12", "def r_width(self) -> int:\n return math.ceil(self.t_width / REGION_DIM)", "def getWidth(*args):", "def getWidth(*args):" ]
[ "0.59558606", "0.5946997", "0.5920999", "0.5686786", "0.5665531", "0.56501627", "0.56482404", "0.5642907", "0.56321955", "0.559535", "0.5512421", "0.55015284", "0.54587156", "0.5456422", "0.5454967", "0.5450522", "0.54350805", "0.53960043", "0.5390877", "0.5385902", "0.5382733", "0.53793937", "0.53788304", "0.5372086", "0.5362414", "0.5346844", "0.53233314", "0.532283", "0.53191805", "0.53191805" ]
0.6186894
0
Returns the momentum transfer in Ang^1 from xraylib [sin(2theta)/lam] E is the photon energy (eV or KeV) twotheta is the scattering angle in degrees
def MomentumTransfer(E,twotheta): E = keV(E) th = np.deg2rad(twotheta) p = xl.MomentTransf(E, th) return p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_theta_delta_v_trans_MOND(M1, M2, a, e, M, Omega, omega, inc):\n\n # Acceleration constant for MOND\n a0 = 1.2e-8\n\n # From Scarpa et al. (2017), MOND acts at separations larger than 7000 AU\n a_limit = 7000.0 * c.AU_to_cm\n\n # Calculate f's\n num_sys = len(M1)\n f = np.zeros(num_sys)\n proj_sep = np.zeros(num_sys)\n delta_v_trans = np.zeros(num_sys)\n delta_v_tot = np.zeros(num_sys)\n\n # Currently, eccentricity is not accounted for in our MOND implementation.\n # There are good reasons for this. Namely, it is not a defined quantity in\n # MOND orbits - non-circular orbits do not close in on themselves and form\n # rosettes, i.e. they have non-integer periodicity in azimuthal space.\n\n for i in np.arange(num_sys):\n\n if a[i] < 7000.0 * c.AU_to_cm/c.Rsun_to_cm:\n f[i] = get_f(M[i], e[i])\n\n # Calculate separations - in Rsun\n sep = a[i] * (1.0 - e[i]**2) / (1.0 + e[i]*np.cos(f[i]))\n proj_sep[i] = get_proj_sep(f[i], e[i], sep, Omega[i], omega[i], inc[i])\n\n # Orbital period in days\n P = a_to_P(M1[i], M2[i], a[i])\n # Calculate proper motions\n delta_v_trans[i] = get_delta_v_trans(f[i], e[i], a[i]*c.Rsun_to_cm,\n P*c.day_to_sec, Omega[i], omega[i], inc[i])\n delta_v_tot[i] = get_delta_v_tot(f[i], e[i], a[i]*c.Rsun_to_cm, P*c.day_to_sec)\n\n else:\n\n # Calculate separations - in Rsun\n proj_sep[i] = get_proj_sep(f[i], 0.0, a[i], Omega[i], 0.0, inc[i])\n\n r1 = (a[i]*c.Rsun_to_cm) / (1.0 + np.sqrt(M1[i]/M2[i]))\n r2 = (a[i]*c.Rsun_to_cm) / (1.0 + np.sqrt(M2[i]/M1[i]))\n a1 = c.GGG * M2[i]*c.Msun_to_g / (a[i]*c.Rsun_to_cm)**2\n a2 = c.GGG * M1[i]*c.Msun_to_g / (a[i]*c.Rsun_to_cm)**2\n v1 = (r1**2 * a1 * a0)**0.25\n v2 = (r2**2 * a2 * a0)**0.25\n\n v_diff = v1 + v2\n\n # Calculate proper motions\n v_x = -np.sin(omega[i]+f[i])*np.cos(Omega[i]) - \\\n e[i]*np.sin(omega[i])*np.cos(Omega[i]) - \\\n np.cos(omega[i]+f[i])*np.cos(inc[i])*np.sin(Omega[i]) - \\\n e[i]*np.cos(omega[i])*np.cos(inc[i])*np.sin(Omega[i])\n v_y = -np.sin(omega[i]+f[i])*np.sin(Omega[i]) - \\\n e[i]*np.sin(omega[i])*np.sin(Omega[i]) + \\\n np.cos(omega[i]+f[i])*np.cos(inc[i])*np.cos(Omega[i]) + \\\n e[i]*np.cos(omega[i])*np.cos(inc[i])*np.cos(Omega[i])\n delta_v_trans[i] = v_diff * np.sqrt(v_x**2 + v_y**2) / 1.0e5\n delta_v_tot[i] = v_diff / 1.0e5\n\n\n return proj_sep, delta_v_trans, delta_v_tot", "def calc_theta_delta_v_trans(M1, M2, a, e, M, Omega, omega, inc):\n\n # Calculate f's\n num_sys = len(M1)\n f = np.zeros(num_sys)\n for i in np.arange(num_sys):\n f[i] = get_f(M[i], e[i])\n\n # Calculate separations - in Rsun\n sep = a * (1.0 - e*e) / (1.0 + e*np.cos(f))\n proj_sep = get_proj_sep(f, e, sep, Omega, omega, inc)\n\n # Orbital period in days\n P = a_to_P(M1, M2, a)\n # Calculate proper motions\n delta_v_trans = get_delta_v_trans(f, e, a*c.Rsun_to_cm, P*c.day_to_sec, Omega, omega, inc)\n delta_v_tot = get_delta_v_tot(f, e, a*c.Rsun_to_cm, P*c.day_to_sec)\n\n return proj_sep, delta_v_trans, delta_v_tot", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def angular_momentum(self, AM):\n # Printing the amplitude to command line\n amplitude = max(AM)-min(AM)\n print('Amplitude of angular momentum during %i year(s): %g[AU²/yr²]' \\\n %(self.t, amplitude))\n # Creating an axis for the time steps\n x = np.linspace(0, self.t, self.N*self.t+1)\n # Initializing the figure\n plt.figure(figsize=(10, 10))\n # Creating the plot\n plt.plot(x, AM)\n # Decorating the plot\n plt.suptitle('Total angular momentum in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time 
[yr]', fontsize=16)\n plt.ylabel('energy [AU²/yr²]', fontsize=16)\n plt.legend(['AM'])", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def MSPBE_tar(self, theta):\n V = np.array((theta * self.mu_phi_tar).sum(axis=1))\n V2 = self.gamma * np.array((theta * self.mu_phi_next_tar).sum(axis=1))\n return np.mean(np.array((V - np.dot(self.projection_operator(), V2 + self.mu_r_tar))) ** 2)", "def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)", "def calculate_acceleration(self) -> np.array:\n F = self.calculate_net_force()\n m = self.mass\n a = F / m\n\n return a", "def MSBE_tar(self, theta):\n V = np.array((theta * self.mu_phi_tar).sum(axis=1))\n V2 = self.gamma * np.array((theta * self.mu_phi_next_tar).sum(axis=1))\n return np.mean((V - V2 - self.mu_r_tar) ** 2)", "def Recoil_energy_nr(En, theta):\n mp = 938.272046 # mass proton MeV\n mn = 939.565379 # mass neutron MeV\n Z = 10 # number of proton in Neon\n N = 10 # number of neutron in Neon\n mt = Z*mp+N*mn # mass target: nucleus in MeV\n angle = math.radians(theta) # angle in radian: need to convert from degree to rad for math.sin and math.cos\n Enr = 2*En*(mn**2/(mn+mt)**2)*((mt/mn)+math.sin(angle)**2-math.cos(angle)*math.sqrt((mt/mn)**2-math.sin(angle)**2))\n return Enr*1e3", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def energy_calculation(theta_0, omega_0, dt):\n samples = int(T/dt) # Finds samplerate for chosen dt\n \n # Creat array of values using Euler-Cromer approx\n thetaArr, omegaArr, timeArr = euler_cromer_approx(theta_0,omega_0,dt,T_i)\n \n # Function for total energy\n energy_func = lambda m,l,omega,theta: (1/2)*m*(l**2)*(omega**2) + (1/2)*m*g*l*(theta**2)\n \n # Time array in same dimension \n t = np.linspace(T_i,T,samples)\n energy = np.zeros(samples)\n \n for i in range(len(t)):\n \"\"\"\n Calculation of total energy for every time-element\n \"\"\"\n energy[i] = energy_func(m,l,omegaArr[i],thetaArr[i])\n \n \n E_total = energy\n\n return t, E_total", "def descent_acc(thrust, drone_mass, g=9.81, drag = 0.):\n output = 
(-thrust - drone_mass * g + drag) / drone_mass\n return output", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def FO2(lam):\n return 1.096 + 1.385 *1e-3 *lam**(-2) + 1.448 *1e-4 *lam**(-4)", "def dnde_photon_eta_omega(_, photon_energies, cme: float):\n if cme < meta + momega:\n return np.zeros_like(photon_energies)\n dnde1 = spectra.dnde_photon_eta\n dnde2 = spectra.dnde_photon_omega\n return _dnde_photon_two_body(\n photon_energies, cme=cme, m1=meta, m2=momega, dnde1=dnde1, dnde2=dnde2\n )", "def like_one(theta,dt,dmag,sigma):\n\n gamma, A = theta\n aux=(1/np.sqrt(2*np.pi*Veff2(dt,sigma,A,gamma)))*np.exp(-1.0*(dmag**2)/(2.0*Veff2(dt,sigma,A,gamma)))\n\n return aux", "def computeTangent(self):\n # return np.matmul(self.examples.T, self.gradAmbient)\n return self.gradAmbient + self.centroid * minkowskiDot(self.centroid, self.gradAmbient)", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def Ernst_T1(TR, alpha_e):\n return -TR / np.log(np.cos(alpha_e))", "def get_attention(hidden_state):\n inputs = tf.concat((hidden_state, processed_input[-1]), axis = 1)\n hidden_values = tf.nn.tanh(tf.matmul(inputs, Wa1) + ba1)\n e_values = (tf.matmul(hidden_values, Wa2) + ba2)\n return e_values", "def BraggEnergy(ID,hkl,twotheta):\n ID=goodID(ID)\n d=dSpace(ID,hkl)\n l=2*d*sind(twotheta/2.0)\n E=lam2E(l)\n return E", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def delayE(self):\n sinE = np.sin(self.E())\n return self.GAMMA", "def d_beta_d_EDOT(self):\n eTheta = self.eTheta()\n a1 = (self.a1()).decompose()\n sinOmg = np.sin(self.omega())\n cosOmg = np.cos(self.omega())\n return a1/c.c*((-eTheta)/np.sqrt(1-eTheta**2)*cosOmg*self.tt0- \\\n (1-eTheta**2)**0.5*sinOmg*self.d_omega_d_par('EDOT'))", "def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V", "def gyroe(E, B, pitch):\n V = ev2ve(E);\n Vperp = V*np.sqrt(1-pitch);\n return me * Vperp / eV2J / B;", "def G(x,mu,T):\r\n den1 = np.cosh(mu/(kb*T))/np.sinh(x/(kb*T))\r\n den2 = np.tanh(x/(kb*T))\r\n\r\n return 1/(den1 + den2)", "def getETA():", "def getETA():" ]
[ "0.6113359", "0.5922858", "0.5920171", "0.587831", "0.5770723", "0.5753359", "0.5733947", "0.57266885", "0.57000864", "0.5698715", "0.5668702", "0.55332893", "0.55264103", "0.5521733", "0.5514887", "0.55147564", "0.5503037", "0.55001146", "0.5495225", "0.5492762", "0.54698634", "0.54566085", "0.54559135", "0.54553205", "0.5449918", "0.54387224", "0.5438376", "0.5437633", "0.5420898", "0.5420898" ]
0.71416765
0
Gets the writable of this SkillPropertyModel.
def writable(self) -> bool: return self._writable
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Writable(self):\n return self._get_attr('Writable')", "def get_property(self):\r\n _get = lambda slf: self.getval()\r\n _set = lambda slf, val: self.setval(val)\r\n _del = lambda slf: self.delval()\r\n\r\n if self.column.can_delete:\r\n return property(_get, _set, _del)\r\n else:\r\n return property(_get, _set)", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def writable(self):\n return 'w' in self._mode", "def writable(self):\n return True", "def read_only(self):\n ret_val = self._read_only()\n return ret_val", "def is_writable(self):\n raise NotImplementedError()", "def read_only(self) -> Optional[bool]:\n return self._read_only", "def is_read_only(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_read_only\")", "def readonly(self):\n return self._readonly", "def readonly(self):\n return self._readonly", "def prop(self):\n return self._prop", "def writable(self):\n return bool(self.buffer)", "def immutability_policy(self) -> 'outputs.ImmutabilityPolicyPropertiesResponse':\n return pulumi.get(self, \"immutability_policy\")", "def writable(self, writable: bool):\n if writable is None:\n raise ValueError(\"Invalid value for `writable`, must not be `None`\") # noqa: E501\n \n self._writable = writable", "def is_setter(self):\n return self._is_setter", "def property(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"property\")", "def read_only(self):\n return bool(self.__read_only)", "def is_mutable(self):\n return self._mutable", "def dbus_access_flag(self):\n if self._getf and self._setf:\n return \"readwrite\"\n elif self._getf:\n return \"read\"\n elif self._setf:\n return \"write\"\n else:\n raise TypeError(\n \"property provides neither readable nor writable\")", "def isWriteable(self, name):\n pass", "def skill(self):\n return self._get(\"skill\")", "def property( self, prop ):\n raise NotImplementedError(\"property\")", "def prop(self):\n return getattr(self, name)", "def require_encryption_for_write_access(self):\n if \"requireEncryptionForWriteAccess\" in self._prop_dict:\n return self._prop_dict[\"requireEncryptionForWriteAccess\"]\n else:\n return None", "def properties_get(self):\n return self._get('properties')", "def mark_as_read(self):\n if \"markAsRead\" in self._prop_dict:\n return self._prop_dict[\"markAsRead\"]\n else:\n return None" ]
[ "0.7035221", "0.5944507", "0.59027547", "0.59027547", "0.59027547", "0.59027547", "0.58960843", "0.58916765", "0.5811473", "0.58107597", "0.5795605", "0.57830673", "0.5729404", "0.5729404", "0.5719634", "0.5717049", "0.5712049", "0.5704419", "0.5677743", "0.5675228", "0.5670562", "0.56680924", "0.5532757", "0.54758984", "0.5437725", "0.5422067", "0.5379142", "0.537214", "0.53706944", "0.53693706" ]
0.6624925
1
Sets the writable of this SkillPropertyModel.
def writable(self, writable: bool): if writable is None: raise ValueError("Invalid value for `writable`, must not be `None`") # noqa: E501 self._writable = writable
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writable(self):\n return True", "def writable(self) -> bool:\n return self._writable", "def __init__(self, writable: bool = None): # noqa: E501\n self.swagger_types = {\n 'writable': bool\n }\n \n self.attribute_map = {\n 'writable': 'writable'\n }\n self._writable = writable", "def Writable(self):\n return self._get_attr('Writable')", "def is_writable(self):\n raise NotImplementedError()", "def __set__(self, instance, value):\n raise AttributeError(\"A Default Property is Read only\")", "def writable(self):\n ...", "def set_data_writable(self):\n pass", "def isWriteable(self, name):\n pass", "def __set__(self, model_instance, value):\r\n raise ValueError, 'Virtual property is read-only'", "def writable(self):\n return 'w' in self._mode", "def setter(self, setter):\n self.accessor.setter = setter", "def setReadOnly(self, state: bool) -> None:\n ...", "def is_setter(self):\n return self._is_setter", "def _set_read_only(self, read_only):\n self._read_only = read_only", "def MakeWritable():\n return shell.ShellCommand(\n name = \"make writable\",\n haltOnFailure = 1,\n description = [\"making writable\"],\n descriptionDone = [\"made writable\"],\n command = [\"chmod\", \"-R\", \"+w\", \".\"],\n )", "def forced(setter):\n @wraps(setter)\n def __set__(desc, instance, value, forced=False):\n if forced:\n return setter(desc, instance, value)\n else:\n raise AttributeError(\"Cannot set a read-only attribute\")\n return __set__", "def changeProperty(self, node, name, propertyName, value, setIfNotExist=False):", "def on_show_only_writable(self):\n self._set_filter_value(\n 'onlyWritableState', self.writable_btn.isChecked())", "def __setattr__(self, name, value):\n if self.serviceimplementation == 'basic':\n if name in ('serviceproperties', 'localProperties', 'internal_attributes', 'propertysynonyms',\n 'forceGetProperty'):\n pass\n elif name[0:2] == '__' or name in self.internal_attributes or name in self.localProperties:\n pass\n elif name in self.serviceproperties or name in self.propertysynonyms:\n if name in self.propertysynonyms: # Reset real name if argument provided in lower or camel case\n name = self.propertysynonyms[name]\n if self.internal: # internal = True forces property local setting even if property is read-only\n pass\n elif self.serviceproperties[name] is True: # True == Editable\n self.SetProperty(name, value)\n return\n else:\n raise AttributeError(\n \"type object '\" + self.objecttype + \"' has no editable property '\" + name + \"'\")\n else:\n raise AttributeError(\"type object '\" + self.objecttype + \"' has no property '\" + name + \"'\")\n object.__setattr__(self, name, value)\n return", "def set_property(self, name, value, persist_changes=True):\n self._properties_metadata[name] = {'readonly': not persist_changes}\n self._properties[name] = value", "def autoprops_generated_setter(self, **kwargs):\n setattr(self, private_property_name, kwargs[property_name])", "def set_mute(cls, mute: bool):\n raise NotImplementedError", "def canwrite(self):\n return False", "def set_read_only(self, bReadOnly):\n\t\tcall_sdk_function('PrlShare_SetReadOnly', self.handle, bReadOnly)", "def set_input_to_read_only(self):\n super(SxHarmPotTst, self).set_input_to_read_only()\n self.input.read_only = True", "def SetWirelessProperty(self, networkid, prop, value):\n if (prop.strip()).endswith(\"script\"):\n print \"Setting script properties through the daemon is not\" \\\n + \" permitted.\"\n return False\n self.LastScan[networkid][prop] = misc.Noneify(value)", "def writable(name):", "def 
read_only(self, read_only):\n\n self._read_only = read_only", "def read_only(self, read_only):\n\n self._read_only = read_only" ]
[ "0.59215254", "0.59071964", "0.5861543", "0.57983047", "0.5595328", "0.557937", "0.5561954", "0.55557615", "0.54962236", "0.5457732", "0.5410007", "0.539612", "0.53701144", "0.5273163", "0.5265374", "0.524871", "0.5225597", "0.5134648", "0.50857335", "0.5045085", "0.5008741", "0.5001664", "0.49920127", "0.49919766", "0.4980491", "0.49612042", "0.49506238", "0.495005", "0.49453825", "0.49453825" ]
0.6848805
0
Test whether a MARWILAlgorithm can be built with all frameworks. Learns from a historicdata file.
def test_marwil_compilation_and_learning_from_offline_file(self): rllib_dir = Path(__file__).parent.parent.parent.parent print("rllib dir={}".format(rllib_dir)) data_file = os.path.join(rllib_dir, "tests/data/cartpole/large.json") print("data_file={} exists={}".format(data_file, os.path.isfile(data_file))) config = ( marwil.MARWILConfig() .rollouts(num_rollout_workers=2) .environment(env="CartPole-v1") .evaluation( evaluation_interval=3, evaluation_num_workers=1, evaluation_duration=5, evaluation_parallel_to_training=True, evaluation_config=marwil.MARWILConfig.overrides(input_="sampler"), off_policy_estimation_methods={}, ) .offline_data(input_=[data_file]) ) num_iterations = 350 min_reward = 70.0 # Test for all frameworks. for _ in framework_iterator(config, frameworks=("tf", "torch")): algo = config.build() learnt = False for i in range(num_iterations): results = algo.train() check_train_results(results) print(results) eval_results = results.get("evaluation") if eval_results: print( "iter={} R={} ".format(i, eval_results["episode_reward_mean"]) ) # Learn until some reward is reached on an actual live env. if eval_results["episode_reward_mean"] > min_reward: print("learnt!") learnt = True break if not learnt: raise ValueError( "MARWILAlgorithm did not reach {} reward from expert " "offline data!".format(min_reward) ) check_compute_single_action(algo, include_prev_action_reward=True) algo.stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_maya():\n return \"maya.bin\" in sys.argv[0]", "def test_marwil_compilation(self):\n config = marwil.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 0 # Run locally.\n num_iterations = 2\n\n # Test for all frameworks.\n for _ in framework_iterator(config):\n trainer = marwil.MARWILTrainer(config=config, env=\"CartPole-v0\")\n for i in range(num_iterations):\n trainer.train()\n check_compute_action(trainer, include_prev_action_reward=True)\n trainer.stop()", "def has(self, label: str) -> bool:\n return any(\n True\n for package in self.packages\n for resource in package.resources\n if resource[\"matrix\"] == f\"{label}_matrix\"\n )", "def test_marwil_cont_actions_from_offline_file(self):\n rllib_dir = Path(__file__).parent.parent.parent.parent\n print(\"rllib dir={}\".format(rllib_dir))\n data_file = os.path.join(rllib_dir, \"tests/data/pendulum/large.json\")\n print(\"data_file={} exists={}\".format(data_file, os.path.isfile(data_file)))\n\n config = (\n marwil.MARWILConfig()\n .rollouts(num_rollout_workers=1)\n .evaluation(\n evaluation_num_workers=1,\n evaluation_interval=3,\n evaluation_duration=5,\n evaluation_parallel_to_training=True,\n # Evaluate on actual environment.\n evaluation_config=marwil.MARWILConfig.overrides(input_=\"sampler\"),\n off_policy_estimation_methods={},\n )\n .offline_data(\n # Learn from offline data.\n input_=[data_file],\n )\n )\n\n num_iterations = 3\n\n # Test for all frameworks.\n for _ in framework_iterator(config, frameworks=(\"tf\", \"torch\")):\n algo = config.build(env=\"Pendulum-v1\")\n for i in range(num_iterations):\n print(algo.train())\n algo.stop()", "def isModellingFramework(*args):\n return _libsbml.SBO_isModellingFramework(*args)", "def check_metamodel(\n dimension: int,\n num_workers: int,\n scale: float,\n budget: int,\n ellipsoid: bool,\n baseline: str,\n num_trials: int = 1,\n) -> None:\n target = QuadFunction(scale=scale, ellipse=ellipsoid)\n # In both cases we compare MetaModel and CMA for a same given budget.\n # But we expect MetaModel to be clearly better only for a larger budget in the ellipsoid case.\n contextual_budget = budget if ellipsoid else 3 * budget\n contextual_budget *= int(max(1, np.sqrt(scale)))\n successes = 0\n for _ in range(num_trials):\n if successes > num_trials // 2: # We already have enough\n break\n\n # Let us run the comparison.\n recommendations: tp.List[np.ndarray] = []\n for name in (\"MetaModel\", baseline if dimension > 1 else \"OnePlusOne\"):\n opt = registry[name](dimension, contextual_budget, num_workers=num_workers)\n recommendations.append(opt.minimize(target).value)\n metamodel_recom, default_recom = recommendations # pylint: disable=unbalanced-tuple-unpacking\n\n # Let us assert that MetaModel is better.\n if target(default_recom) < target(metamodel_recom):\n continue\n\n # With large budget, the difference should be significant.\n if budget > 60 * dimension:\n if not target(default_recom) > 4.0 * target(metamodel_recom):\n continue\n\n # ... 
even more in the non ellipsoid case.\n if budget > 60 * dimension and not ellipsoid:\n if not target(default_recom) > 7.0 * target(metamodel_recom):\n continue\n successes += 1\n assert successes > num_trials // 2, f\"Problem for beating {baseline}.\"", "def _has_processed_data(self):\n return \\\n os.path.exists(\n os.path.join(self._data_root_path, self._processed_train_data_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._processed_dev_data_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._word_vocab_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._char_vocab_file_name))", "def SBO_isModellingFramework(*args):\n return _libsbml.SBO_isModellingFramework(*args)", "def _is_azureml_available() -> bool:\n if importlib.util.find_spec(\"azureml\") is None:\n return False\n if importlib.util.find_spec(\"azureml.core\") is None:\n return False\n return importlib.util.find_spec(\"azureml.core.run\") is not None", "def _validate_built_in(self, tipo):\n\n self.source_type = False\n self.source_file = \"builtin\"\n return tipo in self.c_built_ins or self._match_array(tipo, self.c_built_in_array_types)", "def has_mkl(self):\n return self.mkl_lib is not None", "def checkAnalysis(self) -> bool:\n\n if len(self.materials) == 0:\n raise AnalysisError('No material models have been assigned to the analysis')\n\n for material in self.materials:\n if not material.isValid():\n raise AnalysisError('Material ({:s}) is not valid'.format(material.name))\n\n\n return True", "def check_data():\n check_docs(\"Training\")\n check_docs(\"dev\")\n check_docs(\"Test\")", "def can_process(dict_data: dict) -> bool:\n return dict_data[\"experiment\"] in [_023_EXPERIMENT]", "def test_machine_learning():", "def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True", "def test_corpuses_are_valid():\n DirContainsProtos(\n \"phd/experimental/deeplearning/polyglot/corpuses\", corpus_pb2.Corpus\n )", "def DO(experiment, ln):\n # Determine if the library needs to be constructed\n do_init, do_mature = initial_check(experiment, ln)\n # If it should be, then do so\n if do_init:\n # Initialize the library\n initialize_libraries(experiment, ln)\n # If an affinity maturation should be done\n if do_mature:\n # Affinity maturation of the antibody \n Affinity_Maturation(experiment, ln)\n # Check to see if everything is finished\n finished = check_finish(experiment, ln)\n # Finish the experiment\n if finished:\n Finish(experiment, ln)\n sys.exit(0)", "def _IsApplicable(self, manifest):\n check_list = [(self.tool, manifest.tool), (self.board, manifest.board)]\n\n return all(fnmatch(text, pattern) for text, pattern in check_list)", "def check_matrices(self, folder=None):\n if folder is None:\n abs_path = os.path.abspath(os.path.dirname(sys.argv[0]))\n folder = os.path.join(abs_path, 'matrix')\n\n matrix_file_x, matrix_file_y = self.matrices_names(folder=folder)\n\n x_file = listdirectory(folder, matrix_file_x)\n y_file = listdirectory(folder, matrix_file_y)\n\n matrices_exist = x_file is True and y_file is True\n return matrices_exist", "def assert_train_augmented(self) -> bool:\n dalet = Path(os.environ[\"DATA_PATH\"]) / \"characters\" / \"train\" / \"Dalet\"\n truth_value = False\n try:\n if len(list(dalet.iterdir())) != 72: # downloaded number of chars\n truth_value = True\n except FileNotFoundError:\n pass # this is ok because we handle the truth_value\n return 
truth_value", "def usage_of_matomo(registry):\n if 'mode' in registry.settings:\n return registry.settings['usage_of_matomo'].lower() == 'true'\n return False", "def isAnalysisRecipe(self):\r\n return True", "def is_file_exist(self):\n return os.path.isfile(os.path.join(self.output_path, 'amr_corpus_ext.pickle'))", "def validate(self, framework) -> bool:\n\n # Make sure that all of the quantities the Framework says we should read in have been read in, and that\n # those quantities all have some data values associated with them\n for pop in self.pops.values():\n if pd.isna(pop[\"type\"]):\n pop[\"type\"] = self._pop_types[0]\n assert pop[\"type\"] in self._pop_types, 'Error in population \"%s\": population type \"%s\" not found in framework. If the framework defines a non-default population type, then it must be explicitly specified in databooks and program books.' % (pop[\"label\"], pop[\"type\"])\n\n for obj_type, df in zip([\"comps\", \"characs\", \"pars\"], [framework.comps, framework.characs, framework.pars]):\n for spec_name, spec in zip(df.index, df.to_dict(orient=\"records\")):\n if spec_name in self.pops:\n raise Exception('Code name \"%s\" has been used for both a population and a framework quantity - population names must be unique' % (spec_name))\n\n if not pd.isna(spec[\"databook page\"]):\n if spec_name not in self.tdve:\n if not np.isfinite(spec[\"default value\"]):\n raise Exception('The databook did not contain a required TDVE table named \"%s\" (code name \"%s\")' % (spec[\"display name\"], spec_name))\n else:\n logger.warning('TDVE table \"%s\" (code name \"%s\") is missing from the databook. Using default values from the framework' % (spec[\"display name\"], spec_name))\n units = framework.get_databook_units(spec_name)\n self.tdve[spec_name] = TimeDependentValuesEntry(spec[\"display name\"], self.tvec.copy(), allowed_units=[units], comment=spec[\"guidance\"], pop_type=spec[\"population type\"])\n for pop in self.pops.keys():\n self.tdve[spec_name].ts[pop] = TimeSeries(assumption=spec[\"default value\"], units=units)\n tdve_page = framework.sheets[\"databook pages\"][0][framework.sheets[\"databook pages\"][0][\"datasheet code name\"] == spec[\"databook page\"]][\"datasheet title\"].values[0]\n if tdve_page in self.tdve_pages:\n self.tdve_pages[tdve_page].append(spec_name)\n else:\n self.tdve_pages[tdve_page] = [spec_name]\n else:\n framework_units = framework.get_databook_units(spec_name) # Get the expected databook units\n tdve = self.tdve[spec_name]\n tdve_sheet = self.get_tdve_page(spec_name)\n location = 'Error in TDVE table \"%s\" on sheet \"%s\"' % (tdve.name, tdve_sheet)\n assert tdve.pop_type in self._pop_types, '%s. Population type \"%s\" did not match any in the framework' % (location, tdve.pop_type)\n\n required_pops = [x for x, y in self.pops.items() if y[\"type\"] == tdve.pop_type] # The TDVE should contain values for all populations of that type, otherwise cannot construct the ParameterSet. Check that these populations are all present\n missing_pops = set(required_pops).difference(tdve.ts.keys())\n if missing_pops:\n raise Exception(\"%s. The following populations were not supplied but are required: %s\" % (location, missing_pops))\n\n for name, ts in self.tdve[spec_name].ts.items():\n assert ts.has_data, \"%s. Data values missing for %s (%s)\" % (location, tdve.name, name)\n assert ts.units is not None, \"%s. 
Units missing for %s (%s)\" % (location, tdve.name, name)\n if ts.units.strip().lower() != framework_units.strip().lower():\n # If the units don't match the framework's 'databook' units, see if they at least match the standard unit (for legacy databooks)\n # For compartments and characteristics, the units must match exactly\n if obj_type in [\"comps\", \"characs\"] or (\"format\" in spec and spec[\"format\"] is not None and ts.units.lower().strip() != spec[\"format\"].lower().strip()):\n assert ts.units == framework_units, '%s. Unit \"%s\" for %s (%s) does not match the declared units from the Framework (expecting \"%s\")' % (location, ts.units, tdve.name, name, framework_units)\n if obj_type == \"par\" and spec[\"timed\"] == \"y\":\n assert not ts.has_time_data, \"%s. Parameter %s (%s) is marked as a timed transition in the Framework, so it must have a constant value (i.e., the databook cannot contain time-dependent values for this parameter)\" % (location, tdve.name, name)\n\n for tdc in self.interpops + self.transfers:\n if tdc.from_pop_type is None: # Supply default pop type\n tdc.from_pop_type = self._pop_types[0]\n assert tdc.from_pop_type in self._pop_types, 'Error in transfer/interaction \"%s\": from population type \"%s\" not found in framework. If the framework defines a non-default population type, then it must be explicitly specified in databooks and program books.' % (tdc.full_name, tdc.from_pop_type)\n if tdc.to_pop_type is None: # Supply default pop type\n tdc.to_pop_type = self._pop_types[0]\n assert tdc.to_pop_type in self._pop_types, 'Error in transfer/interaction \"%s\": to population type \"%s\" not found in framework. If the framework defines a non-default population type, then it must be explicitly specified in databooks and program books.' % (tdc.full_name, tdc.to_pop_type)\n\n for _, spec in framework.interactions.iterrows():\n for tdc in self.interpops:\n if tdc.code_name == spec.name:\n for (from_pop, to_pop), ts in tdc.ts.items():\n assert to_pop in self.pops, 'Population \"%s\" in \"%s\" not recognized. Should be one of: %s' % (to_pop, spec.name, self.pops.keys())\n assert self.pops[to_pop][\"type\"] == tdc.to_pop_type, 'Interaction \"%s\" has to-population type \"%s\", but contains Population \"%s\", which is type \"%s\"' % (tdc.full_name, tdc.to_pop_type, to_pop, self.pops[to_pop][\"type\"])\n assert from_pop in self.pops, 'Population \"%s\" in \"%s\" not recognized. Should be one of: %s' % (from_pop, spec.name, self.pops.keys())\n assert self.pops[from_pop][\"type\"] == tdc.from_pop_type, 'Interaction \"%s\" has from-population type \"%s\", but contains Population \"%s\", which is type \"%s\"' % (tdc.full_name, tdc.from_pop_type, from_pop, self.pops[from_pop][\"type\"])\n assert ts.has_data, \"Data values missing for interaction %s, %s->%s\" % (spec.name, to_pop, from_pop)\n assert ts.units.lower().title() == FS.DEFAULT_SYMBOL_INAPPLICABLE.lower().title(), 'Units error in interaction %s, %s->%s. Interaction units must be \"N.A.\"' % (spec.name, to_pop, from_pop)\n break\n else:\n raise Exception('Required interaction \"%s\" not found in databook' % spec.name)\n\n for tdc in self.transfers:\n for (from_pop, to_pop), ts in tdc.ts.items():\n assert to_pop in self.pops, 'Population \"%s\" in \"%s\" not recognized. 
Should be one of: %s' % (to_pop, tdc.full.name, self.pops.keys())\n assert self.pops[to_pop][\"type\"] == tdc.to_pop_type, 'Transfer \"%s\" has population type \"%s\", but contains Population \"%s\", which is type \"%s\"' % (tdc.full_name, tdc.to_pop_type, to_pop, self.pops[to_pop][\"type\"])\n assert from_pop in self.pops, 'Population \"%s\" in \"%s\" not recognized. Should be one of: %s' % (from_pop, tdc.full.name, self.pops.keys())\n assert self.pops[from_pop][\"type\"] == tdc.from_pop_type, 'Transfer \"%s\" has population type \"%s\", but contains Population \"%s\", which is type \"%s\"' % (tdc.full_name, tdc.from_pop_type, from_pop, self.pops[from_pop][\"type\"])\n assert ts.has_data, \"Data values missing for transfer %s, %s->%s\" % (tdc.full_name, to_pop, from_pop)\n assert ts.units is not None, \"Units are missing for transfer %s, %s->%s\" % (tdc.full_name, to_pop, from_pop)\n return True", "def built(self) -> bool:\n raise NotImplementedError()", "def check(module: str, force: bool = False) -> bool:\n lemmatizer = get_model(module)\n return False not in [\n os.path.exists(\n get_path(module, file.name)\n )\n for file in lemmatizer.DOWNLOADS\n ] or force", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def detect_if_qlm():\n try:\n importlib.import_module(\"qat.linalg\")\n print(\"=> Detected a QLM installation. <=\")\n return True\n except ModuleNotFoundError:\n print(\"=> No QLM installation detected, adding myQLM to the dependencies. <=\")\n return False" ]
[ "0.6046675", "0.572271", "0.55824965", "0.55451214", "0.54947644", "0.54622614", "0.5452598", "0.5437318", "0.54144233", "0.5404908", "0.5300404", "0.5271665", "0.5248322", "0.52429456", "0.5239995", "0.52355534", "0.5219218", "0.52116257", "0.52109", "0.518551", "0.51845616", "0.51754063", "0.5161352", "0.5158525", "0.51507175", "0.5131109", "0.51294535", "0.5127336", "0.5127336", "0.5125658" ]
0.6203485
0
Test whether MARWIL runs with cont. actions. Learns from a historicdata file.
def test_marwil_cont_actions_from_offline_file(self): rllib_dir = Path(__file__).parent.parent.parent.parent print("rllib dir={}".format(rllib_dir)) data_file = os.path.join(rllib_dir, "tests/data/pendulum/large.json") print("data_file={} exists={}".format(data_file, os.path.isfile(data_file))) config = ( marwil.MARWILConfig() .rollouts(num_rollout_workers=1) .evaluation( evaluation_num_workers=1, evaluation_interval=3, evaluation_duration=5, evaluation_parallel_to_training=True, # Evaluate on actual environment. evaluation_config=marwil.MARWILConfig.overrides(input_="sampler"), off_policy_estimation_methods={}, ) .offline_data( # Learn from offline data. input_=[data_file], ) ) num_iterations = 3 # Test for all frameworks. for _ in framework_iterator(config, frameworks=("tf", "torch")): algo = config.build(env="Pendulum-v1") for i in range(num_iterations): print(algo.train()) algo.stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_data_by_filename(fname):\n return \"Run2017\" in fname", "def test_rawdata(data):\n base = list(data)[0]\n if base in [\"tv\",\"leftovers\",\"tv short\",\"movie\",\"OVA / ONA / Special\"]:\n return True\n return False", "def test_marwil_compilation_and_learning_from_offline_file(self):\n rllib_dir = Path(__file__).parent.parent.parent.parent\n print(\"rllib dir={}\".format(rllib_dir))\n data_file = os.path.join(rllib_dir, \"tests/data/cartpole/large.json\")\n print(\"data_file={} exists={}\".format(data_file, os.path.isfile(data_file)))\n\n config = (\n marwil.MARWILConfig()\n .rollouts(num_rollout_workers=2)\n .environment(env=\"CartPole-v1\")\n .evaluation(\n evaluation_interval=3,\n evaluation_num_workers=1,\n evaluation_duration=5,\n evaluation_parallel_to_training=True,\n evaluation_config=marwil.MARWILConfig.overrides(input_=\"sampler\"),\n off_policy_estimation_methods={},\n )\n .offline_data(input_=[data_file])\n )\n\n num_iterations = 350\n min_reward = 70.0\n\n # Test for all frameworks.\n for _ in framework_iterator(config, frameworks=(\"tf\", \"torch\")):\n algo = config.build()\n learnt = False\n for i in range(num_iterations):\n results = algo.train()\n check_train_results(results)\n print(results)\n\n eval_results = results.get(\"evaluation\")\n if eval_results:\n print(\n \"iter={} R={} \".format(i, eval_results[\"episode_reward_mean\"])\n )\n # Learn until some reward is reached on an actual live env.\n if eval_results[\"episode_reward_mean\"] > min_reward:\n print(\"learnt!\")\n learnt = True\n break\n\n if not learnt:\n raise ValueError(\n \"MARWILAlgorithm did not reach {} reward from expert \"\n \"offline data!\".format(min_reward)\n )\n\n check_compute_single_action(algo, include_prev_action_reward=True)\n\n algo.stop()", "def check_correct_marc_035_exists(marc_035s):\n for index, m35 in enumerate(marc_035s):\n if \"9\" in m35 and \"arxiv\" in m35[\"9\"].lower():\n return True", "def is_fluorescence(file):\n for line in read_file(file):\n if \"TD=\" in line.upper():\n return True\n return False", "def perform_filecheck():\n\n\t# Open files\n\ttrain = open('train_aae_final', 'r')\n\ttest = open('test_aae_final', 'r')\n\n\n\t# Check number of training and testing samples\n\tprint (\"\")\n\tprint (\"Number of training samples =\", len(train.readlines()))\n\tprint (\"Number of testing samples =\", len(test.readlines()))\n\tprint (\"\")\n\n\ttrain.close()\n\ttest.close()", "def check_for_augmented_data(data_dir):\n if \"augmented_dogs.csv\" in os.listdir(data_dir):\n print(\"Augmented data found, would you like to use it? 
y/n\")\n print(\">> \", end=\"\")\n rep = str(input())\n return rep == \"y\"\n return False", "def sniff( self, filename ):\n owl_marker = re.compile(r'\\<owl:')\n with open( filename ) as handle:\n # Check first 200 lines for the string \"<owl:\"\n first_lines = handle.readlines(200)\n for line in first_lines:\n if owl_marker.search( line ):\n return True\n return False", "def test_contains_month_true(self):\n ary = self.ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')[2009]\n self.assertTrue(11 in ary)", "def test_murim_recall_annFiles():\n\n quality_cutoff = float(100)\n we_are_missing = cmp_murim_mutations_yusan_2.get_mutations_we_miss(quality_cutoff)\n use_data_dir = '../data/all_non_ref_hg18/'\n cancer_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanT.ann')\n normal_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanN.ann')\n cancer_intr = len(we_are_missing) - len(set(cancer_qualities) & set(we_are_missing))\n normal_intr = len(we_are_missing) - len(set(normal_qualities) & set(we_are_missing))\n for chrpos in we_are_missing:\n nose.tools.assert_true(chrpos in normal_qualities,\n chrpos + ' missing in normal, ' + str(cancer_intr) + ' ' + str(normal_intr))\n nose.tools.assert_true(chrpos in cancer_qualities,\n chrpos + ' missing in cancer, ' + str(cancer_intr) + ' ' + str(normal_intr))", "def in_maya():\n return \"maya.bin\" in sys.argv[0]", "def check_up(year, batch_number):\n\tdm_name = \"Data_Matrices/\"+str(year) + \"_\" + str(batch_number) + \"_data_matrix.csv\"\n\tlf_name = \"Log_Files/\"+str(year) + \"_\" + str(batch_number) + \"_log_file.txt\"\n\n\tdm_lines = sum(1 for line in open(dm_name,'r'))\n\tlf_lines = sum(1 for line in open(lf_name,'r'))\n\tif dm_lines != lf_lines:\n\t\traise ValueError(\"DATA MATRIX {}_{} HAS DIFFERENT NUMBER OF LINES THAN LOG FILE\".format(year, batch_number))\n\n\twith open(dm_name,'r') as dm:\n\t\twith open(lf_name, 'r') as lf:\n\t\t\tdm_reader = csv.reader(dm)\n\t\t\tdm_numbers = [line[0] for line in dm_reader]\n\t\t\tlf_numbers = [line.split()[0] for line in lf]\n\tif dm_numbers != lf_numbers:\n\t\tprint(dm_numbers)\n\t\tprint(lf_numbers)\n\t\traise ValueError(\"DATA MATRIX {}_{} CONTAINS DIFFERENT DOCUMENTS THAN LOG FILE\".format(year, batch_number))\n\n\twith open(dm_name,'r') as dm:\n\t\tdm_reader = csv.reader(dm)\n\t\tfor line in dm_reader:\n\t\t\twords = line[1:]\n\t\t\tbreak\n\tfor word in words:\n\t\tif not word.isalpha():\n\t\t\traise ValueError(\"DATA MATRIX {}_{} CONTAINS NON-ALPHABETIC WORD\".format(year, batch_number))\n\n\twith open(lf_name,'r') as lf:\n\t\tline = next(lf)\n\t\tlf_words = sum([float(line.split(\" --- \")[2].split()[0]) for line in lf])\n\tif len(words) != lf_words:\n\t\traise ValueError(\"DATA MATRIX {}_{} AND LOG FILE COUNT DIFFERENT NUMBER OF WORDS\".format(year, batch_number))", "def test_get_corpus(self):\n references = pre.read_data(self.testfilename)\n corpus = pre.get_corpus(references)\n truth = ['m jones', 'e rundensteiner', 'y huang', 'matthew c jones', \n 'e rundensteiner', 'h kuno', 'p marron', 'v taube', 'y ra', \n 'matthew c jones', 'e rundensteiner', 'y huang', 'mike w miller',\n 'l berg', 'mike w miller', 'c chen', 'd kung', 'j samuel', 'j gao',\n 'p hsia', 'y toyoshima', 'jane j robinson', 'jane j robinson',\n 'a gupta', 'a gonzalez', 'a hamid', 'c overstreet', 'h wahab', 'j wild',\n 'k maly', 's ghanem', 'x zhu', 'mary d brown', 'y patt']\n self.assertEquals(corpus, truth)", "def check_data():\n check_docs(\"Training\")\n check_docs(\"dev\")\n 
check_docs(\"Test\")", "def read(self, word: str) -> Union[bool, str]:\n start_sets = self._sets_start()\n try:\n casuistic = [self._read(word, i.state) for i in start_sets]\n except ValueError:\n return \"No Has intraducido una cadena valida\"\n return any(casuistic)", "def assert_train_augmented(self) -> bool:\n dalet = Path(os.environ[\"DATA_PATH\"]) / \"characters\" / \"train\" / \"Dalet\"\n truth_value = False\n try:\n if len(list(dalet.iterdir())) != 72: # downloaded number of chars\n truth_value = True\n except FileNotFoundError:\n pass # this is ok because we handle the truth_value\n return truth_value", "def test_online_learning(self):\n model = PoincareModel(self.data, burn_in=0, negative=3)\n self.assertEqual(len(model.kv.vocab), 7)\n self.assertEqual(model.kv.vocab['kangaroo.n.01'].count, 3)\n self.assertEqual(model.kv.vocab['cat.n.01'].count, 1)\n model.build_vocab([('kangaroo.n.01', 'cat.n.01')], update=True) # update vocab\n self.assertEqual(model.kv.vocab['kangaroo.n.01'].count, 4)\n self.assertEqual(model.kv.vocab['cat.n.01'].count, 2)", "def can_generate_ransom_note(self):\n if self.ransom_text == '' or self.ransom_text == ' ':\n return True\n ransom_text_words = self.ransom_text.split(' ')\n magazine_text_words = self.magazine_text.split(' ')\n # counting the occurrences of words in the ransom and magazine texts.\n ransom_count = self._count_words_in_string(ransom_text_words)\n magazine_count = self._count_words_in_string(magazine_text_words)\n result = False\n for i in ransom_text_words:\n # if magazine_count hashmap doesn't have word\n if magazine_count.get(i) is None:\n result = False\n break\n # if ransom_count hashmap have less word occurances than magazine count.\n if ransom_count.get(i) <= magazine_count.get(i):\n result = True\n else:\n result = False\n break\n return result", "def validateFromFile(cls,infile,target):\n corpus = Corpus()\n if corpus.readFromFile(infile):\n return( Validator.validate(corpus,target) )\n else:\n return(False)", "def is_lyrics_approved():", "def test_read_data_unlabeled(self):\n references = pre.read_data(self.testfilename)\n truth = [\n [Reference(0, 'm jones', \n 'symbol intersect detect method improv spatial intersect join', \n ['e rundensteiner', 'y huang'], 'geoinformatica', None),\n Reference(1, 'matthew c jones', \n 'improv spatial intersect join symbol intersect detect', \n ['e rundensteiner', 'h kuno', 'p marron', 'v taube', 'y ra'], \n 'sigmodels.intern manag data', None),\n Reference(2, 'matthew c jones',\n 'view materi techniqu complex hirarch object', ['e rundensteiner',\n 'y huang'], 'ssd symposium larg spatial databas', None)],\n [Reference(3, 'mike w miller', 'domin draw bipartit graph', \n ['l berg'], 'sigucc special interest group univers comput servic',\n None),\n Reference(4, 'mike w miller', 'rel compromis statist databas', \n [], 'sigucc special interest group univers comput servic', None)],\n [Reference(5, 'c chen', 'formal approach scenario analysi',\n ['d kung', 'j samuel', 'j gao', 'p hsia', 'y toyoshima'],\n 'ieee softwar', None)],\n [Reference(6, 'jane j robinson', 'discours code clue context', [], \n 'acl meet the associ comput linguist', None),\n Reference(7, 'jane j robinson', 'diagram grammar dialogu', [],\n 'cooper interfac inform system', None)],\n [Reference(8, 'a gupta', 'iri h java distanc educ', ['a gonzalez', \n 'a hamid', 'c overstreet', 'h wahab', 'j wild', 'k maly', 's ghanem',\n 'x zhu'], 'acm journal educ resourc comput', None)],\n [Reference(9, 'mary d brown',\n 'intern redund represent 
limit bypass support pipelin adder regist'\n 'file', ['y patt'], 'proceed the th ieee intern symposium high '\n 'perform comput architectur hpca intern symposium high perform '\n 'comput architectur talk slide', None)]]\n self.assertEquals(references, truth)", "def CrossCheck(dataloader):", "def an_check(self):\n\t\tfor filles in self.xelt:\n\t\t\t# parcours rapide des branches niveau 1\n\t\t\tif search(r'analytic$', filles.tag):\n\t\t\t\treturn True\n\t\treturn False", "def test_admit_records_for_spell(self):\n records = self.admitgen.data.findall('record')\n admit_activity_record = records[3]\n admit_record = records[4]\n admit_activity_update_record = records[5]\n self.assertEqual(admit_activity_record.attrib['model'], 'nh.activity',\n 'Incorrect model for admit activity record')\n self.assertEqual(admit_record.attrib['model'],\n 'nh.clinical.patient.admission',\n 'Inccorect model for admit record')\n self.assertEqual(admit_activity_update_record.attrib['model'],\n 'nh.activity',\n 'Incorrect model for update activity record update')", "def test_contains():\n atom = ATOMClassifier(X_class, y_class, random_state=1)\n atom.run(\"Tree\")\n assert \"alcohol\" in atom.tree", "def is_file_exist(self):\n return os.path.isfile(os.path.join(self.output_path, 'amr_corpus_ext.pickle'))", "def read_predicate(interactions_df, obs_interactions, target_interactions, truth_interactions, fold, setting):\n print(\"predicate_construction: read_predicate:\")\n\n def write(s, p):\n print(\"predicate_construction: read_predicate: writing: \" + \n './goodreads/' + str(fold) + '/' + setting + '/read_' + p + '.txt') \n s.to_csv('./goodreads/' + str(fold) + '/' + setting + '/read_' + p + '.txt',\n sep='\\t', header=False, index=True)\n\n # observed predicates\n partition = 'obs'\n observed_interactions_df = interactions_df.loc[obs_interactions, :]\n read_interactions = observed_interactions_df[observed_interactions_df.is_read]\n read_series = pd.Series(data=1, index=read_interactions.index, name='read')\n write(read_series, partition)\n\n # truth predicates\n partition = 'truth'\n truth_interactions_df = interactions_df.loc[truth_interactions, :]\n read_interactions = truth_interactions_df[truth_interactions_df.is_read]\n read_series = pd.Series(data=1, index=read_interactions.index, name='read')\n write(read_series, partition)\n\n # target predicates\n partition = 'targets'\n missing_reads = observed_interactions_df[~observed_interactions_df.is_read].index\n augmented_targets = target_interactions.union(missing_reads)\n read_df = pd.DataFrame(index=augmented_targets)\n write(read_df, partition)", "def read_data(self, representation, filename) -> bool:\n self.adjacency_matrix = self.reader.read_data(representation, filename)\n if self.adjacency_matrix is None:\n return False\n return True", "def test_annot():\n annots = ['aparc', 'aparc.a2005s']\n for a in annots:\n annot_path = pjoin(data_path, \"label\", \"%s.%s.annot\" % (\"lh\", a))\n labels, ctab, names = read_annot(annot_path)\n assert_true(labels.shape == (163842, ))\n assert_true(ctab.shape == (len(names), 5))", "def isAnalysisRecipe(self):\r\n return True" ]
[ "0.54971373", "0.53127843", "0.52745575", "0.5235069", "0.5188993", "0.5121418", "0.509365", "0.50501126", "0.50026417", "0.49532452", "0.4941224", "0.49351087", "0.49332222", "0.49167177", "0.4914973", "0.48895016", "0.48684993", "0.486381", "0.4858225", "0.48537436", "0.4831539", "0.4830078", "0.48098224", "0.4790561", "0.47705412", "0.47689116", "0.47588217", "0.47436336", "0.47430623", "0.47267562" ]
0.5548691
0
Returns True if setHook has been called, else False.
def hooked(self): return hasattr(self, 'hook')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hooked(self):\n return hasattr(self, \"hook\")", "def is_hooked(self):\n return self.is_hook", "def has_hookscript ( self ):\n return self.hook_script_ref is not None", "def __bool__(self):\n return any(\n getattr(self, hook_trigger, None) for hook_trigger in self._hook_triggers\n )", "def validated_hook(self) -> Callable[[bool], None]:\n return self._validated_hook", "def check_called(self, func):\n self.called[func] = False\n def _check(*args, **kwargs):\n self.called[func] = True\n return func(*args, **kwargs)\n return _check", "def __bool__(self) -> bool:\n return self._rpc is not None", "def enable_defaults ( self ):\n # not strict: missing hooks are ignored\n success = False\n if self.hook_root:\n success = True\n default_hooks = self.hook_root.get_default_scripts()\n for event, hooks in default_hooks.items():\n if not self.link_hooks_v ( event, hooks ):\n success = False\n # -- end if\n\n return success", "def __bool__(self):\n\n return bool(self.__history)", "def is_onset(self):\n if self._onset:\n self._reset_onset = True\n return True\n return False", "def is_setter(self):\n return self._is_setter", "def isSetValue(self):\n return _libsbml.FluxBound_isSetValue(self)", "def final_check(self):\n for func in self.called.keys():\n self.assertTrue(self.called[func], \"%s was not called\" % (func,))", "def _run_hook(self, value): # type: (Any) -> Tuple[bool, Any]\n if self._hook is None:\n return True, value\n # noinspection PyBroadException\n try:\n return True, self._hook(value)\n except Exception:\n return False, self._default", "def should_trigger(self, previous_result, *_args, **_kwargs):\n return self.extension.config.get('enabled', True)", "def has_off_hook_warning(self) -> bool:", "def before_exit(hook):\n\n global before_exit_has_run\n if before_exit_has_run == 0:\n before_exit_has_run = 1\n # We can force now, since any handling for not force has already been\n # handled\n GPS.exit(force=1, status=exit_status)\n return True", "def isCall(self) -> bool:\n ...", "def _can_set(self, key, value):\n return not bool(self._set_callback(key, value))", "def link_was_called(self):\n return self._link_was_called", "def webhook(self) -> bool:\n return self._webhook", "def process_hooks(self, hooks):\n try:\n enabled_hooks = self.project.HOOKS\n except AttributeError:\n return hooks", "def isSet(self) -> bool:\n ...", "def _implements_test_batch_hooks(self):\n return not is_default(self.on_test_batch_begin) or not is_default(\n self.on_test_batch_end\n )", "def set_hook(self,name,hook):\n\n # At some point in the future, this should validate the hook before it\n # accepts it. Probably at least check that the hook takes the number\n # of args it's supposed to.\n setattr(self.hooks,name,new.instancemethod(hook,self,self.__class__))", "def is_on(self) -> bool:\n return self._zone.data[\"mode\"] == \"override\" and self._zone.data[\"setpoint\"]", "def was_pressed(self) -> bool:\n return True", "def is_triggered(self) -> bool:\n raise NotImplementedError()", "def isInitialized(self):\n\t\tif self.isTypeSet and self.isCfgSet:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def on_hook(self) -> None:" ]
[ "0.8287972", "0.78261894", "0.71733844", "0.68210393", "0.6275487", "0.61548287", "0.6046052", "0.60282415", "0.58755386", "0.58091587", "0.5783909", "0.577299", "0.57432944", "0.57335156", "0.5725817", "0.57083863", "0.5678062", "0.56624424", "0.56512415", "0.5643167", "0.56344175", "0.5589281", "0.55823946", "0.5557711", "0.5553289", "0.55296177", "0.5525928", "0.5516822", "0.5512005", "0.5505288" ]
0.82772356
1
Triggers traffic to be sent asynchronously. This is not a blocking function.
def send_traffic_async(self, traffic, function): raise NotImplementedError( "The TrafficController does not implement", "the \"send_traffic_async\" function.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def send(self):", "async def async_send(self, **kwargs):\n return await super().async_send(address=self._address, **kwargs)", "def send_traffic_async(self, traffic, function):\n self._logger.debug('send_traffic_async with ' +\n str(self._traffic_gen_class))\n\n for packet_size in self._packet_sizes:\n traffic['l2'] = {'framesize': packet_size}\n self._traffic_gen_class.start_rfc2544_throughput(\n traffic,\n trials=self._trials,\n duration=self._duration)\n self._traffic_started = True\n if len(function['args']) > 0:\n function['function'](function['args'])\n else:\n function['function']()\n result = self._traffic_gen_class.wait_rfc2544_throughput()\n result = TrafficControllerRFC2544._append_results(result,\n packet_size)\n self._results.append(result)", "async def async_send(self):\n return await super().async_send(data1=self._data1)", "def send(self, packet):\n self._loop.create_task(self.send_coro(packet))", "async def send(self, message):", "def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()", "def start_sync(self):\r\n self.send_queue.put(('sync', time.time()))\r\n self.awaiting_sync = True", "def send_emission(self):\n if self._emit_queue.empty():\n return\n emit = self._emit_queue.get()\n emit()", "def send(self, url, data, headers):\n eventlet.spawn(self._send_payload, (url, data, headers))", "def done_sending(self):\r\n self._flush(True)", "async def send(self, *args, **kwargs) -> None:\n for callback in self:\n res = callback(*args, **kwargs)\n if asyncio.iscoroutine(res) or isinstance(res, asyncio.Future):\n await res", "def _send(self) -> None:\n if not self.connected or now() < self.next_send:\n return\n self.next_send += self.poll_interval\n buff = []\n while self.outq:\n msg_id, tag, data = self.outq.popleft()\n buff.append(pickle.dumps((msg_id, tag, data)))\n if buff:\n stream = b\"\".join(buff)\n self.endpoint.sendall(stream)", "async def send(self):\n message = b'foo\\nbar\\nbaz\\nqux\\n'\n for b in message:\n await asyncio.sleep(0.5)\n self.transport.serial.write(bytes([b]))\n print(f'Writer sent: {bytes([b])}')\n self.transport.close()", "def send(self, data, async=True, force=True):\n if self.channel is None:\n raise ChannelNotSetException()\n if self.event is None:\n raise EventNotSetException()\n\n # THIS NETWORK CALL SOMETIMES TAKES ~1second and MUST be tasked off asynchronously!\n #\n # send the data on the channel with the specified event name\n if not isinstance(data, dict):\n data = data.get_o()\n\n # get the linkedExpiringObjectQueueTable (ie: pbp+stats combiner\n # check if its the type of object we should throw in the pbp+stats linker queue\n if not force:\n raise Exception('We are attempting to use the removed LinkedExpiringObject logic.')\n\n # send it\n if async:\n # print('')\n # print('---------- pusher_send_task -----------')\n # print('| type: %s' % type(data))\n # print('| data: %s' % str(data))\n # print('----------------------------------------')\n # print('')\n # print('')\n countdown_seconds = 0\n if self.delay_seconds is not None:\n countdown_seconds = self.delay_seconds\n\n pusher_send_task.apply_async(\n (self, data),\n serializer='pickle',\n countdown=countdown_seconds)\n else:\n self.trigger(data)", "def proc_exec_async(cmd):\n\n envoy.connect(cmd)\n return None", "def sendAsync(self):\n url = \"https://chatbase.com/api/message\"\n\n async def _do_async_request():\n async with 
aiohttp.ClientSession() as session:\n async with session.post(url,\n data=self.to_json(),\n headers=Message.get_content_type()\n ) as resp:\n return await resp.text()\n\n asyncio.ensure_future(_do_async_request())", "def send_req(self):\n self.n_send_req += 1", "async def send(self) -> None:\n await self._mutations.send()\n await self._counters.send()", "def _send_request(self) -> None:\n logger.debug(f'Sent: {self.request.hex()} to {self.transport.get_extra_info(\"peername\")}')\n self.transport.sendto(self.request)\n asyncio.get_event_loop().call_later(self._retry_timeout, self.retry_mechanism)", "def async_request(self, callback, *args):\r\n seq = self.send_request(*args)\r\n self.async_replies[seq] = callback", "async def sender(self):\n out = await self.output_queue.get()\n if not out.ready():\n logger.info(\">>> Requeuing {}\".format(out))\n await self.output_queue.put(out)\n await asyncio.sleep(0.05)\n return\n if out.expired():\n logger.info(\">>> Discarding {}\".format(out))\n out.discarded = True\n return\n content = [out.content] if type(out.content) is str else out.content\n logger.info(\">>> Sending:\\n{}\".format(content))\n await self.websocket.send(json.dumps(content))\n out.sent = True\n await asyncio.sleep(len(content) * 0.5)", "def after_send(self):", "def handle_write(self):\n self.initiate_send()", "async def send(self, request, **kwargs):\n priority = kwargs.pop('priority', None)\n if priority is None:\n future = self._executor.submit(\n self.send_blocking, request, **kwargs\n )\n else:\n LOG.debug(\n 'send: priority=%r, %r, kwargs=%r', priority, request, kwargs\n )\n future = self._executor.submit_with_priority(\n priority, self.send_blocking, request, **kwargs\n )\n future.set_finalizer(lambda response: response.close())\n return await adapters.FutureAdapter(future).get_result()", "async def _write_async(self, command: bytes):\n await self._serial.write_async(command)\n logger.debug(f\"Command {repr(command)} sent!\")", "def send(self) -> None:\n\n payload = self.get_payload()\n try:\n self.response = requests.get(url=FAST_API, params=payload)\n except requests.exceptions.ConnectionError:\n print(f\"requests.exceptions.ConnectionError! Trying again in 5 seconds...\")\n sleep(5)\n self.send()", "def send (self, data):\n return self.sending.send(data)", "def send_traffic(self, traffic):\n raise NotImplementedError(\n \"The TrafficController does not implement\",\n \"the \\\"send_traffic\\\" function.\")", "def sendAsync(self):\n url = (\"https://chatbase.com/api/messages?api_key=%s\" % self.api_key)\n\n async def _do_async_request():\n async with aiohttp.ClientSession() as session:\n async with session.post(url,\n data=self.to_json(),\n headers=Message.get_content_type()\n ) as resp:\n return await resp.text()\n\n asyncio.ensure_future(_do_async_request())" ]
[ "0.70518446", "0.658749", "0.64080757", "0.61358184", "0.6126272", "0.59462124", "0.59020275", "0.5884106", "0.5869222", "0.5818092", "0.57891905", "0.576021", "0.5733072", "0.57246214", "0.57081556", "0.5699909", "0.5676819", "0.5673217", "0.5659739", "0.5647509", "0.5610203", "0.55961657", "0.55749255", "0.5556674", "0.55527693", "0.55358225", "0.5521771", "0.5512616", "0.55019134", "0.548294" ]
0.7024093
1
Computes the total price based on the number of CDs and DVDs
def prix(n_cd=0, n_dvd=0):
    if n_cd < SEUIL_CD:
        prix_cd = n_cd * PRIX_CD
    else:
        prix_cd = n_cd * PRIX_CD_SOLDE
    if n_dvd < SEUIL_DVD:
        prix_dvd = n_dvd * PRIX_DVD
    else:
        prix_dvd = n_dvd * PRIX_DVD_SOLDE
    return prix_cd + prix_dvd  # prix total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def total_volume(self):", "def subtotal(self):\n return self.cantidad * self.precio", "def subtotal(self):\n return self.precio_unitario * self.cantidad", "def get_total(self):\n\n self.base_price = self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "def sub_total():\n return sum(SAVE_PRICE)", "def get_total(self):\n\n base_price=5\n if self.species == \"Christmas\":\n base_price=1.5*base_price\n \n total = (1 + self.tax) * self.qty * base_price\n\n if self.order_type==\"international\" and self.qty<10:\n total+=3\n\n return total", "def get_total(self):\n\n base_price = self.get_base_price()\n if self.species == \"christmas melon\":\n base_price = base_price * 1.5\n\n total = ((1 + self.tax) * self.qty * base_price)\n\n return total", "def get_total(self):\n\n base_price = self.get_base_price()\n\n if self.species == \"Christmas\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def get_total(self):\n\n base_price = 5\n \n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5 \n\n total = (1 + self.tax) * self.qty * base_price \n\n if self.order_type == \"international\" and self.qty>10:\n total += 3\n\n\n return total", "def calcularTotal(self):\n subtotales=[]\n for row in range(0,self.tableFactura.rowCount()):\n subtotales.append(float(self.tableFactura.item(row,2).text()))\n importeTotal=sum(subtotales)\n return importeTotal", "def total_volume(self) -> int:\n total = 0\n for i in self.order_items:\n total += i.total_volume\n return total", "def _total_d(self):\n debit = 0.0\n for l in self.data:\n debit += l['debit']\n self.t_credit += l['credit']\n self.t_balance += l['balance']\n return debit", "def calcularTotal(self):\n subtotales=[]\n for row in range(0,self.tableNC.rowCount()):\n subtotales.append(float(self.tableNC.item(row,2).text()))\n return sum(subtotales)", "def get_total(self):\n # method on the class DomesticMelonOrder\n base_price = 5\n\n if self.species == \"Christmas melons\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def total_volume(self) -> int:\n return self.quantity * self.one_item_volume", "def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price", "def get_total(self):\n\n base_price = 5\n total = (1 + int(self.tax)) * int(self.qty) * base_price\n\n return total", "def get_total(self):\n\n total = super().get_total()\n if self.qty < 10:\n total += 3.00\n return total", "def patrimony_total(self):\n pass", "def somme(self) -> Numeric:\n return query_sum(self.offres(), \"prix\", output_field=models.DecimalField())", "def get_total(self):\n\n base_price = self.get_base_price()\n\n # Christmas Melons are more x1.5 expensive than other melons\n if self.species == \"Christmas Melon\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def get_total(self) -> float:\n if self.__open:\n raise RuntimeError(\"Cash drawer must be closed to count.\")\n total: float = 0.0\n for denom in CashDenomination:\n total += self.__contents[denom] * denom.amount\n return total", "def total_factura(self):\r\n total = 0\r\n for i in self.get_lineas():\r\n precio_articulo = 
i.get_articulo().get_precio()\r\n total += precio_articulo * i.get_cantidad()\r\n return total", "def subtotal(balance,selected_product):\n balance = balance + ((selected_product[\"price\"]))\n return balance", "def get_total(self):\n total = super().get_total()\n\n if self.qty < 10:\n total += 3\n\n return total", "def subtotal(prices):\n\ttotal = Decimal(\"0.00\")\n\n\tfor price in prices:\n\t\ttotal += price[1]\n\n\treturn total", "def get_total(self):\n\n subtotal = super(InternationalMelonOrder, self).get_total()\n if self.qty < 10:\n total = subtotal + 3\n\n return total", "def somme(self) -> Numeric:\n return query_sum(\n self.offre_set.filter(valide=True),\n \"prix\",\n output_field=models.DecimalField(),\n )", "def ppcm_denominateurs(self):\n\t\tl = []\n\t\tn = 1\n\t\tif self.__valide:\n\t\t\tfor m in self.liste_decroissante():\n\t\t\t\t\"\"\" les denominateurs sont positifs \"\"\"\n\t\t\t\te = m.get_coefficient().get_denom().valeur()\n\t\t\t\tif not (e in l):\n\t\t\t\t\tl.append(e)\n\t\t\t\tn *= e\n\t\treturn n / pgcd_liste(l)", "def update_total(self):\n self.objects[self.ids.AMOUNT].setText(\"Total Spend: \\xA3%.2f\" % (self.owner.total_price() / 100))" ]
[ "0.65616846", "0.65148675", "0.63571846", "0.6325566", "0.6288623", "0.628783", "0.62849903", "0.62757003", "0.62676924", "0.61933535", "0.6179521", "0.60922796", "0.60887057", "0.60808444", "0.60640484", "0.6055469", "0.60517925", "0.60244316", "0.6010513", "0.59640276", "0.5957207", "0.58697623", "0.5860395", "0.5853569", "0.5851051", "0.58345", "0.58156097", "0.5795722", "0.57742566", "0.5771166" ]
0.79513353
0
Testing the sum function with a list of integers
def test_sum_list_int(self):
    list_of_int = [1, 2, 3]
    result = sum(list_of_int)
    self.assertEqual(result, 6)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_int(self):\n data = [1, 2, 3]\n result = sum(data)\n self.assertEqual(result, 6)", "def sum_of_numbers(numbers):\r\n return sum(numbers)", "def sum(inputList):\n sum=0#the sum of the list starts from 0\n for num in inputList:\n sum=sum+num#add all number in the list\n print(\"the sum is\",sum)", "def task_8_sum_of_ints(data: List[int]) -> int:\n return sum(data)", "def MySum( l ):\n\n #checking if arg is a list\n if isinstance(l, list):\n\n #adding all numbs\n result = 0\n\n for numb in l:\n result = result + int(numb)\n\n return result\n\n else:\n return \"Argument is not a list\"", "def sum(lst):\n total = 0\n for i in lst:\n total += i\n return total", "def sum_list(input_list: List[float]) -> float:\n return sum(input_list)", "def sum_list(input_list: List[float]) -> float:\n return sum(input_list)", "def sum_numbers(numbers):\n sum = 0\n for number in numbers:\n sum += number\n\n return sum", "def sum_list(numbers):\n\t\n\tif len(numbers) == 0:\n\t\treturn 0 \n\n\tsum = numbers[0] +sum_list(numbers[1:])\n\treturn sum", "def sum_list(input_list: List[float]) -> float:\n sum: float = 0\n for i in input_list:\n sum = sum + i\n return sum", "def sum_all_element(my_list):\n result = 0\n for i in range(len(my_list)):\n result = result + int(my_list[i])\n return result", "def sum_items(numbers):\n total = 0\n for item in numbers:\n total += item\n return total", "def ll_sum(some_list):\n #This function will return total value of all integers combinded.\n result = 0\n if type(some_list) == list: #Check the element is list or not?\n for i in range(len(some_list)):\n result += ll_sum(some_list[i]) # if it's a list call this function \n #so it will call over and over untill it found element that not a list.\n elif type(some_list) == float or type(some_list) == int: #if it's not list return it value.\n result += some_list\n return result", "def get_sum(lst):\n _sum=0\n for i in lst:\n _sum+=i\n return _sum", "def test_suite():\n test(sum_all_elements([1,3,1,4,3,8]) == 5)\n test(sum_all_elements([1,3,5,7]) == 16)\n test(sum_all_elements([1, -7, 10, 23]) == -6)\n test(sum_all_elements(range(1,555,2)) == 76729)", "def add_list_numbers(incoming_list):\n if incoming_list:\n retval = sum(incoming_list)\n else:\n retval = 0\n return retval", "def test_running_sum_multi_positives(self):\n\n argument = [4,2,3,6]\n expected = [4,6,9,15]\n sums.running_sum(argument)\n self.assertEqual(expected,argument,\"the list contains only positive values\")", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n assert s7.add(1.0, 2.0, 100.0) == 103.0", "def find_sum( *my_list):\n # a = len(my_list)- 2\n # i = 0\n # suma=0\n # for i in my_list :\n # suma += my_list[i]\n # i+=1\n # return suma\n return sum(my_list)", "def question_26(list_num: int) -> int:\n return sum(list_num)", "def sum_list(num_list):\n # return sum(num_list)\n sum_list = 0\n for number in num_list:\n sum_list += number\n print(sum_list)\n \n # code prints out the sum_list for each value, increasing by the value each time\n # final output is the sum of numbers\n # currently no output for '[]' as input ", "def test_running_sum_multi_zeros(self):\n argument = [0,0,0,0]\n expected = [0,0,0,0]\n sums.running_sum(argument)\n self.assertEqual(expected,argument,\"the list contains only zeros\")", "def sum_f(f, xs):\n sum_num = 0\n for num in xs:\n int_num = int(num)\n fun_num = f(int_num)\n sum_num = sum_num+fun_num\n return sum_num", "def lsum (inlist):\r\n s = 0\r\n for item in inlist:\r\n s 
= s + item\r\n return s", "def sum(*args):\n result = 0\n for i in args:\n result += i\n return result", "def summed(L):\r\n result = 0\r\n for e in L:\r\n result = result + e # or result += e\r\n return result", "def add_list_numbers(incoming_list):\n # summation=0\n if incoming_list:\n summation = sum(incoming_list)\n else:\n summation = 0\n return summation", "def test_running_sum_multi_mix(self):\n argument = [4,0,2,-5,0]\n expected = [4,4,6,1,1]\n sums.running_sum(argument)\n self.assertEqual(expected,argument, \"the list contains a mixture of negative\"\n + \"and positive values. \")", "def test_total_integers(self):\n int_list = [5, 10, 50, 35]\n assert cr.total(int_list) == 100" ]
[ "0.83072984", "0.7623929", "0.7614517", "0.7613487", "0.7445052", "0.74247944", "0.7406183", "0.7406183", "0.7399114", "0.7382333", "0.73426735", "0.73413736", "0.7324949", "0.7321548", "0.728999", "0.7227425", "0.72244704", "0.7189016", "0.71679366", "0.71667075", "0.7156018", "0.71532184", "0.71429396", "0.7141126", "0.7134683", "0.70989746", "0.70711535", "0.70587665", "0.70362014", "0.7024751" ]
0.8694991
0
Testing the sum function with a list of fractions
def test_sum_list_fraction(self):
    list_of_fractions = [Fraction(1, 4), Fraction(1, 4), Fraction(1, 2)]
    result = sum(list_of_fractions)
    self.assertEqual(result, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sum_list_floats(self):\n\n list_of_floats = [1.2, 2.34, 2.001]\n result = sum(list_of_floats)\n\n self.assertEqual(result, 5.541)", "def test_add(self):\n newvalues = Fraction(1,2)+Fraction(1,2)\n fraction1 = Fraction(newvalues[0],newvalues[1])\n self.assertEqual(str(fraction1),\"4/4\")", "def fsum(items):\n return math.fsum(items)", "def question_23(list_num: float) -> float:\n return sum(list_num) / len(list_num)", "def test_fraction_math_ops(self):\n fract1 = source.Fraction(5, 3)\n fract2 = source.Fraction(2, 3)\n self.assertEqual(fract1 + fract2, source.Fraction(7, 3))\n self.assertEqual(fract1 + 5, source.Fraction(20, 3))\n self.assertEqual(3 + fract1, source.Fraction(14, 3))\n self.assertEqual(fract1 * fract2, source.Fraction(10, 9))\n self.assertEqual(5 * fract2, source.Fraction(10, 3))", "def fsum(iterable):\n return 0.0", "def sum_list(input_list: List[float]) -> float:\n return sum(input_list)", "def sum_list(input_list: List[float]) -> float:\n return sum(input_list)", "def div_sum(data: list) -> int:\n\n def even_quotient(nums: list) -> int:\n \"\"\"Finds the quotient of the only two numbers in the list that evennly divide.\"\"\"\n for i in range(len(nums[:-1])):\n for j in range(i + 1, len(nums)):\n if nums[i] % nums[j] == 0:\n return nums[i] // nums[j]\n elif nums[j] % nums[i] == 0:\n return nums[j] // nums[i]\n\n total = 0\n for row in data:\n total += even_quotient(row)\n return total", "def split_and_sum(expression):\n\n split_vals = expression.split('+')\n float_vals = [float(v) for v in split_vals]\n total = sum([v for v in float_vals if v > 0.0])\n\n return total", "def test_total_floats(self):\n float_list = [5.1, 10.321, 50.0, 3.98, 4.4]\n self.assertAlmostEqual(cr.total(float_list), 73.801, places=2)", "def decimal_sum(*args):\n res = 0\n for numb in args:\n try:\n res += Decimal(str(numb))\n except:\n print(f\"Argument [ {numb} ] is skipped... 
not a float\")\n return res", "def test_div():\n l = [1, 2, 3, 4]\n assert s7.div(*l) == 1 / 2 / 3 / 4\n assert s7.div(100, 20) == 5\n assert s7.div(100.0, 20) == 5.0\n assert s7.div(100, 20.0) == 5.0", "def test_sum(n, m, o, result):\n from series import sum_series\n assert sum_series(n, m, o) == result", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n assert s7.add(1.0, 2.0, 100.0) == 103.0", "def sumDivisor(inputList):\n result = 0\n for i in inputList:\n result += i\n return result", "def test_div(self):\n newvalues= Fraction(7,10)/Fraction(4,5)\n fraction1 = Fraction(newvalues[0],newvalues[1])\n self.assertEqual(str(fraction1),\"35/40\")", "def total(num_list):\n num_sum = 0.0\n for item in num_list:\n num_sum += item\n return num_sum", "def sum_list(input_list: List[float]) -> float:\n sum: float = 0\n for i in input_list:\n sum = sum + i\n return sum", "def sumListInputs(factors=[3, 5], upper_bound=1000):\n t0 = time.time()\n multiples = []\n for num in range(1, upper_bound):\n for factor in factors:\n # add num to multiples if it is a multiple of any of the factors and has not already been added (in case it is a multiple of more than one factor)\n if num % factor == 0 and num not in multiples:\n multiples.append(num)\n ans = sum(multiples)\n t1 = time.time()\n print('sumListInputs: ' + str(t1-t0))\n return ans", "def test_I_fraction(self):\n nu = np.array([0, 1, 10, 101, 450, 1001])+0.5\n x = np.array([1e-4, 1, 1e2, 1e3])\n result = bessel_sk.i_fraction(x, nu)\n expected = np.zeros((len(nu), len(x)))\n for i in range(len(nu)):\n for j in range(len(x)):\n X = x[j]\n NU = nu[i]\n expected[i,j] = mpmath.besseli(NU, X)/mpmath.besseli(NU+1, X)\n assert_almost_equal(result/expected, 1)", "def sum_multiples(num):\n pass", "def test_sum_list_int(self):\n\n list_of_int = [1, 2, 3]\n result = sum(list_of_int)\n\n self.assertEqual(result, 6)", "def test_sum_expression(self):\n # The logic of SumExpression is checked in the above tests (which include\n # addition and subtraction). 
Here, we only check that constructing a\n # SumExpression flattens the list.\n structure_memoizer = {\n defaults.DENOMINATOR_LOWER_BOUND_KEY: 0.0,\n defaults.GLOBAL_STEP_KEY: tf.compat.v2.Variable(0, dtype=tf.int32)\n }\n\n term_values = [0, 1, 2, 3, 4]\n\n def create_dummy_expression(value):\n \"\"\"Creates an empty `Expression` with the given extra constraints.\"\"\"\n basic_expression_object = basic_expression.BasicExpression(\n [term.TensorTerm(value)])\n return expression.ExplicitExpression(basic_expression_object,\n basic_expression_object)\n\n expressions = [create_dummy_expression(value) for value in term_values]\n\n # Each of our Expressions contains exactly one term, so by checking its\n # value we can uniquely determine which subexpression is which.\n def term_value(expression_object):\n terms = expression_object.penalty_expression._terms\n self.assertEqual(1, len(terms))\n return terms[0].tensor(structure_memoizer)\n\n sum1 = expression.SumExpression([expressions[0], expressions[1]])\n sum2 = expression.SumExpression([expressions[2]])\n sum3 = expression.SumExpression([expressions[3]])\n sum4 = expression.SumExpression([expressions[4]])\n sum5 = expression.SumExpression([sum3, sum4])\n sum6 = expression.SumExpression([sum1, sum2, sum5])\n\n actual_expressions = sum6._expressions\n self.assertEqual(5, len(actual_expressions))\n for ii in xrange(5):\n self.assertEqual(ii, term_value(expressions[ii]))\n self.assertEqual(ii, term_value(actual_expressions[ii]))", "def sumSquares(aList):\r\n if isinstance(aList, list):\r\n total=0\r\n for value in aList:\r\n if(isinstance(value, int) or isinstance(value, float)) and abs(value)%3==0:\r\n total+=value**2\r\n return total\r\n else:\r\n return 'error'", "def test_add_floats(self):\n print(\"---running test_add_floats\")\n result = some_math.add(10.5, 2)\n assert result == 12.5", "def test_list_int(self):\n result = div(2, 4)\n self.assertEqual(result, 0.5)", "def sum_mixed_list(mxd_lst: List[Union[int, float]]) -> float:\n return sum(mxd_lst)", "def test_running_sum_multi_mix(self):\n argument = [4,0,2,-5,0]\n expected = [4,4,6,1,1]\n sums.running_sum(argument)\n self.assertEqual(expected,argument, \"the list contains a mixture of negative\"\n + \"and positive values. \")", "def ratio_calculator(numerator, denominator):\n ratios = []\n for i in numerator:\n for j in denominator:\n if i[0] == j[0] and j[1] != 0:\n ratios.append([i[0], round(float(i[1]) / j[1], 3)])\n break\n elif i[0] == j[0]:\n ratios.append([i[0], 0])\n return ratios" ]
[ "0.72050667", "0.63461196", "0.6275821", "0.6267266", "0.6242244", "0.619898", "0.61726063", "0.61726063", "0.6136146", "0.60958403", "0.6044922", "0.60098386", "0.60013944", "0.59871864", "0.59814495", "0.59613097", "0.59255284", "0.59224653", "0.5904114", "0.58600134", "0.5854559", "0.5810534", "0.5804654", "0.5792771", "0.57252765", "0.5708859", "0.57047415", "0.56883925", "0.56845015", "0.5669283" ]
0.84436464
0
Testing the sum function with a list of float values
def test_sum_list_floats(self):
    list_of_floats = [1.2, 2.34, 2.001]
    result = sum(list_of_floats)
    self.assertEqual(result, 5.541)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sum_list(input_list: List[float]) -> float:\n return sum(input_list)", "def sum_list(input_list: List[float]) -> float:\n return sum(input_list)", "def sum_list(input_list: List[float]) -> float:\n sum: float = 0\n for i in input_list:\n sum = sum + i\n return sum", "def fsum(items):\n return math.fsum(items)", "def test_total_floats(self):\n float_list = [5.1, 10.321, 50.0, 3.98, 4.4]\n self.assertAlmostEqual(cr.total(float_list), 73.801, places=2)", "def fsum(iterable):\n return 0.0", "def sum_mixed_list(mxd_lst: List[Union[int, float]]) -> float:\n return sum(mxd_lst)", "def add(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n \n summation = str(ft.reduce(oper.add,values))\n return summation", "def test_sum_list_fraction(self):\n\n list_of_fractions = [Fraction(1, 4), Fraction(1, 4), Fraction(1, 2)]\n result = sum(list_of_fractions)\n\n self.assertEqual(result, 1)", "def add(self, *args):\n sum = 0\n for arg in args:\n sum += float(arg)\n return sum", "def sum_f(f, xs):\n sum = 0\n for x in xs:\n sum += f(x)\n return sum", "def total(num_list):\n num_sum = 0.0\n for item in num_list:\n num_sum += item\n return num_sum", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n assert s7.add(1.0, 2.0, 100.0) == 103.0", "def sum(self):\n total = 0\n for el in self.__list:\n if type(el) is int or type(el) is float:\n total += el\n elif not el:\n continue\n else:\n total += len(el)\n return total", "def decimal_sum(*args):\n res = 0\n for numb in args:\n try:\n res += Decimal(str(numb))\n except:\n print(f\"Argument [ {numb} ] is skipped... not a float\")\n return res", "def test_sum_list_int(self):\n\n list_of_int = [1, 2, 3]\n result = sum(list_of_int)\n\n self.assertEqual(result, 6)", "def test_add_floats(self):\n print(\"---running test_add_floats\")\n result = some_math.add(10.5, 2)\n assert result == 12.5", "def split_and_sum(expression):\n\n split_vals = expression.split('+')\n float_vals = [float(v) for v in split_vals]\n total = sum([v for v in float_vals if v > 0.0])\n\n return total", "def compare_sum(values, weights):\n return np.sum(values.numpy())", "def sum(lists) -> list:\r\n return list(np.sum(lists, 0))", "def sum_f(f, xs):\n sum_num = 0\n for num in xs:\n int_num = int(num)\n fun_num = f(int_num)\n sum_num = sum_num+fun_num\n return sum_num", "def check_sum(cls, values):\n _v = [0 if v is None else v for v in values.values()]\n if abs(sum(_v) - 1) > 0.01:\n raise ValueError(\n f\"All values must approximately sum to 1. 
Sum to {sum(_v)}\"\n )\n return values", "def ll_sum(some_list):\n #This function will return total value of all integers combinded.\n result = 0\n if type(some_list) == list: #Check the element is list or not?\n for i in range(len(some_list)):\n result += ll_sum(some_list[i]) # if it's a list call this function \n #so it will call over and over untill it found element that not a list.\n elif type(some_list) == float or type(some_list) == int: #if it's not list return it value.\n result += some_list\n return result", "def test_running_sum_multi_zeros(self):\n argument = [0,0,0,0]\n expected = [0,0,0,0]\n sums.running_sum(argument)\n self.assertEqual(expected,argument,\"the list contains only zeros\")", "def lsquare_of_sums(inlist):\r\n s = sum(inlist)\r\n return float(s)*s", "def sumSquares(aList):\r\n if isinstance(aList, list):\r\n total=0\r\n for value in aList:\r\n if(isinstance(value, int) or isinstance(value, float)) and abs(value)%3==0:\r\n total+=value**2\r\n return total\r\n else:\r\n return 'error'", "def test_sum(n, m, o, result):\n from series import sum_series\n assert sum_series(n, m, o) == result", "def sum_values(values):\n return (sum(values))", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total", "def test_list_int(self):\n data = [1, 2, 3]\n result = sum(data)\n self.assertEqual(result, 6)" ]
[ "0.8083567", "0.8083567", "0.7788616", "0.7374771", "0.7311574", "0.72328746", "0.7066626", "0.6972362", "0.6807591", "0.6759573", "0.67543155", "0.67068624", "0.66648847", "0.65459245", "0.6536975", "0.65316075", "0.644555", "0.6392924", "0.6362586", "0.6353249", "0.63093615", "0.62920046", "0.62817997", "0.62808037", "0.6265804", "0.6262655", "0.62620646", "0.6208531", "0.6187785", "0.6182394" ]
0.861433
0
Returns a sorted list of selfplay data directories.
def list_selfplay_dirs(base_dir):
    model_dirs = [os.path.join(base_dir, x)
                  for x in tf.io.gfile.listdir(base_dir)]
    return sorted(model_dirs, reverse=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_data(self):\n data_list = []\n for root, dirs, files in os.walk(pathfinder.data_path()):\n for name in files:\n data_list.append(os.path.join(root, name))\n return data_list", "def get_dirs():\n # join glob matchers\n dirnames = [\n str(dir_path.relative_to(get_data_dir()))\n for dir_path in get_data_dir().rglob(\"*\")\n if dir_path.is_dir()\n ]\n\n return dirnames", "def list_all():\n if os.path.exists(DATA_DIR):\n return os.listdir(DATA_DIR)\n return []", "def get_dirs(self, path):\n ds = []\n try:\n for d in os.listdir(path):\n if os.path.isdir(os.path.join(path, d)):\n ds.append(d)\n except OSError:\n pass\n ds.sort()\n return ds", "def get_directories(self):\n\t\tdirectories = []\n\t\tfor i in range(self.directoryModel.get_row_count()):\n\t\t\tdirectories.append((\n\t\t\t\t\tself.directoryModel.get_value(i, 'directoryTagName'),\n\t\t\t\t\tself.directoryModel.get_value(i, 'directory')\n\t\t\t\t\t))\n\t\treturn directories", "def list_dirs(self):\n return self.list_groups()", "def _candidate_dirs(self, key: CacheKey):\n return [os.path.join(d, str(key))\n for d in self.data_dirs]", "def get_dirs(self, **args):\n try:\n data_dir = args['data_dir']\n except:\n print('please provide data_dir in configuration file')\n return None, None\n\n dirs = []\n scan_inxs = []\n for name in os.listdir(data_dir):\n subdir = os.path.join(data_dir, name)\n if os.path.isdir(subdir):\n # exclude directories with fewer tif files than min_files\n if len(glob.glob1(subdir, \"*.tif\")) < self.min_files and len(glob.glob1(subdir, \"*.tiff\")) < self.min_files:\n continue\n last_digits = re.search(r'\\d+$', name)\n if last_digits is not None:\n scan = int(last_digits.group())\n if scan >= self.scan_range[0] and scan <= self.scan_range[1] and not scan in self.exclude_scans:\n dirs.append(subdir)\n scan_inxs.append(scan)\n # The directory with the smallest index is placed as first, so all data files will\n # be alligned to the data file in this directory\n scans_order = np.argsort(scan_inxs).tolist()\n first_index = scan_inxs.pop(scans_order[0])\n first_dir = dirs.pop(scans_order[0])\n scan_inxs.insert(0, first_index)\n dirs.insert(0, first_dir)\n return dirs, scan_inxs", "def _list_dir(self):\n return [os.path.join(self.cache_dir, fn)\n for fn in os.listdir(self.cache_dir)]", "def listdirs(self):\n return self.list_groups()", "def get_data_files():\n\n data_files = []\n for d, dirs, filenames in os.walk(share_jupyterhub):\n rel_d = os.path.relpath(d, here)\n data_files.append((rel_d, [os.path.join(rel_d, f) for f in filenames]))\n return data_files", "def data_directories(self):\n\n return [simulation.outdir for simulation in self.simulations]", "def getFilesList(data):\n\n filesList = []\n\n if os.path.isdir(data):\n logging.info(\"Using files from \" + data)\n #Create a list containing the file names\n for root, dirs, files in os.walk(data):\n for filename in files:\n filesList.append(os.path.join(root,filename))\n\n else:\n logging.info(\"Using file \" + data)\n filesList.append(os.path.abspath(data))\n\n return sorted(filesList)", "def get_dir(self) -> typing.List[str]:\n if get_engineering_mode():\n return self.super_dir()\n return self.get_filtered_dir_list()", "def root_directory_list(self) -> str:\n return self.__root_directory_list", "def create_directory_list(root_dir: str):\n if not os.path.exists(root_dir):\n raise FileNotFoundError(\"Directory {} does not exist\".format(root_dir))\n\n # List all directories associated to different videos.\n recording_path_list = [os.path.join(root_dir, f) 
for f in os.listdir(root_dir)]\n\n input_data_path = []\n for g in recording_path_list:\n # Append the different directories associated to different video frame intervals.\n input_data_path.extend([os.path.join(g, f) for f in os.listdir(g)])\n\n return input_data_path", "def directories(self):\n directories = list(set([\n '/'.join(f.split('/')[:-1]) for f in self.files\n ]))\n return sorted(directories)", "def get_dirnames(path):\n storage = DefaultStorage()\n dirnames = storage.listdir(path)[0]\n dirnames.sort()\n return dirnames", "def _set_dirs(self, datafolder):\n self.List_of_dir = []\n self.List_of_files = dict()\n folders = os.listdir(datafolder)\n folders.sort()\n for i in folders:\n if os.path.isdir(os.path.join(datafolder,i)) and i != '.ipynb_checkpoints': # ignore .ipynb_checkpoints, allowing the generator to work in Amazon\n self.List_of_dir.append(os.path.join(datafolder,i))\n self.List_of_files[os.path.join(datafolder,i)]=[]\n for file in os.listdir(os.path.join(datafolder, i, 'Input')):\n if file.split('.')[-1] == 'hdf5':\n self.List_of_files[os.path.join(datafolder,i)].append(file.split('.')[-2])\n self._nb_dir = len(self.List_of_dir)", "def GetSongFilenames():\n\n\t## Loop through each directory\n\tsong_files = []\n\tfor root, dirs, fnames in os.walk(\"_data\\\\fma_small\\\\\"):\n\t\t\n\t\t## Skip the first level\n\t\tif root == \"_data\\\\fma_small\\\\\":\n\t\t\tcontinue\n\n\t\t## Otherwise collect the files, appending\n\t\t## the root path.\n\t\tsong_files += [root+\"\\\\\"+f for f in fnames]\n\n\treturn song_files", "def getDataFiles(directoryName):\r\n \r\n return listdir(directoryName)", "def list_directory(self, raw=False):\n return sorted(self.ftp.nlst(self.FTP_PATH))", "def _local_dir(self):\n return []", "def fetch_dset_dirs(dset_name=None):\n assert (dset_name is None) or (dset_name in DATASET_DIRS), \"invalid name\"\n\n dset_name = \"default\" if dset_name is None else dset_name\n\n home = os.path.expanduser(\"~\")\n\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])", "def get_directory_list(self):\r\n lines = []\r\n self.ftp.retrlines('LIST', lines.append)\r\n return lines", "def getdirs():\n dirs = [i for i in os.listdir(dname) if not \\\n os.path.isfile(os.path.join(dname, i))]\n return dirs", "def get_list_of_folders(self, end_of_folder_name):\n folder_list = [os.path.basename(f) for f in glob.glob(os.path.join(self.parent_folder, end_of_folder_name))]\n folder_list.sort()\n return folder_list", "def _listdirs(self, file):\n zf = zipfile.ZipFile(file)\n dirs = []\n for name in zf.namelist():\n dn = os.path.dirname(name)\n dirs.append(dn)\n dirs.sort()\n return dirs", "def get_all_file_paths_labels(data_root: str) -> list:\n\n speaker_dirs = os.listdir(data_root)\n all_files = []\n i = 0\n for d in speaker_dirs:\n files = glob.iglob(data_root + '/' + d + '/**/*.wav', recursive=True)\n files = [[f, i] for f in files]\n all_files += files\n i += 1\n all_files = sorted(all_files, key=lambda x:x[0], reverse=False)\n\n return all_files", "def getAllDirs(self):\n\n dirs = [ self ]\n for d in self._subdirs:\n if d.hasImages():\n dirs += d.getAllDirs()\n return dirs" ]
[ "0.7210885", "0.7076178", "0.6867692", "0.685712", "0.68322694", "0.6808806", "0.68082637", "0.6803831", "0.6774294", "0.6724709", "0.6708498", "0.6695982", "0.6552779", "0.65096354", "0.6490297", "0.64788204", "0.64532286", "0.64205575", "0.6420415", "0.6389557", "0.6384688", "0.6380452", "0.6364431", "0.6356881", "0.6341663", "0.6320443", "0.6304349", "0.62988967", "0.62704307", "0.6262696" ]
0.7105153
1
Waits for all of the awaitable objects (e.g. coroutines) in aws to finish. All the awaitable objects are waited for, even if one of them raises an exception. When one or more awaitables raise an exception, the exception from the awaitable with the lowest index in the aws list will be re-raised.
def wait(aws):
    aws_list = aws if isinstance(aws, list) else [aws]
    results = asyncio.get_event_loop().run_until_complete(asyncio.gather(
        *aws_list, return_exceptions=True))
    # If any of the cmds failed, re-raise the error.
    for result in results:
        if isinstance(result, Exception):
            raise result
    return results if isinstance(aws, list) else results[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait(aws):\n\n aws_list = aws if isinstance(aws, list) else [aws]\n results = asyncio.get_event_loop().run_until_complete(asyncio.gather(\n *aws_list, return_exceptions=True))\n # If any of the cmds failed, re-raise the error.\n for result in results:\n if isinstance(result, Exception):\n raise result\n return results if isinstance(aws, list) else results[0]", "async def wait_graceafully(aws, timeout=None):\n\n while True:\n done, pending = await asyncio.wait(aws, timeout=15)\n\n for t in done:\n if t.exception():\n print(\"exception:\", patch.task_get_name(t), t.exception())\n elif t.result():\n print(\"result:\", patch.task_get_name(t), t.result())\n\n if not pending:\n break\n\n for t in pending:\n t.cancel()\n\n aws = pending", "def await(self):\n self.simulation_queue.join()\n return self.result_queue, self.fail_queue", "def swait_multiple(cos):\n asyncio.get_event_loop().run_until_complete(asyncio.wait(cos))", "def wait_all(futures, timeout=None):\n return get_all(futures, timeout)", "def wait_for_results(\n request,\n awaitables: List[Awaitable],\n timeout: int = 3,\n cancel_unfinished: bool = True\n) -> List[Any]:\n done, not_done = wait_for_awaitables(\n request, awaitables, timeout, cancel_unfinished\n )\n \n return [ftr.result() for ftr in done]", "def joinall(greenlets, timeout=None, raise_error=False, count=None):\n if not raise_error:\n return wait(greenlets, timeout=timeout, count=count)\n\n done = []\n for obj in iwait(greenlets, timeout=timeout, count=count):\n if getattr(obj, 'exception', None) is not None:\n if hasattr(obj, '_raise_exception'):\n obj._raise_exception()\n else:\n raise obj.exception\n done.append(obj)\n return done", "def wait_for_awaitables(\n request,\n awaitables: List[Awaitable],\n timeout: int = 3,\n cancel_unfinished: bool = True\n) -> Tuple[List[asyncio.Future], List[asyncio.Future]]:\n running = [\n asyncio.run_coroutine_threadsafe(\n awaitable,\n loop=request.loop\n )\n for awaitable in awaitables\n ]\n \n done, not_done = futures.wait(running, timeout=timeout)\n \n if cancel_unfinished:\n for ftr in not_done:\n ftr.cancel()\n \n return done, not_done", "async def wait_for_complete(self, workers: Iterable[Worker] | None = None) -> None:\n\n await asyncio.gather(*[worker.wait() for worker in (workers or self)])", "def invoke_all_and_wait(self):\n list_promise = []\n for thread in self.__list_thread:\n thread.start()\n list_promise.append(thread)\n for process in list_promise: process.join()", "def wait(self) -> None:\n if self.futures:\n wait(self.futures, return_when='FIRST_COMPLETED').done", "def wait_tasks_to_finish(tasks_list):\n for task in tasks_list:\n task.wait_for_terminated()", "async def run_all_clients():\n completed_clients = 0\n for client_result in asyncio.as_completed(clients):\n completed_clients += await client_result\n return completed_clients", "async def run_with_gather():\n # the following are executed sequentially\n\n # timeout will generate concurrent.futures._base.TimeoutError, which kills the process\n try:\n await asyncio.wait_for(sleep_some(7), timeout=1)\n except BaseException as ex:\n print('timeout exception:', type(ex), traceback.format_exc())\n\n await asyncio.gather(sleep_some(5), sleep_some(4), sleep_some(3))\n await sleep_some(2)", "async def wait_until_done(self) -> None:\n ...", "async def wait_for_uuids(self, application, uuids):\n debug(\"Waiting for uuids for {}: {}\".format(application, uuids))\n waitfor = len(uuids)\n finished = 0\n\n while waitfor > finished:\n for uid in uuids:\n await 
asyncio.sleep(10)\n\n if uuid not in self.state[application]['actions']:\n self.state[application]['actions'][uid] = \"pending\"\n\n status = self.state[application]['actions'][uid]\n\n # Have we already marked this as done?\n if status in [\"pending\", \"running\"]:\n\n debug(\"Getting status of {} ({})...\".format(uid, status))\n status = await self.n2vc.GetPrimitiveStatus(\n self.ns_name,\n uid,\n )\n debug(\"...state of {} is {}\".format(uid, status))\n self.state[application]['actions'][uid] = status\n\n if status in ['completed', 'failed']:\n finished += 1\n\n debug(\"{}/{} actions complete\".format(finished, waitfor))\n\n # Wait for the primitive to finish and try again\n if waitfor > finished:\n debug(\"Waiting 10s for action to finish...\")\n await asyncio.sleep(10)", "def wait(self, jobs):\n while True:\n try:\n for job in self.query(jobs=jobs):\n if job['status'] == 'completed':\n jobs.remove(job['uuid'])\n yield (job)\n except JobClientError as e:\n logger.error(e.message)\n\n if len(jobs) > 0:\n time.sleep(self._status_update_interval_secs)\n else:\n break", "async def run_multiple_tasks():\n tasks = []\n for i in range(5,2,-1):\n tasks.append(asyncio.create_task(sleep_some(i)))\n\n for t in tasks:\n await t\n print('all done')", "def multi_future(children, quiet_exceptions=()):\n if isinstance(children, dict):\n keys = list(children.keys())\n children = children.values()\n else:\n keys = None\n children = list(map(convert_yielded, children))\n assert all(is_future(i) for i in children)\n unfinished_children = set(children)\n\n future = Future() # it's a collectAll future\n if not children:\n future.set_result({} if keys is not None else [])\n\n def callback(f_child):\n unfinished_children.remove(f_child)\n if not unfinished_children: \n # all child futures are done!\n result_list = []\n for f in children:\n try:\n result_list.append(f.result())\n except Exception as e:\n if future.done():\n if not isinstance(e, quiet_exceptions):\n logging.error(\"Multiple exceptions in yield list\")\n else:\n future.set_exc_info(sys.exc_info())\n if not future.done():\n if keys is not None:\n future.set_result(dict(zip(keys, result_list)))\n else:\n future.set_result(result_list)\n\n listening = set()\n for child in children:\n if child not in listening:\n listening.add(child)\n child.add_done_callback(callback) # when child future is done, tell parent future\n return future", "def completed_prefetch(self, blocking_wait=False, max_yield=999):\n\n for worker, obj_ref in self.completed(blocking_wait=blocking_wait):\n self._fetching.append((worker, obj_ref))\n\n for _ in range(max_yield):\n if not self._fetching:\n break\n\n yield self._fetching.popleft()", "def __await__(self):\n return self.waiter.__await__()", "def __await__(self):\n return self.waiter.__await__()", "def join(self):\r\n if self._unfinished_tasks > 0:\r\n yield from self._finished.wait()", "async def main():\n futures = [fetch_ip(service) for service in SERVICES]\n # Schedule tasks with Wait\n # Retrieve results from the coroutine. Use done, pending. 
\n done, pending = await asyncio.wait( \n futures, return_when=FIRST_COMPLETED\n )\n print(done.pop().result())", "def wait_threads(self):\n\n self.queue.join()", "async def test_exectution_limit_single_wait(\n coresys: CoreSys, loop: asyncio.BaseEventLoop\n):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n self.run = asyncio.Lock()\n\n @Job(limit=JobExecutionLimit.SINGLE_WAIT)\n async def execute(self, sleep: float):\n \"\"\"Execute the class method.\"\"\"\n assert not self.run.locked()\n async with self.run:\n await asyncio.sleep(sleep)\n\n test = TestClass(coresys)\n\n await asyncio.gather(*[test.execute(0.1), test.execute(0.1), test.execute(0.1)])", "def _check_awaiting(self):\r\n # TODO: check for wait loops\r\n for w in list(self._awaiting.values()):\r\n self._try_register_platform(w[\"instance\"], w[\"kind\"], w[\"parent\"], w[\"wait\"], awaiting=True)", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()" ]
[ "0.6854887", "0.64489365", "0.60607123", "0.59655637", "0.5942415", "0.58964247", "0.58717173", "0.5787581", "0.5619449", "0.5607443", "0.55679595", "0.551879", "0.54877734", "0.5482153", "0.5470917", "0.540138", "0.5392612", "0.5390896", "0.5383947", "0.53785133", "0.53709394", "0.53709394", "0.536037", "0.5333732", "0.53311986", "0.5330808", "0.5323492", "0.5312741", "0.5312741", "0.5312741" ]
0.67563105
1
Check to see that only warn and info output appears in the stream. The first line may start with WARN] or WARNING] depending on whether 'WARN' has been registered as a global log level. See options_bootstrapper.py.
def assertWarnInfoOutput(self, lines):
    self.assertEqual(2, len(lines))
    self.assertRegexpMatches(lines[0], '^WARN\w*] warn')
    self.assertEqual('INFO] info', lines[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_for_warnings(self):\n log = str()\n print(self.default_starter_args + self.arguments)\n if not self.log_file.exists():\n print(str(self.log_file) + \" not there. Skipping search\")\n return\n print(str(self.log_file))\n with self.log_file.open(errors=\"backslashreplace\") as log_f:\n for line in log_f.readline():\n if \"WARN\" in line or \"ERROR\" in line:\n print(line.rstrip())\n log += line.rstrip()\n attach(log, \"WARN or ERROR lines from starter log\")", "def no_log_warn(logical_line):\n\n msg = (\"M352: LOG.warn is deprecated, please use LOG.warning!\")\n if \"LOG.warn(\" in logical_line:\n yield (0, msg)", "def no_log_warn(logical_line):\n\n msg = (\"G330: LOG.warn is deprecated, please use LOG.warning!\")\n if \"LOG.warn(\" in logical_line:\n yield (0, msg)", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def warn(cls, message, *args, **kwargs):\n\n if cls.print_level > 1:\n print(cls.marker_theme.warning() + cls.time() + cls.parse(message))", "def _filter_info_warning(lines):\n lines = list(filter(lambda x: 'RuntimeWarning' not in x, lines))\n return lines", "def warning(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['warning']:\n self.print_lines(self.colored(('magenta', 'bold'), lines))", "def warn(self, *args):\n\n if self.is_on(_Log.WARN):\n self._write(self._out, *args)", "def test_warn():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.warn(\"Hello World!\")\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, has_length(1))\n assert_that(visitor.violations[0][1], is_(equal_to(WARN_VIOLATION)))", "def test_common_case(self):\n loglevel_from_command_line = \"WARNING\"\n assert output(self.msg, \"INFO\", loglevel_from_command_line)", "def _filter_stdout(self, stdout):\n def is_important_line(line):\n warnings_to_ignore = [\n 'Unable to load library icui18n',\n ]\n for warning in warnings_to_ignore:\n if warning in line:\n return False\n return True\n\n return [line for line in stdout.strip().split('\\n')\n if line and is_important_line(line)]", "def warn():\n pass", "def warnings():\n return THE_LOGGER.warnings", "def log_check_warnings(self):\n self._log_check_warnings_object(self._info)\n self._log_check_warnings_object(self._tags)\n self._log_check_warnings_object(self._schemes)\n self._log_check_warnings_object(self._paths)\n self._log_check_warnings_object(self._securityDefinitions)\n self._log_check_warnings_object(self._definitions)\n pass", "def warn(self, module, message):\n if self.log_level <= consts.LOG_LEVEL_WARN:\n print(\"WARN : %s: %s\" % (module, message))", "def hearWarning(self, warnlvl, screenname):\n print screenname,\"warned us\"", "def has_warnings_active(self) -> bool:", "def warn(*args: object, output: TextIO = sys.stdout, spaces_between: bool = True, end_line: bool = True):\n logline(*args, output=output, spaces_between=spaces_between, end_line=end_line, warning_mode=True)", "def format_warn(self, *args):\n if self._pretty:\n return self.format_multiline_message(*args, color='magenta', start='[WARN] ', multiline=' ~~ ')\n return self.format_multiline_message(*args)", "def warn(self) -> bool:\n return self._warn", "def warn(self) -> list:\n return self.__wrn", "def print_warn(txt):\n print(\"\\n[WARN] \" + 
txt + \"\\n\", file=sys.stderr)", "def warning(self, msg, *args):\n if self.lvl<=logging.WARNING: return self._log(msg, *args)" ]
[ "0.72353804", "0.6597861", "0.651159", "0.64776546", "0.64776546", "0.64776546", "0.64776546", "0.64776546", "0.64776546", "0.64776546", "0.64776546", "0.6448697", "0.6411868", "0.63813674", "0.63161284", "0.63127244", "0.6260447", "0.625881", "0.6250657", "0.6216382", "0.61502236", "0.6137961", "0.6132356", "0.61284757", "0.6111459", "0.60914534", "0.60281616", "0.6012496", "0.5992551", "0.59730387" ]
0.7015353
1
Parses instruction's arguments using instructionargument module
def parse_args(self, instruction: ET.Element): self.__args = [] cur_order = [] for child in instruction: arg = Argument(child) if arg.order in cur_order: raise UnexpectedXMLStructure( 'Wrong order of argument element "{}"'.format(arg.data) ) self.__args.append(arg) cur_order.append(arg.order) self.__args.sort(key=lambda x: x.order) if len(self.__args) > 0: if self.__args[0].order != 1: raise UnexpectedXMLStructure("Wrong order of argument element") self.verify()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_arguments(args):", "def decode_instruction(instruction):\n if not instruction.endswith(INST_TERM):\n raise InvalidInstruction('Instruction termination not found.')\n\n # Use proper encoding\n instruction = utf8(instruction)\n\n # Get arg size\n elems = instruction.split(ELEM_SEP, 1)\n\n try:\n arg_size = int(elems[0])\n except Exception:\n # Expected ValueError\n raise InvalidInstruction(\n 'Invalid arg length.' +\n ' Possibly due to missing element separator!')\n\n arg_str = elems[1][:arg_size]\n\n remaining = elems[1][arg_size:]\n\n args = [arg_str]\n\n if remaining.startswith(ARG_SEP):\n # Ignore the ARG_SEP to parse next arg.\n remaining = remaining[1:]\n elif remaining == INST_TERM:\n # This was the last arg!\n return args\n else:\n # The remaining is neither starting with ARG_SEP nor INST_TERM.\n raise InvalidInstruction(\n 'Instruction arg (%s) has invalid length.' % arg_str)\n\n next_args = GuacamoleInstruction.decode_instruction(remaining)\n\n if next_args:\n args = args + next_args\n\n return args", "def Args(parser):", "def load_args(init_regs: Registers, memory: Memory, args: List[str]):\n init_regs[\"$a0\"] = len(args) # argc\n\n argv: List[int] = []\n for arg in args:\n ptr = memory.extend_stack(bytesify(arg))\n argv.append(ptr)\n\n argv.append(0)\n\n for idx, ptr in enumerate(argv[::-1]):\n memory.extend_stack(bytesify(ptr, size=4), align_data=True)\n\n init_regs[\"$a1\"] = memory.ram[\"stack\"][\"stops\"] # argv", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! 
(\" + argument + \")\")", "def add_arguments(self, parser):", "def parseArguments(self):\n iterator = iter(sys.argv[1:]) # Skip file name\n for argument in iterator:\n if len(argument) < 2 or argument[:2] != '--':\n self.error('syntax error \"{}\"'.format(argument))\n else:\n def getValueOfArgument(): return next(iterator)\n self.parseArgument(argument[2:], getValueOfArgument)", "def get_arg(instruction, itype):\n\n if itype == itypes.family_code:\n return instruction[7:2]\n elif itype == itypes.opcode:\n return instruction[7:]\n elif itype == itypes.funct3:\n return instruction[15:12]\n elif itype == itypes.funct7:\n return instruction[32:25]\n elif itype == itypes.rs1:\n return instruction[20:15]\n elif itype == itypes.rs2:\n return instruction[25:20]\n elif itype == itypes.imm12lo:\n return concat(instruction[32], instruction[7], instruction[31:27])\n elif itype == itypes.imm12hi:\n return concat(instruction[27:25], instruction[12:8])\n elif itype == itypes.instruction_id:\n return instruction[15:12]\n elif itype == itypes.rd:\n return instruction[12:7]\n elif itype == itypes.imm12:\n return instruction[32:20]\n elif itype == itypes.imm12_sb:\n return concat(instruction[32:25], instruction[12:7])\n elif itype == itypes.imm20:\n return concat(instruction[31], instruction[20:12], instruction[20], instruction[31:21])\n elif itype == itypes.imm20_pc:\n return instruction[31:12]\n elif itype == itypes.shamtw:\n return instruction[25:20]\n elif itype == itypes.shamt:\n return instruction[25:20]\n else:\n return None", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def __add_arguments__(cls, parser):", "def fetch_arguments(op_def, arg, ws):\n return [fetch_argument(op_def, desc, ws) for desc in arg.strings]", "def _pre_argument_parsing(self):\n pass", "def parse_int(self,ins):\n if ins.instr == 'add': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n if self.is_reg(ins.args[2]):\n self.need = [ins.args[1], ins.args[2]]\n else: \n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'addi': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'addu': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n if self.is_reg(ins.args[2]):\n self.need = [ins.args[1], ins.args[2]]\n else: \n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n elif ins.instr == 'addiu':\n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'sub': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n if self.is_reg(ins.args[2]):\n self.need = [ins.args[1], ins.args[2]]\n else: \n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'subu': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n if self.is_reg(ins.args[2]):\n self.need = [ins.args[1], ins.args[2]]\n else: \n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", 
ins.instr) \n \n elif ins.instr == 'mult': \n if len(ins.args) == 2:\n self.gen = [Register(\"$hi\"),Register(\"$lo\")]\n self.need = [ins.args[0], ins.args[1]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'multu':\n if len(ins.args) == 2:\n self.gen = [Register(\"$hi\"),Register(\"$lo\")]\n self.need = [ins.args[0], ins.args[1]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'div': \n if len(ins.args) == 2:\n self.gen = [Register(\"$hi\"),Register(\"$lo\")]\n self.need = [ins.args[0], ins.args[1]]\n elif len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1], ins.args[2]] \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'divu': \n if len(ins.args) == 2:\n self.gen = [Register(\"$hi\"),Register(\"$lo\")]\n self.need = [ins.args[0], ins.args[1]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'and': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n if self.is_reg(ins.args[2]):\n self.need = [ins.args[1], ins.args[2]]\n else: \n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'andi': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'or': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n if self.is_reg(ins.args[2]):\n self.need = [ins.args[1], ins.args[2]]\n else: \n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'ori': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'xor': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n if self.is_reg(ins.args[2]):\n self.need = [ins.args[1], ins.args[2]]\n else: \n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'xori': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'nor': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n if self.is_reg(ins.args[2]):\n self.need = [ins.args[1], ins.args[2]]\n else: \n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'sll': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'sllv': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'srl': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'srlv': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n 
self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'sra': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'srav': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'slt': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n if self.is_reg(ins.args[2]):\n self.need = [ins.args[1], ins.args[2]]\n else: \n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'slti': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'sltu': \n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n if self.is_reg(ins.args[2]):\n self.need = [ins.args[1], ins.args[2]]\n else: \n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'sltiu':\n if len(ins.args) == 3:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n self.ival = ins.args[2]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)", "def parse_args(self, args):\n raise Exception(\"Not implemented\")", "def _parse_args(self, prepared_args):\n pass", "def _post_argument_parsing(self):\n pass", "def parse_args():\r\n parser = argparse.ArgumentParser(description=\"Available Options\")\r\n\r\n parser.add_argument('-i'\r\n ,'--input_path'\r\n ,dest='input_path'\r\n ,type=is_valid_path\r\n ,required=True\r\n ,help = \"Enter the path of the image file to process\")\r\n\r\n args = vars(parser.parse_args())\r\n\r\n #To Display The Command Line Arguments\r\n print(\"## Command Arguments #################################################\")\r\n print(\"\\n\".join(\"{}:{}\".format(i,j) for i,j in args.items()))\r\n print(\"######################################################################\")\r\n\r\n return args", "def get_arguments():\n\tparser.add_argument('-i', '--interface', help='interface to affect')\n\tparser.add_argument('-m','--mac', help='mac to allocate')\n\n\targs = parser.parse_args()\n\tinterface = args.interface\n\tmac = args.mac\n\treturn (interface, mac)", "def _parse_args():\n parser = argparse.ArgumentParser(description='Run DAFI.')\n parser.add_argument('input_file', help='Name (path) of input file')\n return parser.parse_args()", "def _argument_parsing():\n print('Generating spirograph...')\n parser = argparse.ArgumentParser(description=description_string)\n parser.add_argument(\n parser_name_or_flag,\n nargs=mumber_of_arguments,\n dest=parser_destination,\n required=False,\n help=helper_string\n )\n args = parser.parse_args()\n return args", "def parse_misc(self,ins):\n if ins.instr == 'nop':\n if len(ins.args) != 0:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n elif ins.instr == 'syscall':\n if len(ins.args) != 0:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n elif ins.instr == 'break':\n if len(ins.args) != 0:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n elif ins.instr == 'mflo': \n if 
len(ins.args) == 1:\n self.gen = [ins.args[0]]\n self.need = [Register(\"$lo\")]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'mtlo': \n if len(ins.args) == 1:\n self.gen = [Register(\"$lo\")]\n self.need = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'mfhi': \n if len(ins.args) == 1:\n self.gen = [ins.args[0]]\n self.need = [Register(\"$hi\")]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'mthi': \n if len(ins.args) == 1:\n self.gen = [Register(\"$hi\")]\n self.need = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'mtc1': \n if len(ins.args) == 2:\n self.gen = [ins.args[1]]\n self.need = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'mfc1': \n if len(ins.args) == 2:\n self.gen = [ins.args[0]]\n self.need = [ins.args[1]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'la': \n if len(ins.args) == 2:\n self.gen = [ins.args[0]]\n self.ival = ins.args[1]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'lui': \n if len(ins.args) == 2:\n self.gen = [ins.args[0]]\n self.ival = ins.args[1]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)", "def parse_args():\n\n parser = argparse.ArgumentParser()\n\n subparsers = parser.add_subparsers(dest='operation',\n help='Run AtPKI {command} -h for additional help')\n\n parse_bin_parser = subparsers.add_parser(\"parse_bin\",\n help=\"parse generated PKI bin\")\n parse_bin_parser.add_argument(\"--bin_file\", \"-b\", default=\"PKI.bin\",\n help=\"bin_file which need to be parsed\")\n parse_bin_parser.add_argument(\"--output_path\", \"-o\",\n help=\"output path of parsed bin file in from_bytes mode\")\n\n generate_bin_parser = subparsers.add_parser(\"generate_bin\",\n help=\"create PKI bin\")\n generate_bin_parser.add_argument(\"--bin_file\", \"-b\", default=\"PKI.bin\",\n help=\"bin_file which need to be parsed\")\n generate_bin_parser.add_argument('pki_list', metavar='<type> <file>',\n help='type (ca, cert, key,)'\n ' and file, separated by space',\n action=PKIPairAction)\n\n args = parser.parse_args()\n return args", "def parse_args():\n parser = ArgumentParser(\n description=\"This is a script for auto apply ipex optimization.\"\n \"\\n################################# Basic usage ############################# \\n\"\n \"\\n 1. Apply ipex optimization with fp32 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex python_script args \\n\"\n \"\\n 2. Apply ipex optimization with bf16 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex --dtype bfloat16 python_script args \\n\",\n formatter_class=RawTextHelpFormatter,\n )\n\n add_auto_ipex_params(parser, auto_ipex_default_enabled=True)\n\n # positional\n parser.add_argument(\n \"program\",\n type=str,\n help=\"The full path to the proram/script to be launched. 
\"\n \"followed by all the arguments for the script\",\n )\n # rest from the training program\n parser.add_argument(\"program_args\", nargs=REMAINDER)\n return parser.parse_args()", "def parse_argument():\n parser = argparse.ArgumentParser(description='Parsing a file.')\n parser.add_argument('--train', nargs=1, required=True)\n parser.add_argument('--test', nargs=1, required=True)\n args = vars(parser.parse_args())\n return args", "def convert_instruction(instruction: str) -> Tuple[int, int, int]:\n\t# NOOP\n\tif match := NOOP_REGEX.match(instruction):\n\t\tinstruction_type = 0\n\t# ADD\n\telif match := ADD_REGEX.match(instruction):\n\t\tinstruction_type = 1\n\t# MINUS\n\telif match := MINUS_REGEX.match(instruction):\n\t\tinstruction_type = 2\n\t# GOTO\n\telif match := GOTO_REGEX.match(instruction):\n\t\tinstruction_type = encode_label(match.group(\"TARGET\")) + 2\n\t# No match\n\telse:\n\t\traise ValueError(f\"Unrecognized instruction: {instruction}\")\n\t# get a and c from the label and variable capture groups\n\tlabel = encode_label(match.group(\"LABEL\"))\n\tvariable = encode_var(match.group(\"VAR\")) - 1\n\treturn label, instruction_type, variable", "def parse_in_argument_lines(\n argument_line: str, file_import: Optional[Any] = None\n ) -> Tuple[Optional[InputArgument], Any]:\n regex_args_with_type = r\"^(?: *|\\t)(?P<name>\\*{0,4}(\\w+|\\w+\\s|\\w+\\.\\w+\\s)\\((?P<type>.*)\\)):(?P<desc>(\\s|\\S)*)\"\n argument_sections = re.findall(\n regex_args_with_type, argument_line, re.MULTILINE\n )\n if len(argument_sections) < 1:\n regex_args_no_type = r\"^(?: *|\\t)(?P<name>)(\\w+|\\w+\\s|\\w+\\.\\w+\\s|\\w+\\.\\w+):(?P<desc>(\\s|\\S)*)\"\n argument_sections = re.findall(\n regex_args_no_type, argument_line, re.MULTILINE\n )\n if len(argument_sections) < 1:\n return None, None\n else:\n name = argument_sections[0][1].strip()\n description = argument_sections[0][2].strip()\n return InputArgument(name=name, description=description), None\n else:\n name = argument_sections[0][1].strip()\n description = argument_sections[0][3].strip()\n input_type_str = argument_sections[0][2]\n try:\n if file_import and input_type_str in dir(file_import):\n input_type = file_import.__getattribute__(input_type_str)\n else:\n input_type = eval(input_type_str)\n except Exception as err:\n logger.debug(\n f\"[yellow]Problems parsing input type {input_type_str}, setting isArray=False.\"\n f\"Error was: {err}[/yellow]\"\n )\n input_type = None\n\n return InputArgument(name=name, description=description), input_type", "def parse(self, args):\n pass", "def arguments(self):\n return parse_arguments(self['data'])", "def parse_node(self):\n\tline_splitted = self.line.split()\n\ti=t=w=s=l=v=None\n\tfor argument_unit in line_splitted:\n\t words = self.split_argument_unit(argument_unit)\n\n\t if words[0] == 'I':\n\t\ti = words[1]\n\t elif words[0] == 't':\n\t\tt = float(words[1])\n\t elif words[0] == 'W':\n\t\tw = words[1]\n\t elif words[0] == 's':\n\t\ts = words[1]\n\t elif words[0] == 'L':\n\t\tl = words[1]\n\t elif words[0] == 'v':\n\t\tv = int(words[1])\n\t else:\n\t\traise ArgumentNotFoundError(found = words[0])\n\tif i != None:\n\t self.nodes.append(Node(i, t, w, s, l, v))\n\telse:\n\t ArgumentNotFoundError(self.line, 'I = identifier expected')", "def parseArgs ():\n independentBaseName = None\n dependentBaseName = None\n independentTSID = None\n dependentTSID = None\n statisticsFile = None\n nEquations = None\n logFile = None\n #\n # Loop through command line arguments\n for arg in sys.argv:\n parts = 
arg.split('=')\n if ( (parts == None) or (len(parts) != 2) ):\n # Not an arg=value command line argument\n continue\n argName = parts[0].upper()\n argValue = parts[1]\n if ( argName == 'DEPENDENTBASENAME' ):\n dependentBaseName = argValue\n elif ( argName == 'DEPENDENTTSID' ):\n dependentTSID = argValue\n elif ( argName == 'INDEPENDENTBASENAME' ):\n independentBaseName = argValue\n elif ( argName == 'INDEPENDENTTSID' ):\n independentTSID = argValue\n elif ( argName == 'LOGFILE' ):\n logFile = argValue\n elif ( argName == 'NUMBEROFEQUATIONS' ):\n nEquations = int(argValue)\n elif ( argName == 'STATISTICSFILE' ):\n statisticsFile = argValue\n return ( independentBaseName, dependentBaseName, independentTSID, dependentTSID,\n statisticsFile, nEquations, logFile )" ]
[ "0.69461066", "0.6887815", "0.67382395", "0.6320202", "0.6312284", "0.6152274", "0.6121402", "0.61088896", "0.60927796", "0.6092592", "0.608646", "0.6078641", "0.6063982", "0.60591644", "0.5996822", "0.59701645", "0.59372264", "0.59354186", "0.5913147", "0.58877", "0.5884104", "0.58757126", "0.5862787", "0.5848902", "0.5844158", "0.58366716", "0.5818815", "0.57967407", "0.5795366", "0.5793461" ]
0.7026168
0
Gets either a run or a compile task from the API
def getTask(): content = requests.get(MANAGER_URL+"task", params={"apiKey": API_KEY}).text if content == "null": return None else: return json.loads(content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n task_func = getattr(self, self.task_data.get('task_type'))\n task_obj = task_func()\n return task_obj", "def get_task(self, key: str) -> Task:\n raise NotImplementedError", "def fusion_api_get_task(self, param='', uri=None, api=None, headers=None):\n if uri is not None:\n # update fully qualified URL to relative URI\n uri = re.sub('^https://\\d*.\\d*.\\d*.\\d*', '', uri)\n return self.task.get(uri=uri, api=api, headers=headers, param=param)", "def get_task(self, id):\n raise NotImplementedError()", "def get_task(self, u_name):\n raise NotImplementedError()", "def get_task(self, name):\n res = Task()\n self.GetTask(name, res)\n return res", "def get_output_task(self, name='0'):\n port = self.get_output(name).other\n if port is None:\n return None\n return port.task", "def get_input_task(self, name='0'):\n port = self.get_input(name).other\n if port is None:\n return None\n return port.task", "def get(self) -> Task: # pragma: no cover\n raise NotImplementedError", "def task(path, **kwargs):\n\n # Get model configuration\n config = None\n if isinstance(path, (list, tuple)) and hasattr(path[0], \"config\"):\n config = path[0].config\n elif isinstance(path, str):\n config = AutoConfig.from_pretrained(path, **kwargs)\n\n # Attempt to resolve task using configuration\n task = None\n if config:\n architecture = config.architectures[0] if config.architectures else None\n if architecture:\n if any(x for x in [\"LMHead\", \"CausalLM\"] if x in architecture):\n task = \"language-generation\"\n elif \"QuestionAnswering\" in architecture:\n task = \"question-answering\"\n elif \"ConditionalGeneration\" in architecture:\n task = \"sequence-sequence\"\n\n return task", "def get_task(self, code: str) -> \"Task\": # noqa: F821\n if code not in self.tasks:\n raise PyDSTaskNoFoundException(\n \"Task with code %s can not found in process definition %\",\n (code, self.name),\n )\n return self.tasks[code]", "def get_run(arn=None):\n pass", "def task(self):\n return import_path_to_callable(self.func)", "def _get_task(self, task):\n try:\n return TASKS[task]\n except KeyError:\n raise ValueError(\"task %s \"\n \"is not supported. 
\" % task)", "def getTask(self, name):\n for t in self.tasks:\n if isinstance(name, str):\n if t.name == name:\n return t\n else:\n if t.__class__ is name:\n return t\n return None", "def run_task(self) -> Task:", "def get_task(self):\n\n url='{url}/task'.format(url=config.SERVER_URL)\n\n try:\n res=request.urlopen(url,timeout=10).read()\n res=str(res,encoding='utf8')\n except Exception as e:\n check_server() # sleep until server is available\n try:\n res=request.urlopen(url,timeout=10).read()\n res=str(res,encoding='utf8')\n except:\n err_str='error: client -> get_task : ' \\\n 'unable to connect to server, exit process'\n info_manager(err_str,type='KEY')\n os._exit(0)\n\n if 'no task' in res: # if server have no task uid ,return 'no task uid'\n err_str= 'error: client -> get_task : ' \\\n 'unable to get task, exit process'\n info_manager(err_str,type='KEY')\n os._exit(0)\n\n try: # try to parse task str\n res=res.split(',')\n self.task_uid=res[0]\n self.task_type=res[1]\n except:\n err_str='error: client -> get_task : ' \\\n 'unable to split task str,exit process'\n info_manager(err_str,type='KEY')\n os._exit(0)", "def task(self) -> base_model.BaseTask:\n return self._task", "def task():", "def task(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"task\")", "def get_task_type(self):\n\t\treturn call_sdk_function('PrlRunningTask_GetTaskType', self.handle)", "async def get_task_result(task_id: TaskId):", "def run(self):\n if self.type_task == \"Api-request\":\n self.config = ConfigApiRequestTask(**self.dynamic_configs)\n self.task = ApiRequestTask(\n priority=0, # fixed priority\n config=self.config\n )\n elif self.type_task == 'Db':\n self.config = ConfigDbTask(self.dynamic_configs)\n self.task = DbTask(\n priority=0,\n config=self.config\n )\n elif self.type_task == 'File':\n self.config = ConfigFileTask(self.dynamic_configs)\n self.task = FileTask(\n priority=0,\n config=self.config\n )\n \n try:\n self.result = self.task.execute()\n except Exception as e:\n self.errors = str(e)\n self.logger.error(f'Error executing task: {self.errors}')\n return False\n \n res = self.save_into_db()\n return res", "def make_task(self):\n return Task()", "def task():\n pass", "def task():\n pass", "def requireTask(self, name):\n t = self.getTask(name)\n if t is None:\n raise Exception(\"Task %s not found in service\" % name)\n return t", "def get_task_fun(self):\n app_config = self.integration.service.app_config\n try:\n return app_config.periodic_tasks[self.name]\n except KeyError:\n pass", "def package(self):\n if self.method == 'buildNotification':\n return self.params[1]['name']\n if self.method in ('createImage', 'image', 'livecd'):\n return self.params[0]\n if self.method == 'indirectionimage':\n return self.params[0]['name']\n # params[0] is the source URL for these tasks:\n if self.method not in ('build', 'buildArch', 'buildContainer',\n 'buildMaven', 'buildSRPMFromSCM', 'maven'):\n return None\n # (I wish there was a better way to do this.)\n source = self.params[0]\n o = urlparse(source)\n # build tasks can load an SRPM from a \"cli-build\" tmpdir:\n if source.endswith('.src.rpm'):\n srpm = os.path.basename(source)\n (name, version, release) = srpm.rsplit('-', 2)\n # Note we're throwing away version and release here. 
They could be\n # useful eventually, maybe in a \"Package\" class.\n return name\n # or an allowed SCM:\n elif o.scheme:\n package = os.path.basename(o.path)\n if package.endswith('.git'):\n package = package[:-4]\n if self.method == 'buildContainer':\n package += '-container'\n return package\n raise ValueError('could not parse source \"%s\"' % source)", "def tasks_rpc():\n # First check that this is a legitimate request from the coordinator\n authenticate_coordinator()\n action, task_id, release_id = validate_action(request.get_json(force=True))\n # Call into action\n return ROUTES[action](task_id, release_id)" ]
[ "0.616161", "0.60326207", "0.6017457", "0.5974819", "0.5968861", "0.596335", "0.5956364", "0.58446765", "0.5822358", "0.5801867", "0.57799363", "0.57604116", "0.5759838", "0.57033783", "0.5691822", "0.5687267", "0.56643313", "0.5601746", "0.5589945", "0.5588639", "0.55747354", "0.5570129", "0.5561492", "0.5543544", "0.553499", "0.553499", "0.5513834", "0.55137014", "0.5493464", "0.5474384" ]
0.61274016
1
Posts the result of a compilation task
def compileResult(userID, didCompile, language): r = requests.post(MANAGER_URL+"compile", data={"apiKey": API_KEY, "userID": userID, "didCompile": int(didCompile), "language": language})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def result(self, result: osbuild.pipeline.BuildResult):", "def return_result(self, compiled_result, response_channel):\n return self.worker.publish(\n channel=response_channel, message=compiled_result)", "def build_step(self):\n run_cmd('./compile.sh', log_all=True, simple=True, log_ok=True)", "def v2_runner_on_ok(self, result, **kwargs):\n host = result._host\n task = result._task\n output = result._result\n if result._result.get('changed', False):\n status = 'changed'\n else:\n status = 'ok'\n self.results.append({\"host\": host.name, \"action\":task.action, \"status\":status, \"output\": output})", "def execute(self):\n # Do the task that the module is suppose to do.\n\n # Return a message telling the user that the task is done.\n return \"\\nTemplate module did ...\"", "def buildFinished(sb):", "def process(self):\n # return ProcessorResult(True, _('OK'))\n return (True, _(\"OK\"))", "def buildFinished(builderName, build, results):", "def compile(self):\n print \"+ Nothing to be done ;)\"", "def post_postprocessor(result=None, **kw):\n logger.info(\"start post_postprocessor\")\n logger.info(result)\n logger.info(\"end post_postprocessor\")\n pass", "def post_build(self, manager):\n if not self.output_files_dir.exists():\n return\n\n output_file_dirs = [\n d for d in self.output_files_dir.rglob(\"*\") if d.is_dir()\n ] + [self.output_files_dir]\n for output_file_dir in output_file_dirs:\n stem = output_file_dir.relative_to(self.output_files_dir)\n api_path = self.api_dir / stem / ALL_JSON\n\n yield self.task(\n name=f\"contents:{stem}\",\n doc=f\"create a Jupyter Contents API response for {stem}\",\n actions=[\n (self.one_contents_path, [output_file_dir, api_path]),\n (self.maybe_timestamp, [api_path]),\n ],\n file_dep=[p for p in output_file_dir.rglob(\"*\") if not p.is_dir()],\n targets=[api_path],\n )", "def build(ctx):\n if 'cicd' in run('hostname').stdout.strip():\n # Check if we are executing the task from an aws instance\n if requests.get('http://169.254.169.254/latest/meta-data/').status_code == 200:\n git_ref_source = os.environ.get('GIT_SOURCE_BRANCH')\n git_ref_target = os.environ.get('GIT_TARGET_BRANCH')\n run('git fetch --all')\n run('git checkout {}'.format(git_ref_target))\n\n \n tar_name = \"Frontend\"\n #'wordpress-{}-en_CA.tar.gz'.format(WORDPRESS_VERSION)\n #tar_file = open(tar_name, 'wb')\n #tar_file.write(wp_tar.content)\n #tar_file.close()\n\n #run('tar -xzf {}'.format(tar_name))\n \n # Download the postmedia source-code and patches/config\n #clone(git_ref_target, git_ref_source)\n\n # merge (if applicable) and create the release\n if git_ref_source:\n git_pr_id = os.getenv('GIT_PR_ID')\n github_util.put('repos/{}/{}/pulls/{}/merge'.format(GIT_ORG, GIT_REPO, git_pr_id), params={'merge_method': 'squash'})\n version = github_util.get_next_rc()\n github_util.set_release(target_commitish='master', tag=version, prerelease=True)\n build_type = 'release candidate'\n else:\n version = github_util.get_next_hf()\n github_util.set_release(git_ref_target, version)\n build_type = 'hotfix'\n\n # package and upload to S3\n author = os.environ.get('GIT_AUTHOR')\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n tarball = package(notes, version)\n print(\"No upload to S3\")\n #upload(tarball, S3_BUCKET_STAGE)\n else:\n author = input('please enter your name for the release notes: ')\n\n valid_snapshot_name = False\n while not valid_snapshot_name:\n snapshot_name = input('please enter a name for your snapshot: ')\n snapshot_name = 
snapshot_name.lower()\n snapshot_name = re.sub('-', '_', snapshot_name)\n\n # domain sections cannot be longer than 63 characters, so snapshot\n # name cannot be longer than 26 (63 minus snapshot-20190128-1713-homesanddesign - 37)\n if (len(snapshot_name) <= 26):\n valid_snapshot_name = True\n else:\n print(\"{} is too long. Please enter a new snapshot name of 28 characters or less.\".format(snapshot_name))\n\n build_type = 'snapshot'\n \n version = '{}_{}_{}'.format(build_type, snapshot_name,\n datetime.datetime.now().strftime(\"%Y%m%d_%H%M\"))\n print(\"Building snapshot {}\".format(version))\n git_ref_target = 'master'\n git_ref_source = 'HEAD'\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n os.chdir('/opt/')\n if os.path.exists(WORK_DIR):\n os.system('rm -rf {}'.format(WORK_DIR))\n os.mkdir(WORK_DIR)\n tarball = package(notes, version)\n print (\"No upload to S3\")\n #upload(tarball, S3_BUCKET_DEV)", "def execute(self):\n\t\treturn \"done\"", "def compilation(self, config: Config, files: List[str]) -> CallbackResult:\n return [], files", "def submit(self, pid, source):\n # анхны pid -г хадгалч авах\n real_pid = pid\n\n # pid -нд санамсаргүй угтвар залгах\n letters = string.ascii_lowercase\n pid += ''.join(random.choice(letters) for i in range(5))\n \n # С код болгох\n out = pid + \".c\"\n fw = open(out, \"w\")\n fw.write(source)\n fw.close()\n \n # compile хийж программаа гаргах (exe файл үүсгэх)\n res = pid + \"output\"\n com = \"gcc \" + out + \" -o \" + pid + \" 2> \" + res\n r = os.system(com)\n\n # source алдаатай эсэхийг шалгах\n fr = open(res, \"r\") \n compile_error = fr.read()\n fr.close()\n\n if (compile_error):\n rm = \"rm -rf \" + pid + \" \" + out + \" \" + res\n os.system(rm)\n correct = False\n score = 0\n return {'correct': correct,\n 'score': score,\n 'msg': compile_error}\n\n # программ isolate дотор ажиллуулах\n cp = \"sudo cp -rf \" + pid + \" /var/local/lib/isolate/0/box/\"\n os.system(cp)\n \n # running program in isolate\n # sand = \"./sol.sh\" + \" problem_\" + real_pid + \" \" + pid + \" > \" + res\n sand = \"./media/./sol.sh\" + \" ./media/problem_\" + real_pid + \" \" + pid + \" > \" + res\n os.system(sand)\n\n # pass counter\n fr = open(res, \"r\") \n ret = fr.read()\n fr.close()\n\n # бүх үүсгэсэн файл, програмуудыг устгах\n rm = \"rm -rf \" + pid + \" \" + out + \" \" + res\n os.system(rm)\n\n correct = True\n ret_dict = json.loads(ret)\n for i in ret_dict : \n a = i\n\n b = ret_dict[str(a)]\n score = int(a) / int(b)\n print(score)\n msg = str(a) + \"/\" + str(b)\n print(msg)\n return {'correct': correct,\n 'score': score,\n 'msg': msg}", "def compile_and_run(self, desired_result, input, limit):\n cfg = desired_result.configuration.data\n compile_result = self.compile(cfg, 0)\n return self.run_precompiled(desired_result, input, limit, compile_result, 0)", "def compile_upload_sequence(self, sequence):\n\n # First check if all values have been replaced in sequence:\n if not sequence.is_ready():\n self.hd.log.error(\"Sequence is not ready: Not all placeholders have been replaced.\")\n return\n\n self.module.set('compiler/sourcestring', sequence.sequence)\n # Note: when using an AWG program from a source file\n # (and only then), the compiler needs to\n # be started explicitly with awgModule.set('compiler/start', 1)\n while self.module.getInt('compiler/status') == -1:\n time.sleep(0.1)\n\n if self.module.getInt('compiler/status') == 1:\n # compilation failed, raise an exception\n 
self.hd.log.warn(self.module.getString('compiler/statusstring'))\n return\n\n if self.module.getInt('compiler/status') == 0:\n self.hd.log.info(\n \"Compilation successful with no warnings, will upload the program to the instrument.\"\n )\n if self.module.getInt('compiler/status') == 2:\n self.hd.log.warn(\n \"Compilation successful with warnings, will upload the program to the instrument.\"\n )\n self.hd.log.warn(\n f\"Compiler warning: {self.module.getString('compiler/statusstring')}\"\n )\n\n # Wait for the waveform upload to finish\n time.sleep(0.2)\n i = 0\n while (self.module.getDouble('progress') < 1.0) and (self.module.getInt('elf/status') != 1):\n self.hd.log.info(\"{} progress: {:.2f}\".format(i, self.module.getDouble('progress')))\n time.sleep(0.2)\n i += 1\n self.hd.log.info(\n \"{} progress: {:.2f}\".format(i, self.module.getDouble('progress'))\n )\n if self.module.getInt('elf/status') == 0:\n self.hd.log.info(\"Upload to the instrument successful.\")\n if self.module.getInt('elf/status') == 1:\n self.hd.log.warn(\"Upload to the instrument failed.\")", "def assemble(compilers, paths, output, promoted):\n\n\t# assemble C compile command\n\tcommand = []\n\tcommand.append(compilers['cc'])\n\n\tcommand.extend(['-I', paths['include']])\n\n\tif output:\n\t\tcommand.extend(['-o', output])\n\n\tcommand.extend(promoted)\n\n\t# execute compiler\n\treturn subprocess.call(command)", "def post_task_run(self, results, extra_events: Optional[dict] = None):\n\n if extra_events is None:\n extra_events = {}\n\n # No need to expose the RETURN_KEYS_KEY\n try:\n del results[RETURN_KEYS_KEY]\n except (TypeError, KeyError):\n pass\n\n # Print the post-call header\n self.print_postcall_header(results)\n\n # Send a custom task-succeeded event with the results\n if not self.request.called_directly:\n self.send_event('task-results', firex_result=convert_to_serializable(results), **extra_events)\n self.send_firex_data(self.abog)", "def run(self, distribution_root):\n self.result = self._compile_and_execute(distribution_root)\n return self.result", "def compile_python(self):\n if(self.input == \"\"):\n stdout = subprocess.run(\n [\"python\", self.id+\".py\"], stdout=subprocess.PIPE).stdout.decode('utf-8')\n self.output = stdout\n if(len(stdout) == 0):\n self.output = subprocess.run(\n [\"python\", self.id+\".py\"], stderr=subprocess.PIPE).stderr.decode('utf-8')\n self.status = 0 # error\n else:\n self.status = 1 # success\n else:\n pass", "def compile_do(self):\r\n self.__advance()\r\n self.__compile_subroutine_call()\r\n self.__vmwriter.write_pop(\"temp\", 0)\r\n self.__advance()", "def build(parameters):\n\n\n print(\"In Build module\")", "def task():\n\n\tprint('Example task executed.')", "def post(self, request):\n result = None\n print(\"RESULT API: \", request.data)\n task_exec_update = TaskExecutionResult.objects.get(\n id=request.data['context']['taskExecutionID']\n )\n try:\n if request.data['result'].lower() == \"pass\":\n result = apisettings.PASS\n if request.data['result'].lower() == \"fail\":\n result = apisettings.FAIL\n if request.data['result'].lower() == \"abort\":\n result = apisettings.ABORT\n\n task_exec_update.result = result\n task_exec_update.save(update_fields=['result'])\n Log.summary_task_result(context=request.data.get(\"context\"), result=request.data['result'])\n return Response(status=HTTP_200_OK)\n except Exception as e:\n logger = Log.get_logger(__name__)\n logger.exception(e)\n return Response(status=HTTP_400_BAD_REQUEST)", "def run_compiler(executable):\n\n command = 
executable + sys.argv[1:]\n logging.debug('compilation: %s', command)\n result = subprocess.call(command)\n logging.debug('compilation exit code: %d', result)\n return result", "def stepFinished(build, step, results):", "def getCompile(uid):\n if webapp.config['COMPILE_OFF']:\n return returnError(\"Compilation Features are not available\", 400)\n\n logger.debug(\"[FLASKWEB] Retrieving last compilation status\")\n\n result = db.getCompiles(uid=uid)\n if len(result) == 0:\n result = db.getCompiles(uid=AppID.getUID(uid))\n\n if len(result) == 0:\n return returnError(\"No output found for compilation, %s\\n\\n\" % uid, 400)\n else:\n output = result[0]\n output['uname'] = AppID.getAppId(output['name'], output['uid'])\n local = os.path.join(webapp.config['UPLOADED_BUILD_DEST'], output['uname'])\n output['sandbox'] = sorted (os.listdir(local))\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(output), 200\n else:\n return render_template(\"last.html\", lastcompile=output)", "def on_task_output(cls, task: Task, config: dict) -> None:", "def build_and_deploy():\n\n with shell_env(TZ=_get_timezone()):\n _create_output_branch()\n _build_html()\n _git_commit_all()\n _git_push(_get_output_branch())" ]
[ "0.6297503", "0.6123374", "0.6079198", "0.5799119", "0.5607617", "0.5598355", "0.5551113", "0.55006796", "0.54995465", "0.5477447", "0.54718935", "0.5466789", "0.5461721", "0.5413972", "0.53827256", "0.5374379", "0.5362462", "0.53495663", "0.5337475", "0.5330759", "0.5323037", "0.5311156", "0.5309521", "0.5308659", "0.52744657", "0.52312845", "0.52213913", "0.52028", "0.52006215", "0.5191526" ]
0.6791586
0
Append base url to partial url
def get_full_url(self, part_url): return BASE_URL + part_url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_short_url_base(url):", "def _get_base_url(self):\n return '/{}/'.format(self.name.replace('__', '/'))", "def get_short_url_base():", "def generate_full_url(base_url, lineage, segment):\n params = \"/\".join([lineage, segment])\n return urljoin(base_url, params)", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def __url(self, *els):\n\n urls = [str(el) for el in els]\n urls.insert(0, self.BASE_URL)\n\n return '/'.join(s.strip('/') for s in urls)", "def get_full_url(self, url):\n param_str = self.request.GET.urlencode()\n request_url = u'%s%s' % (self.base_url, url)\n request_url += '?%s' % param_str if param_str else ''\n return request_url", "def set_base_url(url):\n global BASE_URL\n if url is not None:\n BASE_URL = '/'.join((url.split(\"/\")[:-1])) + \"/\"", "def base_url_path(self):\n path = urlsplit(self.base_url())[2]\n if path.endswith(\"/\"):\n path = path[:-1]\n return path", "def __get_full_url(self, operation, slug_params):\n return (self.base_url + operation[1]) % slug_params", "def _url(self, path):\n \n return self.url + path", "def construct_url(self,*path):\n base = self.request.protocol+\"://\"+self.request.host+\"/\"\n return base+\"/\".join(path)", "def get_base_url(self):\n return self.base_url", "def set_base_url(self, base_url):\n\n while base_url[-1] == '/':\n base_url = base_url[:-1]\n self.url = base_url\n self._update_children_url()", "def _construct_url(self, endpoint):\n return self.base_url + self.api_path + endpoint.strip('/')", "def create_url(self):\n self.base_url = self.base + self.strs[jpn.path_latest]", "def full_url(self, path):\n if path[0] == '/':\n path = path[1:]\n return urljoin(self.absolute_root, path)", "def buildpath(self):\n basepath = urlutil.href_settings.root + (self.relpath if self.relpath else cherrypy.request.path_info)\n if basepath.find('~') < 0:\n basepath += ('' if basepath.endswith('/') else '/') + '~'\n if cherrypy.request.query_string:\n basepath += ('&' if basepath.find('?') >= 0 else '?') + cherrypy.request.query_string\n return basepath", "def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' 
not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url", "def urljoin(base, *path):\n return reduce(_join, path, base)", "def build_url(base_url, path):\n if absolute_http_url_regexp.match(path):\n return path\n elif base_url:\n return \"{}/{}\".format(base_url.rstrip(\"/\"), path.lstrip(\"/\"))\n else:\n raise exceptions.ParamsError(\"base url missed!\")", "def url(self) -> str:\n return f\"{self._get_base_url()}{self.path_extension}\"", "def urljoin(cls, base, end):\r\n if base and not base.endswith(\"/\"):\r\n base = base + \"/\"\r\n return urljoin(base, str(end))", "def baseurl(request):\n if request.is_secure():\n scheme = 'https://'\n else:\n scheme = 'http://'\n\n return {'BASE_URL': scheme + request.get_host(),}", "def urljoin(cls, base, end):\n\n if base and not base.endswith('/'):\n base = base + '/'\n return urljoin(base, str(end))", "def clean_url(app_server, base_path) -> str:\n if app_server.endswith('/'):\n base_url = f\"{app_server[:-1]}{base_path}\"\n else:\n base_url = f\"{app_server}/{base_path}\"\n return base_url", "def _get_URL_base(self, request, step):\n index = request.path.find(step.slug)\n\n return request.path[:index]", "def concat_url(endpoint, url):\n u = \"%s/%s\" % (endpoint.rstrip(\"/\"), url.strip(\"/\"))\n return u.rstrip('/')", "def _make_url(self, path):\n if not self.base_location:\n raise ValueError(\"No base_location set. Cannot construct url.\")\n\n if path:\n path = self._normalise_last_slashes(path)\n path = self._normalise_head_slashes(path)\n\n return \"\".join((self.base_location, self.endpoint, path))" ]
[ "0.7237278", "0.705202", "0.70063066", "0.698653", "0.6804967", "0.6804967", "0.6779443", "0.6767789", "0.67246985", "0.6697949", "0.6675118", "0.66569734", "0.6630495", "0.6603647", "0.6587251", "0.65712684", "0.65546143", "0.65124065", "0.6508288", "0.6486769", "0.64825237", "0.64611745", "0.6438129", "0.6414577", "0.6395342", "0.6382422", "0.63736314", "0.63650405", "0.6351187", "0.63487303" ]
0.72533536
0
Gets stats about additions and deletions. Commit url to be used can be set via commit_url or generated using both full_name and commit_sha. If all parameters are set, commit_url will be used, the others ignored
def get_commit_change_stats(self, commit_url='', full_name='', commit_sha=''): if commit_url == '' and (commit_sha == '' and full_name == ''): raise BaseException('commit url could not be generated. Commit url, commit sha and full name not set') return None url = commit_url if url == '': url = COMMIT_DETAILS.format(commit_sha=commit_sha, full_name=full_name) url = self.get_full_url(url) json_data = loads(self.get_from_net(url)) stats = {'additions': 0, 'deletions': 0} if 'stats' in json_data: stats['additions'] = json_data['stats']['additions'] stats['deletions'] = json_data['stats']['deletions'] return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_commit_stats(self):\n return self.commit_stats", "def commit_detail(self, commit):\n\n files_changes = {\n diff.a_path for diff in commit.diff()\n }\n\n return {\n 'id': commit.hexsha,\n 'date': time.strftime(\n \"%a %b %d %H:%M:%S %Y\",\n time.gmtime(commit.committed_date)\n ),\n 'message': commit.message,\n 'author_name': commit.author.name,\n 'author_email': commit.author.email,\n 'files_change_number': len(files_changes)\n }", "def main(*args):\n\n # Default options\n repos = []\n user_map = NullUserMap()\n plotout = None\n printout = True\n gitlab = None\n\n if \"--help\" in args:\n print(main.__doc__)\n return 0\n\n # Parse command-line \n it = iter(args)\n for a in it:\n if a == \"--users\":\n user_map = FileUserMap(next(it))\n elif a == \"--pdf\":\n plotout = next(it)\n elif a == \"--noprint\":\n printout = False\n elif a == \"--gitlab\":\n gitlab = next(it), next(it)\n else:\n repos.append(a)\n \n # Setup backend\n if gitlab is None:\n coretype = GitCLIBackend\n coreargs = repos\n else:\n coretype = GitlabBackend\n coreargs = gitlab\n\n # Dictionary for storing the data to be presented\n commits = {}\n \n # Find the bound for searching -- the beginning of the week, one year ago\n today = datetime.now().replace(hour=0, minute=0,second=0,microsecond=0)\n year_ago = today.replace(year = today.year - 1)\n _, __, dow = year_ago.isocalendar()\n year_ago-= timedelta(days=(dow-1))\n \n # Processes the git logs and stores some intermediate results in the three\n # dictionaries instantiated above\n for email, date, stats in coretype(*coreargs, since=year_ago.strftime(\"%Y-%m-%d\")):\n \n # Trim date of commit to midnight of that day\n date = date.replace(hour=0,minute=0,second=0,microsecond=0)\n user = user_map.map(email)\n \n if not user in commits:\n commits[user] = {}\n if not date in commits[user]:\n commits[user][date] = 0\n \n commits[user][date] += 1\n \n # Print plaintext report\n if printout:\n \n for user, cal in commits.items():\n \n print(\"Annual summary for %s\" % (user))\n \n for date, count in sorted(cal.items(), key=lambda x: x[0]):\n strdate = date.strftime(\"%x\")\n print(\" %s: %2d commits\" % (strdate, count))\n \n print(\"\")\n\n # Draw plots\n if plotout is not None:\n\n with PdfPages(plotout) as pdf:\n \n labels = []\n offsets = {}\n \n cdict = ((205.,247.,237.), (15.,191.,148.))\n \n cdict = {\n 'red': (\n (0.0, cdict[0][0]/255, cdict[0][0]/255),\n (1.0, cdict[1][0]/255, cdict[1][0]/255)\n ),\n 'green':(\n (0.0, cdict[0][1]/255, cdict[0][1]/255),\n (1.0, cdict[1][1]/255, cdict[1][1]/255)\n ),\n 'blue': (\n (0.0, cdict[0][2]/255, cdict[0][2]/255),\n (1.0, cdict[1][2]/255, cdict[1][2]/255)\n )\n }\n \n plt.register_cmap(name='Sea', data=cdict)\n colormap = plt.get_cmap('Sea')\n \n min_yr, min_week, _ = year_ago.isocalendar()\n max_yr, max_week, _ = today.isocalendar()\n \n week_counts = {yr: weeks_in_year(yr) for yr in range(min_yr, max_yr+1)}\n \n # Generate labels for each week -- \n # Add year to the label of the first week of each year as well as \n # the very first week in the history\n lastmon = None\n for yr, weeks in sorted(week_counts.items(), key=lambda x: x[0]):\n cur = datetime(year=yr, month=1, day=4) # jan 4 is always in week 1 of the iso year\n for i in range(weeks):\n mon = cur.strftime(\"%b\")\n if mon != lastmon:\n labels.append(cur.strftime(\"%b\"))\n else:\n labels.append(\"\")\n offsets[(yr, i+1)] = len(offsets)\n cur += timedelta(days=7)\n lastmon = mon\n \n for user in commits:\n \n fig = plt.figure(figsize=(7.5, 1.65))\n\n gs = 
gridspec.GridSpec(2, 1, height_ratios=[8, 1]) \n ax, cax = plt.subplot(gs[0]), plt.subplot(gs[1])\n \n maxcommits = ceil(max(commits[user].values()) * 1.5)\n \n for date, count in commits[user].items():\n yr, wk, dow = date.isocalendar()\n offset = offsets[(yr, wk)]\n \n ax.add_patch(\n patches.Rectangle(\n (offset+0.05, dow - 1 + 0.05),\n 0.95, 0.95,\n linewidth=0,\n facecolor=colormap(1. * (count - 1) / (maxcommits) )\n )\n )\n \n ax.set_title(\"Commit summary for %s\" % user, y=1.28)\n \n ax.xaxis.tick_top()\n ax.set_xticks([x for x in np.arange(len(offsets)) if labels[int(x)] != \"\"])\n ax.set_xticks(np.arange(len(offsets)), minor=True)\n \n ax.set_xticklabels([x for x in labels if x != \"\"])\n ax.set_xlim(offsets[(min_yr, min_week)], offsets[(max_yr, max_week)]+1)\n\n ax.set_ylim(0, 7)\n ax.set_yticks(np.arange(7))\n ax.set_yticklabels([\"S \",\"M \",\"T \",\"W \",\"R \",\"F \",\"S \"])\n ax.invert_yaxis()\n \n if maxcommits <= 10:\n top = maxcommits\n step = 1.\n else:\n top = (maxcommits - 1) + 11 - ((maxcommits - 1) % 11)\n step = top/11.\n\n colorticks = np.arange(0., top+(step/2), step) / (top)\n colorlabels = [\"%d\" % (x*top) for x in colorticks]\n \n cbar = colorbar.ColorbarBase(\n cax, cmap=colormap,\n orientation='horizontal'\n )\n cbar.set_ticks(colorticks)\n cbar.set_ticklabels(colorlabels)\n cax.set_xlim(colorticks[0], colorticks[-1])\n \n for label in ax.get_xticklabels():\n label.set_horizontalalignment('left')\n \n for label in ax.get_yticklabels():\n label.set_horizontalalignment('center')\n label.set_verticalalignment('top')\n \n for item in (\n [ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels() +\n cax.get_xticklabels()\n ):\n item.set_fontsize(7)\n \n ax.title.set_fontsize(10)\n fig.subplots_adjust(top=0.7, bottom=0.15)\n \n pdf.savefig(fig)\n\n return 0", "def __gitStatistics(self):\n self.vcs.gitStatistics(self.project.getProjectPath())", "def get_repo_info(loader, sha, prov_g):\n user_repo = loader.getFullName()\n repo_title = loader.getRepoTitle()\n repo_desc = loader.getRepoDescription()\n contact_name = loader.getContactName()\n contact_url = loader.getContactUrl()\n commit_list = loader.getCommitList()\n licence_url = loader.getLicenceURL() # This will be None if there is no license\n\n # Add the API URI as a used entity by the activity\n if prov_g:\n prov_g.add_used_entity(loader.getRepoURI())\n\n prev_commit = None\n next_commit = None\n version = sha if sha else commit_list[0]\n if commit_list.index(version) < len(commit_list) - 1:\n prev_commit = commit_list[commit_list.index(version) + 1]\n if commit_list.index(version) > 0:\n next_commit = commit_list[commit_list.index(version) - 1]\n\n info = {\n 'version': version,\n 'title': repo_title,\n 'description': repo_desc,\n 'contact': {\n 'name': contact_name,\n 'url': contact_url\n } \n }\n if licence_url:\n info['license'] = {\n 'name': 'License',\n 'url': licence_url\n }\n\n if type(loader) is GithubLoader:\n basePath = '/api-git/' + user_repo + '/'\n basePath += ('subdir/' + loader.subdir + '/') if loader.subdir else ''\n basePath += ('commit/' + sha + '/') if sha else ''\n if type(loader) is GitlabLoader:\n basePath = '/api-gitlab/' + user_repo + '/query/' \n basePath += ('branch/' + loader.branch + '/') if loader.branch else ''\n basePath += ('subdir/' + loader.subdir.strip('/') + '/') if loader.subdir else ''\n basePath += ('commit/' + sha + '/') if sha else ''\n elif type(loader) is LocalLoader:\n basePath = '/api-local/'\n elif type(loader) is URLLoader:\n basePath = 
'/api-url/'\n else:\n # TODO: raise error\n glogger.error('Cannot set basePath, loader type unkown')\n\n return prev_commit, next_commit, info, basePath", "def get_git_info():\n\n diff = \"Could not extract diff\"\n githash = '00000'\n try:\n # Refers to the global qc_config\n PycQEDdir = pq.__path__[0]\n githash = subprocess.check_output(['git', 'rev-parse',\n '--short=10', 'HEAD'], cwd=PycQEDdir)\n diff = subprocess.run(['git', '-C', PycQEDdir, \"diff\"],\n stdout=subprocess.PIPE).stdout.decode('utf-8')\n except Exception:\n pass\n return githash, diff", "def get_details(self, repo=None):\n api_json = []\n\n #get all branches from this repo\n branches = self.make_branches(self.getBranch(repo))\n\n today = datetime.date.today()\n yesterday = today - datetime.timedelta(2)\n\n for branch in branches:\n args = {\"per_page\": \"100\",\n \"sha\": branch,\n \"author\": self.username,\n \"since\": yesterday.isoformat()}\n args = self.make_args(args)\n repo_url = \"/\".join([self.url, \"repos\", repo, \"commits\"])\n repo_url = repo_url + args\n\n request = urllib2.Request(repo_url, headers=self.headers)\n response = urllib2.urlopen(request)\n raw_data = response.read()\n commits_info = self.process_factory(simplejson.loads(raw_data))\n api_json = api_json + commits_info\n\n print repo_url\n\n print api_json\n return api_json", "def calc_test(commits, author):\n\topen('modifications.csv', 'w').close()\n\t\n\tfor count, commit in enumerate(commits):\n\t\t# status update\n\t\tif (count + 1) % 5 == 0:\n\t\t\tprint commit, '.. ..', count + 1, ' / ', len(commits)\n\n\t\t\t# getting every blob from a given commit\n\t\tquery = ('for x in $(echo ' + commit + ' | ~/lookup/getValues c2b | ' +\n\t\t\t# splitting it and discarding the newlines and the commit's hash\n\t\t\t'awk -v RS=\"[;\\\\n]\" 1 | tail -n+2); do ' +\n\t\t\t# We look up the content's of each blob, and discard the STDERR,\n\t\t\t# in the case of trying to look up a blob that does not exist in the database\n\t\t\t'echo $x | ~/lookup/showCnt blob 2> /dev/null; done | ' +\n\t\t\t# We search for the use of a unit testing library, using the above regex, and\n\t\t\t# keeping the first result only, since that is enough to know that the commit contains\n\t\t\t# a unit testing file, to make the execution faster\n\t\t\t'egrep -m 1 \"' + final_reg + '\"')\n\t\tif bash(query): # if contains unit testing lib\n\t\t\tout = bash('echo ' + commit + ' | ~/lookup/getValues c2P')\n\t\t\tmain_proj = out.strip().split(';')[1]\n\t\t\ttime = search(commit, 'commit')[2]\n\n\t\t\t# at this point we could search the parent's tree for the existence of tests, but this\n\t\t\t# would require recursively looking at every directory and parsing every file in the tree, so, due\n\t\t\t# to the complexity, we skip it and consider it a modification instead of a possible introduction\n\n\t\t\tf = open(\"modifications.csv\", \"a\")\n\t\t\tprint 'modification'\n\t\t\tf.write(author + ', ' + 'TEST' + ', ' + str(time) + ', ' + main_proj + '\\n')\n\t\t\tf.close()\n\t\t\tprint 'wrote: -->', commit", "def get_churn_per_commit(dateshas, excludestr):\n\tprint \"sha;date;churn\" # CSV header line\n\ttotal = 0\n\tfor date, sha in dateshas:\n\t\tcommit = None\n\t\tif excludestr:\n\t\t\t# Example command with filtering:\n\t\t\t# git show abcde -w -C --name-status --format=format: \n\t\t\t#\t\tOutputs all the changed files with just their filenames, \n\t\t\t#\t\tas paths from the repository root. 
-w flag ignores \n\t\t\t#\t\twhitespace differences, -C flag detects move moves and \n\t\t\t#\t\trenames and ignores those.\n\t\t\t# cut -f2,3:\n\t\t\t#\t\tCuts out the filename (column 2) and the rename \n\t\t\t#\t\tdestination (column 3, if exists). This is done to not \n\t\t\t#\t\thave the M/A/D/R modification indicator from the \n\t\t\t#\t\t--name-status output.\n\t\t\t# grep -v '^Documentation/':\n\t\t\t#\t\tFilters out all the files which are in the specified \n\t\t\t#\t\tfolders.\n\t\t\t# xargs -L 500 git show abcde -w -C --shortstat -- dummy\n\t\t\t#\t\txargs carries all the files that grep outputs over to git \n\t\t\t#\t\tshow, which formats the\tresult into a line of the form \n\t\t\t#\t\t'X files changed, Y insertions(+), Z deletions(-)'.\n\t\t\t#\t\tUsing xargs because OS X has a wonky and unpredictable \n\t\t\t#\t\targument list length limit,\tso this should makes the \n\t\t\t#\t\tscript more portable. 'dummy' is specified to ensure an \n\t\t\t#\t\tempty set from grep does not lead to 'git show' showing \n\t\t\t#\t\teverything.\n\t\t\tshow = subprocess.Popen(['git', 'show', sha, '-w', '-C', \n\t\t\t\t\t\t'--name-status', '--format=format:'], \n\t\t\t\t\t\tstdout=subprocess.PIPE)\n\t\t\tcut = subprocess.Popen(['cut', '-f2,3'], stdin=show.stdout, \n\t\t\t\t\t\tstdout=subprocess.PIPE)\n\t\t\tgrep = subprocess.Popen(['grep', '-v', excludestr], \n\t\t\t\t\t\tstdin=cut.stdout, stdout=subprocess.PIPE)\n\t\t\txargs = subprocess.Popen(['xargs', '-L', '500', 'git', 'show', \n\t\t\t\t\t\tsha, '-w', '-C', '--shortstat', \n\t\t\t\t\t\t'--format=format:', '--', 'dummy'], \n\t\t\t\t\t\tstdin=grep.stdout, stdout=subprocess.PIPE)\n\t\t\tcommit = xargs.stdout.readlines()\n\t\telse:\n\t\t\t# If there is no excludestr, we can simply ask for the shortstat \n\t\t\t# information.\n\t\t\tshow = subprocess.Popen(['git', 'show', sha, '-w', '-C', \n\t\t\t\t\t\t'--shortstat', '--format=format:'], \n\t\t\t\t\t\tstdout=subprocess.PIPE)\n\t\t\tcommit = show.stdout.readlines()\n\n\t\t# Remove leading/trailing newlines\n\t\tcommit = [x[:-1] for x in commit if x != '\\n']\n\n\t\t# Because of the xargs approach, there might be multiple result \n\t\t# lines. Iterate over all of them and sum the churn. 
That is, if there \n\t\t# are actually results left after directory filtering\n\t\tchurn = 0\n\t\tfor line in commit:\n\t\t\tif len(line) > 0:\n\t\t\t\ttry:\n\t\t\t\t\tadded = int(line.split()[3])\n\t\t\t\texcept:\n\t\t\t\t\tadded = 0\n\t\t\tchurn += added\n\t\tif churn > 0:\n\t\t\ttotal += churn\n\t\t\tprint \"%s;%s;%d\" % (sha[:8],str(date), churn)\n\n\treturn total", "def get_commit_data(owner, repo, ref, session=None):\n url = f'{GITHUB_API_URL}/repos/{owner}/{repo}/commits/{ref}'\n return get_whole_response_as_json(url, session)", "def _get_commit_info(commit: git.Commit, pretty_format: str) -> str:\n try:\n return commit.repo.git.show(commit.hexsha, pretty=f\"format:{pretty_format}\")\n except git.GitCommandError as error:\n raise PackitException(\n f\"Cannot find commit {commit.hexsha!r} to check its signature.\", error\n )", "def get_commit_stats_for_contributor(repo_full_name, contributor_id):\n org_name, repo_name = repo_full_name.split('/')\n try:\n contributors = get_repo_contributors(org_name, repo_name)\n except NoContent:\n raise NoContributorsError(\n \"Nobody has contributed to this repository yet\",\n ) from None\n\n try:\n contributor_stats = [\n stats for stats in contributors\n if stats['author']['id'] == contributor_id\n ][0]\n except IndexError:\n raise ContributorNotFoundError(\n \"No such contributor in this repository\",\n ) from None\n\n totals = merge_dicts(*contributor_stats['weeks'])\n\n return totals['c'], totals['a'], totals['d']", "def commits(self):\r\n url = '{0}/commits'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)", "def get_commits(): # pragma: no cover\n global commit_data\n all_commits = 0\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n while all_commits == 0:\n url = 'https://api.github.com/repos/connormlewis/idb/stats/contributors'\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n total = entry['total']\n user_name = entry['author']['login']\n if user_name in team:\n team[user_name] = total\n all_commits += total\n return team, all_commits", "def current_commit():\n prepare_metrics(lambda: Metric('robot_commit', get_current_git_sha(), {'region': REGION_NAME}))", "def git_info_for_course(self, cdir):\r\n\r\n cmd = ''\r\n gdir = settings.DATA_DIR / cdir\r\n info = ['', '', '']\r\n\r\n # Try the data dir, then try to find it in the git import dir\r\n if not gdir.exists():\r\n gdir = path(git_import.GIT_REPO_DIR) / cdir\r\n if not gdir.exists():\r\n return info\r\n\r\n cmd = ['git', 'log', '-1',\r\n '--format=format:{ \"commit\": \"%H\", \"author\": \"%an %ae\", \"date\": \"%ad\"}', ]\r\n try:\r\n output_json = json.loads(subprocess.check_output(cmd, cwd=gdir))\r\n info = [output_json['commit'],\r\n output_json['date'],\r\n output_json['author'], ]\r\n except 
(ValueError, subprocess.CalledProcessError):\r\n pass\r\n\r\n return info", "def get_url_tag_commit(self, git_sha):\n\n url = 'https://{}/{}/{}/commit/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n git_sha\n )\n return url", "def log_git_info():\n try:\n git_dir = Path('.git')\n head_file = git_dir / 'HEAD'\n with head_file.open() as f:\n head_contents = f.readline().strip()\n log.info(f'Contents of .git/HEAD: {head_contents}')\n if head_contents.split()[0] == 'ref:':\n hash_file = git_dir / head_contents.split()[1]\n with hash_file.open() as f:\n log.info(f'Current reference hash: {f.readline().strip()}')\n except FileNotFoundError:\n return", "def add_git_info(run, scriptpath):\n try:\n repo = Repo(scriptpath, search_parent_directories=True)\n run[\"gitrepo\"] = repo.working_dir\n run[\"gitcommit\"] = repo.head.commit.hexsha\n run[\"gitorigin\"] = get_origin(repo)\n\n if not option_set('ignored metadata', 'diff'):\n whole_diff = ''\n diffs = repo.index.diff(None, create_patch=True)\n for diff in diffs:\n whole_diff += \"\\n\\n\\n\" + \"--- {}\\n+++ {}\\n\".format(\n diff.a_path, diff.b_path) + diff.diff.decode(\"utf-8\")\n\n run['diff'] = whole_diff\n except (InvalidGitRepositoryError, ValueError):\n # We can't store git info for some reason, so just skip it\n pass", "def get_compares_by_commit(commit_url):\n compare_sql = \"SELECT * from git_compare where commit_url=?\"\n return dbutils.execute_query(compare_sql, (commit_url,), DATABASE_FILE)", "def summary(self, *, branch: str = '', commit: str = '') -> None:\n self.__verify_repo_initialized()\n try:\n ppbuf = summarize.summary(self._env, branch=branch, commit=commit)\n except ValueError:\n if commiting.number_commits_recorded(self._env.refenv) == 0:\n ppbuf = StringIO()\n ppbuf.write(f'No commits have been made in the repository. 
\\n')\n ppbuf.write(f'Please make a commit and try again.')\n else:\n raise\n print(ppbuf.getvalue())\n return None", "def commit_names(self, commit):\n return []", "def get_repository_stats(repo, additional_params=[]):\n # Validate and compute additional params first.\n additional_params_total_weight = 0\n additional_params_score = 0\n for additional_param in additional_params:\n try:\n value, weight, max_threshold = [\n int(i) for i in additional_param.split(':')\n ]\n except ValueError:\n print('Parameter value in bad format: ' + additional_param,\n file=sys.stderr)\n sys.exit(1)\n additional_params_total_weight += weight\n additional_params_score += get_param_score(value, max_threshold,\n weight)\n\n created_since = repo.created_since\n updated_since = repo.updated_since\n contributor_count = repo.contributors\n org_count = len(repo.get_contributor_orgs())\n commit_frequency = repo.commit_frequency\n recent_releases_count = repo.recent_releases\n updated_issues_count = repo.updated_issues\n closed_issues_count = repo.closed_issues\n comment_frequency = repo.comment_frequency\n dependents_count = repo.get_dependents()\n\n total_weight = (CREATED_SINCE_WEIGHT + UPDATED_SINCE_WEIGHT +\n CONTRIBUTOR_COUNT_WEIGHT + ORG_COUNT_WEIGHT +\n COMMIT_FREQUENCY_WEIGHT + RECENT_RELEASES_WEIGHT +\n CLOSED_ISSUES_WEIGHT + UPDATED_ISSUES_WEIGHT +\n COMMENT_FREQUENCY_WEIGHT + DEPENDENTS_COUNT_WEIGHT +\n additional_params_total_weight)\n\n criticality_score = round(\n (get_param_score(created_since, CREATED_SINCE_THRESHOLD,\n CREATED_SINCE_WEIGHT) +\n get_param_score(updated_since, UPDATED_SINCE_THRESHOLD,\n UPDATED_SINCE_WEIGHT) +\n get_param_score(contributor_count, CONTRIBUTOR_COUNT_THRESHOLD,\n CONTRIBUTOR_COUNT_WEIGHT) +\n get_param_score(org_count, ORG_COUNT_THRESHOLD, ORG_COUNT_WEIGHT) +\n get_param_score(commit_frequency, COMMIT_FREQUENCY_THRESHOLD,\n COMMIT_FREQUENCY_WEIGHT) +\n get_param_score(recent_releases_count, RECENT_RELEASES_THRESHOLD,\n RECENT_RELEASES_WEIGHT) +\n get_param_score(closed_issues_count, CLOSED_ISSUES_THRESHOLD,\n CLOSED_ISSUES_WEIGHT) +\n get_param_score(updated_issues_count, UPDATED_ISSUES_THRESHOLD,\n UPDATED_ISSUES_WEIGHT) +\n get_param_score(comment_frequency, COMMENT_FREQUENCY_THRESHOLD,\n COMMENT_FREQUENCY_WEIGHT) +\n get_param_score(dependents_count, DEPENDENTS_COUNT_THRESHOLD,\n DEPENDENTS_COUNT_WEIGHT) + additional_params_score) /\n total_weight, 5)\n\n return {\n 'name': repo.name,\n 'url': repo.url,\n 'language': repo.language,\n 'created_since': created_since,\n 'updated_since': updated_since,\n 'contributor_count': contributor_count,\n 'org_count': org_count,\n 'commit_frequency': commit_frequency,\n 'recent_releases_count': recent_releases_count,\n 'closed_issues_count': closed_issues_count,\n 'updated_issues_count': updated_issues_count,\n 'comment_frequency': comment_frequency,\n 'dependents_count': dependents_count,\n 'criticality_score': criticality_score,\n }", "def get_changelog(self, commit_sha):\n\n url = 'https://{}/{}/{}/' + commit_sha + '/CHANGELOG'\n url = url.format(HOST_GITHUB_RAW, self.repo, self.product)\n\n req = requests.get(url)\n lines = req.text\n\n first = self.latest_tags[self.num_comparisons - 1][VERS]\n last = self.latest_tags[self.num_comparisons - 2][VERS]\n flag = False\n\n log = ''\n for line in lines.splitlines():\n if first in line:\n flag = True\n if last in line:\n flag = False\n if flag:\n log += line + '\\n'\n return log", "def _get_git_hash(self):\n try:\n with open(os.path.join(self._base_dir, '.git', 'HEAD'), 'r') as 
head_file:\n ref = head_file.read().strip()\n if ref[:5] == 'ref: ':\n with open(os.path.join(self._base_dir, '.git', ref[5:]), 'r') as commit_file:\n return commit_file.read().strip()\n else:\n return ref[5:]\n except Exception as err:\n self._logger.warning('Couldnt read the git commit hash: %s :: %s',\n err.__class__.__name__, err)\n return 'UNKNOWN'", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def get_git_status(self) -> GitStatus:\n import gitdb.exc # type: ignore\n\n repo = get_git_repo()\n\n if not repo or self._base_commit is None:\n return GitStatus([], [], [], [])\n\n try:\n repo.rev_parse(self._base_commit)\n except gitdb.exc.BadName:\n raise ActionFailure(f\"Unknown git ref '{self._base_commit}'\")\n\n # Output of git command will be relative to git project root\n status_output = zsplit(\n git.diff(\n \"--cached\",\n \"--name-status\",\n \"--no-ext-diff\",\n \"-z\",\n \"--diff-filter=ACDMRTUXB\",\n \"--ignore-submodules\",\n self._base_commit,\n ).stdout.decode()\n )\n\n added = []\n modified = []\n removed = []\n unmerged = []\n while status_output:\n code = status_output[0]\n fname = status_output[1]\n trim_size = 2\n\n if not code.strip():\n continue\n if code == StatusCode.Untracked or code == StatusCode.Ignored:\n continue\n\n # The following detection for unmerged codes comes from `man git-status`\n if code == StatusCode.Unmerged:\n unmerged.append(self._fname_to_path(repo, fname))\n if (\n code[0] == StatusCode.Renamed\n ): # code is RXXX, where XXX is percent similarity\n removed.append(self._fname_to_path(repo, fname))\n fname = status_output[2]\n trim_size += 1\n added.append(self._fname_to_path(repo, fname))\n if code == StatusCode.Added:\n added.append(self._fname_to_path(repo, fname))\n if code == StatusCode.Modified:\n modified.append(self._fname_to_path(repo, fname))\n if code == StatusCode.Deleted:\n removed.append(self._fname_to_path(repo, fname))\n\n status_output = status_output[trim_size:]\n debug_echo(\n f\"Git status:\\nadded: {added}\\nmodified: {modified}\\nremoved: {removed}\\nunmerged: {unmerged}\"\n )\n\n return GitStatus(added, modified, removed, unmerged)", "def extract_commits(self, repo_obj):\n url = REPO_COMMIT_LIST.format(full_name=repo_obj['full_name'])\n url = self.get_full_url(url)\n json_data = loads(self.get_from_net(url))\n commits = []\n for i in json_data:\n committer = i['committer']\n #stats = self.get_commit_change_stats(full_name=repo_obj['full_name'], commit_sha=i['sha'])\n stats = self.get_commit_change_stats(commit_url=i['url'])\n comm = {#TODO Fetch user's location in USER_URL\n 'date': self.get_commit_datetimezone(i['html_url']),\n 'user': i['commit']['committer']['name'],\n 'login': '',\n 'additions': stats['additions'],\n 'deletions': stats['deletions']\n }\n if committer is not None:\n comm['login'] = committer['login']\n\n commits.append(comm)\n return commits" ]
[ "0.6415148", "0.6115201", "0.6015644", "0.5979885", "0.5823878", "0.5778963", "0.5718678", "0.56915957", "0.56483024", "0.5626998", "0.559609", "0.55215144", "0.55175155", "0.5492246", "0.5487441", "0.5458346", "0.5453407", "0.54493964", "0.5440784", "0.5440086", "0.5401678", "0.5371209", "0.53532284", "0.5337917", "0.5329008", "0.5328105", "0.53277504", "0.5311627", "0.5301082", "0.5296888" ]
0.7875595
0
Extract repo commits from details in the repo object
def extract_commits(self, repo_obj): url = REPO_COMMIT_LIST.format(full_name=repo_obj['full_name']) url = self.get_full_url(url) json_data = loads(self.get_from_net(url)) commits = [] for i in json_data: committer = i['committer'] #stats = self.get_commit_change_stats(full_name=repo_obj['full_name'], commit_sha=i['sha']) stats = self.get_commit_change_stats(commit_url=i['url']) comm = {#TODO Fetch user's location in USER_URL 'date': self.get_commit_datetimezone(i['html_url']), 'user': i['commit']['committer']['name'], 'login': '', 'additions': stats['additions'], 'deletions': stats['deletions'] } if committer is not None: comm['login'] = committer['login'] commits.append(comm) return commits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commits_between(repo_path, start, end):\n \n git = subprocess.Popen([\"git\", \"log\", \"%s..%s\" % (start, end)], stdout=subprocess.PIPE, cwd=repo_path)\n log = git.stdout.read().decode(\"utf-8\")\n \n cur = None\n commits = []\n \n for line in log.splitlines():\n cm = re.match(r'commit ([a-f0-9]{40})', line)\n if cm is not None:\n if cur:\n commits.append(cur)\n cur = Commit(cm.group(1))\n \n if cur is not None and cm is None:\n if cur.message is None:\n if line.startswith(\"Author:\"):\n cur.author = line[len(\"Author: \"):]\n elif line.startswith(\"Date:\"):\n cur.date = line[len(\"Date: \"):]\n else:\n cur.message = \"\"\n else:\n cur.message += line.strip() + \"\\n\"\n \n if cur is not None:\n commits.append(cur)\n \n return commits", "def commits(self):\r\n return repocommits.RepoCommits(self)", "def extract_commit_data(repo, fields, result_format, log=LOG):\n results = []\n commits = list(repo.iter_commits())\n if not commits:\n msg = \"No commits found\"\n log.error(msg, commits=commits)\n raise GitToolException(msg)\n\n log = log.bind(total_commits=len(commits))\n log.debug(\"Filtering commits\", fields=fields)\n\n for c in commits:\n commit_metadata = dict() if result_format == 'dict' else []\n for f in fields:\n data = c\n try:\n for part in f.split('.'):\n data = getattr(data, part)\n if isinstance(data, six.string_types):\n data = data.strip()\n except AttributeError as e:\n msg = 'Commit missing an attribute'\n members = [x[0] for x in inspect.getmembers(data) if not x[0].startswith('_')]\n log.exception(msg, commit=c, attribute=f, data_obj=data, members=members, exc_info=e)\n raise GitToolException(msg)\n\n if result_format == 'dict':\n commit_metadata[f] = data\n else:\n commit_metadata.append(data)\n if result_format == 'flat_list':\n assert isinstance(commit_metadata, list)\n [results.append(x) for x in commit_metadata]\n else:\n results.append(commit_metadata)\n return results", "async def fetch_commits(self):\n for repo in self.config['repos'].split(','):\n since = datetime.min\n async for msg in self.channel.history(limit=None):\n if not msg.embeds:\n continue\n e = msg.embeds[0]\n if e.title == 'github commit' and e.timestamp and repo in e.description: # type: ignore\n since = e.timestamp\n break\n \n await self.update_commit_activity(repo, since)", "def commits(self):\n p = Popen(['git', 'rev-list', '--all', '--timestamp', '--parents'], \n cwd=self.path, stdout=PIPE)\n for line in p.stdout:\n commit_info = line.split()\n if len(commit_info) < 2:\n print >> sys.stderr, \"error: bad line: %r\" % line\n continue\n timestamp = int(commit_info.pop(0))\n commit_info = map(CommitId, commit_info)\n commit_id = commit_info.pop(0)\n yield (timestamp, commit_id, commit_info)", "def grepCommits(query):\n with SHELL.execute(\n 'git', 'rev-list', 'HEAD', '--grep', query,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE) as commits:\n return commits.stdout.read()", "def get_commits(self):\n\n repo_commits = {}\n\n # gets all branches in repository\n branches = self.get_branches()\n if branches is None:\n return None\n\n # get list of commits pages from all branches in repository\n for branch in branches:\n list_of_branch_commits = self.get_commits_by_branch(branch['name'])\n if list_of_branch_commits is None:\n return None\n\n # adds key 'branches' with branch name in list to every commit in branch,\n # or if key 'branches' is existing add branch name to existing branches list\n for commit_in_branch in list_of_branch_commits:\n commit = 
repo_commits.get(commit_in_branch['hash'])\n if commit:\n commit['branches'].append(branch['name'])\n else:\n commit_in_branch['branches'] = [branch['name']]\n repo_commits[commit_in_branch['hash']] = commit_in_branch\n list_of_branch_commits.clear()\n\n # sorts all commits in repository by date in reverse order\n sorted_commits = sorted(list(repo_commits.values()), key=lambda x: x['date'], reverse=True)\n\n # forms a list of commits as an 'get commits API' response\n commits_amount = 30 if len(sorted_commits) >= 30 else len(sorted_commits)\n result_list = sorted_commits[:commits_amount]\n\n return result_list", "def get_details(self, repo=None):\n api_json = []\n\n #get all branches from this repo\n branches = self.make_branches(self.getBranch(repo))\n\n today = datetime.date.today()\n yesterday = today - datetime.timedelta(2)\n\n for branch in branches:\n args = {\"per_page\": \"100\",\n \"sha\": branch,\n \"author\": self.username,\n \"since\": yesterday.isoformat()}\n args = self.make_args(args)\n repo_url = \"/\".join([self.url, \"repos\", repo, \"commits\"])\n repo_url = repo_url + args\n\n request = urllib2.Request(repo_url, headers=self.headers)\n response = urllib2.urlopen(request)\n raw_data = response.read()\n commits_info = self.process_factory(simplejson.loads(raw_data))\n api_json = api_json + commits_info\n\n print repo_url\n\n print api_json\n return api_json", "def get_repo_commits(owner, repo, query_params=None, session=None):\n url = f'{GITHUB_API_URL}/repos/{owner}/{repo}/commits'\n return get_one_item_at_a_time(url, query_params, session)", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def get_commits(self):\n\n repo_commits = {}\n\n # gets all branches in repository\n branches = self.get_branches()\n if branches is None:\n raise BitbucketRequestSenderExc('Can\\'t get branches for get_commits method')\n\n # get list of commits pages from all branches in repository\n for branch in branches:\n list_of_branch_commits = self.get_commits_by_branch(branch['name'])\n if list_of_branch_commits is None:\n raise BitbucketRequestSenderExc(\n 'Can\\'t get commits by branch for get_commits method')\n\n # adds key 'branches' with branch name in list to every commit in branch,\n # or if key 'branches' is existing add branch name to existing branches list\n for commit_in_branch in list_of_branch_commits:\n commit = repo_commits.get(commit_in_branch['hash'])\n if commit:\n commit['branches'].append(branch['name'])\n else:\n commit_in_branch['branches'] = [branch['name']]\n repo_commits[commit_in_branch['hash']] = commit_in_branch\n list_of_branch_commits.clear()\n\n # sorts all commits in repository by date in reverse order\n sorted_commits = sorted(list(repo_commits.values()), key=lambda x: x['date'], reverse=True)\n\n # forms a list of commits as an 'get commits API' response\n commits_amount = 30 if len(sorted_commits) >= 30 else len(sorted_commits)\n result_list = sorted_commits[:commits_amount]\n\n return result_list", "def get_commits(git_path):\n\n proc = subprocess.Popen(\n [\"git\", 
\"--git-dir=%s\" % git_path, \"log\", \"--full-history\",\n \"--format=NEW COMMIT%n%ct%n%aN%n%aE\", \"--numstat\"],\n stdout=subprocess.PIPE)\n line_stack = []\n\n def peek_line():\n if not line_stack:\n line_stack.append(proc.stdout.readline())\n return line_stack[-1]\n\n def pop_line():\n if line_stack:\n return line_stack.pop()\n return proc.stdout.readline()\n\n def push_line(line):\n line_stack.append(line)\n\n def read_commit():\n while peek_line() and not peek_line().strip():\n pop_line()\n if not peek_line(): return None\n assert peek_line().strip() == \"NEW COMMIT\"\n pop_line()\n\n date = int(pop_line())\n name = pop_line().strip()\n email = pop_line().strip()\n author = sanitize_author(name, email)\n\n if peek_line().strip() == \"NEW COMMIT\":\n return date, author, 0, 0, 0\n\n pop_line()\n insertion_count = 0\n deletion_count = 0\n file_count = 0\n while peek_line().strip() and peek_line().strip() != \"NEW COMMIT\":\n insertions, deletions, path = pop_line().strip().split(None, 2)\n if insertions == \"-\": insertions = 0\n if deletions == \"-\": deletions = 0\n insertion_count += int(insertions)\n deletion_count += int(deletions)\n file_count += 1\n\n return date, author, insertion_count, deletion_count, file_count\n\n while True:\n commit = read_commit()\n if commit is None:\n break\n yield commit", "def get_commits(\n start: str | None = None,\n end: str = \"HEAD\",\n *,\n args: str = \"\",\n) -> list[GitCommit]:\n git_log_entries = _get_log_as_str_list(start, end, args)\n git_commits = []\n for rev_and_commit in git_log_entries:\n if not rev_and_commit:\n continue\n rev, title, author, author_email, *body_list = rev_and_commit.split(\"\\n\")\n if rev_and_commit:\n git_commit = GitCommit(\n rev=rev.strip(),\n title=title.strip(),\n body=\"\\n\".join(body_list).strip(),\n author=author,\n author_email=author_email,\n )\n git_commits.append(git_commit)\n return git_commits", "def commits(self):\r\n url = '{0}/commits'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def iter_commits(init_source_repo):\n source_repo_path, _, _ = init_source_repo\n repo = Repo(source_repo_path)\n return repo.iter_commits(\"master\")", "def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)", "def get_commit_data(owner, repo, ref, session=None):\n url = f'{GITHUB_API_URL}/repos/{owner}/{repo}/commits/{ref}'\n return get_whole_response_as_json(url, session)", "def get_commits_in_branch(branch_name):\n output = subprocess.check_output(\"git log --pretty=format:'{}' {} {}\".format(git_format, branch_name, args.extra_args), shell=True)\n lines = output.decode(\"utf-8\").split(\"\\n\")\n out = []\n for line in lines:\n if len(line) <= 1: break\n [sha, author, message] = line.split(\"\t\", 2)\n out.append((sha, author, message))\n out.reverse()\n return out", "def load_commits(db, repo_name):\n\n SEP = \"-=:=-=:=-=:=-=:=-=:=-=:=-=:=-\"\n GITLOG = f\"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: 
%s%n%b%n{SEP}'\"\n SHORT_LINES = 5\n\n # $ git log --format=\"format:---------------------%ndate: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b\"\n # ---------------------\n # date: 2021-04-21T16:13:23-04:00\n # hash: efa13ff1d2fb3d8b2ddee8be0868ae60f9bc35a6\n # auth: [email protected]\n # name: Julia Eskew\n # subj: fix: TNL-8233: Change exception raised at problem creation failure from generic exception to LoncapaProblemError. (#27361)\n # Raising this specific exception will cause the failure to be handled more gracefully by problem rescoring code.\n # ---------------------\n # date: 2021-04-15T21:36:47-04:00\n # hash: a1fe3d58dc112bd975f1237baaee787ba22929f1\n # auth: [email protected]\n # name: Albert (AJ) St. Aubin\n # subj: [bug] Corrected issue where program dash showed incorrect completed count\n # [MICROBA-1163]\n # \n # This change will correct an issue in the Program Dashboard where a user\n # would see a course as completed, but not see their Certificate because\n # it was not available to them yet.\n # ---------------------\n\n with db:\n commit_table = db[\"commits\"]\n\n log = get_cmd_output(GITLOG)\n for i, commit in enumerate(log.split(SEP + \"\\n\")):\n if commit:\n lines = commit.split(\"\\n\", maxsplit=SHORT_LINES)\n row = {\"repo\": repo_name}\n for line in lines[:SHORT_LINES]:\n key, val = line.split(\": \", maxsplit=1)\n row[key] = val\n row[\"body\"] = lines[SHORT_LINES].strip()\n analyze_commit(row)\n commit_table.insert(row)", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def all_commits(change_id):\n commits = []\n manifest = ET.ElementTree(file='.repo/manifest.xml')\n url = (GERRIT_ROOT + 'changes/?o=CURRENT_REVISION&q=status:open+' +\n change_id)\n changes = request.urlopen(url)\n for change in parse_response(changes):\n project = change['project']\n fetch = change['revisions'][change['current_revision']]['fetch']\n # The `ref` is the same for every download scheme, hence we can use\n # the first one that is there\n ref = fetch.values()[0]['ref']\n path = project_path(manifest, project)\n commits.append((project, path, ref))\n return commits", "def main(repo: Repository) -> dict:\n data = {}\n for commit in repo.traverse_commits():\n\n if not author_is_bot(commit.author):\n if commit.author.name in data:\n data[commit.author.name] = data[commit.author.name] + commit.lines\n else:\n data[commit.author.name] = commit.lines\n\n # Sort authors name in the dict\n data = dict(sorted(data.items(), key=lambda x: x[0].lower()))\n return data", "def get_github_commits():\n utcnow = datetime.datetime.utcnow()\n yesterday = utcnow - datetime.timedelta(hours=24)\n yesterday = yesterday.replace(hour=12, minute=0, second=0)\n iso = yesterday.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n txt = [\"> IEM Code Pushes <to branch> on Github\\n\"]\n html = [\"<h3>IEM Code Pushes &lt;to branch&gt; on Github</h3>\"]\n\n # get branches, main is first!\n branches = [\"main\"]\n req = exponential_backoff(requests.get, IEM_BRANCHES, timeout=30)\n for branch in req.json():\n if branch[\"name\"] == \"main\":\n continue\n branches.append(branch[\"name\"])\n\n hashes = []\n links = []\n for branch in branches:\n uri = (\n f\"https://api.github.com/repos/akrherz/iem/commits?since={iso}&\"\n f\"sha={branch}\"\n )\n req2 = exponential_backoff(requests.get, uri, timeout=30)\n 
# commits are in reverse order\n for commit in req2.json()[::-1]:\n if commit[\"sha\"] in hashes:\n continue\n hashes.append(commit[\"sha\"])\n timestring = commit[\"commit\"][\"author\"][\"date\"]\n utcvalid = datetime.datetime.strptime(\n timestring, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n valid = utcvalid.replace(tzinfo=pytz.utc).astimezone(\n pytz.timezone(\"America/Chicago\")\n )\n data = {\n \"stamp\": valid.strftime(\"%b %-d %-2I:%M %p\"),\n \"msg\": commit[\"commit\"][\"message\"],\n \"htmlmsg\": htmlize(commit[\"commit\"][\"message\"])\n .replace(\"\\n\\n\", \"\\n\")\n .replace(\"\\n\", \"<br />\\n\"),\n \"branch\": branch,\n \"url\": commit[\"html_url\"][:-20], # chomp to make shorter\n \"i\": len(links) + 1,\n }\n links.append(\"[%(i)s] %(url)s\" % data)\n txt.append(\n mywrap(\" %(stamp)s[%(i)s] <%(branch)s> %(msg)s\" % data)\n )\n html.append(\n (\n '<li><a href=\"%(url)s\">%(stamp)s</a> '\n \"&lt;%(branch)s&gt; %(htmlmsg)s</li>\\n\"\n )\n % data\n )\n\n if len(txt) == 1:\n txt = txt[0] + \" No code commits found in previous 24 Hours\"\n html = html[0] + (\n \"<strong>No code commits found \" \"in previous 24 Hours</strong>\"\n )\n else:\n txt = \"\\n\".join(txt) + \"\\n\\n\" + \"\\n\".join(links)\n html = html[0] + \"<ul>\" + \"\\n\".join(html[1:]) + \"</ul>\"\n\n return txt + \"\\n\\n\", html + \"<br /><br />\"", "def get_all_commits(self):\n\n repo_commits = {}\n metadata = {}\n\n # gets all branches in repository\n branches = self.get_branches()\n if branches is None:\n return None\n\n # get list of commits pages from all branches in repository\n for branch in branches:\n list_of_branch_commits = self.get_all_commits_by_branch(branch['name'])\n\n if list_of_branch_commits is None:\n return None\n\n # adds key 'branches' with branch name in list to every commit in branch,\n # or if key 'branches' is existing add branch name to existing branches list\n for commit_in_branch in list_of_branch_commits:\n commit = repo_commits.get(commit_in_branch['hash'])\n if commit:\n commit['branches'].append(branch['name'])\n else:\n commit_in_branch['branches'] = [branch['name']]\n repo_commits[commit_in_branch['hash']] = commit_in_branch\n\n # add metadata to method response for further updates by get_updated_all_commits\n metadata[branch['name']] = list_of_branch_commits[0]\n\n list_of_branch_commits.clear()\n\n # sorts all commits in repository by date in reverse order\n sorted_commits = sorted(list(repo_commits.values()), key=lambda x: x['date'], reverse=True)\n\n return {'data': sorted_commits, 'metadata': metadata}", "def commit_detail(self, commit):\n\n files_changes = {\n diff.a_path for diff in commit.diff()\n }\n\n return {\n 'id': commit.hexsha,\n 'date': time.strftime(\n \"%a %b %d %H:%M:%S %Y\",\n time.gmtime(commit.committed_date)\n ),\n 'message': commit.message,\n 'author_name': commit.author.name,\n 'author_email': commit.author.email,\n 'files_change_number': len(files_changes)\n }", "def get_churn_per_commit(dateshas, excludestr):\n\tprint \"sha;date;churn\" # CSV header line\n\ttotal = 0\n\tfor date, sha in dateshas:\n\t\tcommit = None\n\t\tif excludestr:\n\t\t\t# Example command with filtering:\n\t\t\t# git show abcde -w -C --name-status --format=format: \n\t\t\t#\t\tOutputs all the changed files with just their filenames, \n\t\t\t#\t\tas paths from the repository root. 
-w flag ignores \n\t\t\t#\t\twhitespace differences, -C flag detects move moves and \n\t\t\t#\t\trenames and ignores those.\n\t\t\t# cut -f2,3:\n\t\t\t#\t\tCuts out the filename (column 2) and the rename \n\t\t\t#\t\tdestination (column 3, if exists). This is done to not \n\t\t\t#\t\thave the M/A/D/R modification indicator from the \n\t\t\t#\t\t--name-status output.\n\t\t\t# grep -v '^Documentation/':\n\t\t\t#\t\tFilters out all the files which are in the specified \n\t\t\t#\t\tfolders.\n\t\t\t# xargs -L 500 git show abcde -w -C --shortstat -- dummy\n\t\t\t#\t\txargs carries all the files that grep outputs over to git \n\t\t\t#\t\tshow, which formats the\tresult into a line of the form \n\t\t\t#\t\t'X files changed, Y insertions(+), Z deletions(-)'.\n\t\t\t#\t\tUsing xargs because OS X has a wonky and unpredictable \n\t\t\t#\t\targument list length limit,\tso this should makes the \n\t\t\t#\t\tscript more portable. 'dummy' is specified to ensure an \n\t\t\t#\t\tempty set from grep does not lead to 'git show' showing \n\t\t\t#\t\teverything.\n\t\t\tshow = subprocess.Popen(['git', 'show', sha, '-w', '-C', \n\t\t\t\t\t\t'--name-status', '--format=format:'], \n\t\t\t\t\t\tstdout=subprocess.PIPE)\n\t\t\tcut = subprocess.Popen(['cut', '-f2,3'], stdin=show.stdout, \n\t\t\t\t\t\tstdout=subprocess.PIPE)\n\t\t\tgrep = subprocess.Popen(['grep', '-v', excludestr], \n\t\t\t\t\t\tstdin=cut.stdout, stdout=subprocess.PIPE)\n\t\t\txargs = subprocess.Popen(['xargs', '-L', '500', 'git', 'show', \n\t\t\t\t\t\tsha, '-w', '-C', '--shortstat', \n\t\t\t\t\t\t'--format=format:', '--', 'dummy'], \n\t\t\t\t\t\tstdin=grep.stdout, stdout=subprocess.PIPE)\n\t\t\tcommit = xargs.stdout.readlines()\n\t\telse:\n\t\t\t# If there is no excludestr, we can simply ask for the shortstat \n\t\t\t# information.\n\t\t\tshow = subprocess.Popen(['git', 'show', sha, '-w', '-C', \n\t\t\t\t\t\t'--shortstat', '--format=format:'], \n\t\t\t\t\t\tstdout=subprocess.PIPE)\n\t\t\tcommit = show.stdout.readlines()\n\n\t\t# Remove leading/trailing newlines\n\t\tcommit = [x[:-1] for x in commit if x != '\\n']\n\n\t\t# Because of the xargs approach, there might be multiple result \n\t\t# lines. Iterate over all of them and sum the churn. 
That is, if there \n\t\t# are actually results left after directory filtering\n\t\tchurn = 0\n\t\tfor line in commit:\n\t\t\tif len(line) > 0:\n\t\t\t\ttry:\n\t\t\t\t\tadded = int(line.split()[3])\n\t\t\t\texcept:\n\t\t\t\t\tadded = 0\n\t\t\tchurn += added\n\t\tif churn > 0:\n\t\t\ttotal += churn\n\t\t\tprint \"%s;%s;%d\" % (sha[:8],str(date), churn)\n\n\treturn total", "def display_repos_and_commits(github_id):\r\n\r\n repo_list = get_repos(github_id)\r\n\r\n for repo in repo_list:\r\n commits_count = get_commits(github_id, repo)\r\n print('Repo: {} Number of commits: {}'.format(repo, commits_count))", "def get_repo_data(gh, user, name, start, end):\n repo = gh.get_repo(name)\n\n commits = []\n issues = []\n\n all_commits = repo.get_commits(author=user, since=start, until=end)\n for commit in all_commits:\n # skip merge commits\n if len(commit.parents) == 1:\n commits.append(commit)\n\n all_issues = repo.get_issues(\n assignee=user, state='closed', sort='updated', direction='desc'\n )\n for issue in all_issues:\n if issue.updated_at < start:\n break\n if issue.closed_at > end:\n continue\n if issue.closed_at < start:\n continue\n issues.append(issue)\n\n return issues, commits", "def test_get_git_commit(self):\n git_commit = get_git_commit()\n # output format: ['fafdb957049917ede565cebc58b29899f597fb5a', 'Fri Mar 29 11:09:50 2019 -0400']\n self.assertEqual(len(git_commit[0]), 40)\n self.assertEqual(len(git_commit[1].split()), 6)", "def _format_commits(self, client, repo, commit_list):\n return [\n {\n 'id': c['id'],\n 'repository': repo.name,\n 'author_email': c['author_email'],\n 'author_name': c['author_name'],\n 'message': c['title'],\n 'timestamp': self.format_date(c['created_at']),\n 'patch_set': self._get_patchset(client, repo, c['id'])\n } for c in commit_list\n ]" ]
[ "0.70833033", "0.68818825", "0.68100065", "0.67859095", "0.67429984", "0.66558653", "0.66049534", "0.65828025", "0.6567603", "0.65087473", "0.64007646", "0.6381191", "0.6355372", "0.63061655", "0.6305687", "0.62776846", "0.6246901", "0.62380195", "0.61890846", "0.6171896", "0.6135823", "0.61266917", "0.61225957", "0.61143243", "0.6095508", "0.6081292", "0.6054235", "0.60452145", "0.6035727", "0.60083556" ]
0.7930775
0
Gets all the public repos
def get_public_repos(self, max_repos=DEFAULT_MAX_PUBLIC_REPOS): since = 0 repo_count = 0 repos = [] while repo_count < max_repos: temp = self.process_repo(self.get_full_url(ALL_REPO_LIST.format(since=since)), True) repos.extend(temp) repo_count = len(repos) #TODO count if repos <= max_repos print 'repos =', len(repos), 'temp=', len(temp) since = temp[-1]['id'] return repos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_public_repos():\n return Collaborator.objects.filter(user__username=settings.PUBLIC_ROLE)", "def get_repos():\n\n return __do_get_repos()", "def get_repos():\n response = requests.get('https://quay.io/api/v1/repository?public=true&namespace=ucsc_cgl')\n repo_data = json.loads(response.text)\n assert response.status_code == 200, 'Quay.io API request to view repositories failed.'\n repos = {str(x[u'name']) for x in repo_data['repositories']}\n return repos", "def repolist(orgname, refresh=True):\n filename = os.path.join(SETTINGS[\"folder\"], orgname.lower()) + \"/repodata.json\"\n if not refresh and os.path.isfile(filename):\n repodata = json.loads(open(filename, \"r\").read()) # read cached data\n else:\n endpoint = \"/orgs/\" + orgname.lower() + \"/repos?per_page=100\"\n repodata = github_allpages(endpoint=endpoint)\n dicts2json(repodata, filename)\n print(\n f\"\\r{orgname} - {len(repodata)} total public non-forked repos found\"\n + 60 * \" \"\n )\n\n return sorted(\n [\n (repo[\"name\"].lower(), repo[\"size\"])\n for repo in repodata\n if not repo[\"private\"] and not repo[\"fork\"]\n ]\n )", "def _get_org_repos(self):\n url = f\"{BASE_URL}/orgs/{ORG}/repos\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def get_repos(self):\n return requests.get(\"https://api.github.com/user/repos\",\n headers=self.headers).json", "def listRepositories(self):\n return self.mini_catalog.listRepositories()", "def get_repositories(self):\n \n endpoint = 'repositories'\n parameters = [('pagelen', '100')]\n \n if len(self.organization):\n endpoint += f'/{self.organization}' \n parameters.append(('role', 'contributor')) \n else: \n parameters.append(('role', 'owner'))\n \n repositories_raw_data = self.__request_api(f'{self.base_url}{endpoint}?{urllib.parse.urlencode(parameters)}', method='GET')\n repositories = []\n has_next_page = True\n \n while has_next_page:\n for datum in repositories_raw_data['values']:\n clone_url = None\n for link in datum['links']['clone']:\n if link['name'] == 'ssh':\n clone_url = link['href']\n break\n \n project_name = None\n if \"name\" in datum['project']:\n project_name = datum['project']['name']\n \n repositories.append(VcsRepository(datum['slug'], datum['description'], clone_url, datum['is_private'], project_name))\n \n has_next_page = \"next\" in repositories_raw_data\n \n if has_next_page: \n repositories_raw_data = self.__request_api(repositories_raw_data[\"next\"], method='GET')\n\n return repositories", "def repos(self):\r\n return repositories.Repos(self)", "def n_public_repos(gh, user):\n return getuser(gh, user).public_repos", "def get_repos(self):\n\n if self.url == 'test':\n repos = ['feature', 'dev', 'int']\n else:\n repos = []\n\n return repos", "def repos(self):\r\n return repos.Repos(self)", "def getuserrepos(gh, user):\n repos = list()\n pages = int(math.ceil(n_public_repos(gh, user) / float(R_PAGE)))\n for i in range(pages):\n # github index their pages from 1, hence the +1\n qs = user + \"/repos?page=\" + str(i + 1)\n repos.extend(gh.users(qs).get())\n return repos", "def repo_list(self):\n\n data, _ = self.helm_client.repo_list()\n return data", "def get_repos(self):\n\t\tsession = self.login()\n\t\titems = session.query(Repos)\n\t\tresponse = [row2dict(item) for item in items]\n\n\t\tself.logout(session)\n\t\treturn response", "def list_repos(self):\n return sorted(self.user_con.list_repos())", "def get(self) -> Iterable[instarepo.github.Repo]:\n return self._filter_pushed_after(\n 
self._filter_pushed_before(\n self._filter_language(\n self._filter_prefix(\n self._filter_forks(\n self._filter_archived(\n self.github.get_all_repos(self.sort, self.direction)\n )\n )\n )\n )\n )\n )", "def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val", "def query_repos(self):\n return [self.config[\"repo\"]]", "def fetch_repos(self):\n for repo in self.json_repos['repos']:\n title = str(repo[\"title\"])\n repo_url = str(repo['repo'])\n self.repos[title] = repo_url", "def list_repositories(self):\n repos = self.repo_conn.list_repositories()\n return repos", "def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r", "def list_repositories(self):\n data = self._get_all_data('/user/repos')\n return [repo['full_name'] for repo in data]", "def get_repos(cls):\n dcSql = DevCenterSQL()\n repos = dcSql.get_repos()\n return {'status': True, 'data': repos}", "def fetch_repos(self):\n logging.info(\"Fetching repositories in: %s\" % self.name)\n list_cmd = [\n \"az\",\n \"acr\",\n \"repository\",\n \"list\",\n \"-n\",\n self.name,\n \"-o\",\n \"tsv\",\n ]\n\n result = run_cmd(list_cmd)\n\n if result[\"returncode\"] != 0:\n logging.error(result[\"err_msg\"])\n raise AzureError(result[\"err_msg\"])\n\n logging.info(\"Successfully fetched repositories from: %s\" % self.name)\n repos = result[\"output\"].split(\"\\n\")[:-1]\n logging.info(\"Total number of repositories: %d\" % len(repos))\n\n return repos", "def repos():\n print(\"\\nThe following repos are available.\\n\")\n NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"NAME_SHELF\")))\n INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"INDEX_SHELF\")))\n\n print(\"{:<4} {:<20} {:<}\".format(\"Key\", \"| Name\", \"| Path\"))\n print(\"******************************************\")\n for key in INDEX_SHELF.keys():\n name = INDEX_SHELF[key]\n print(\"{:<4} {:<20} {:<}\".format(key, name, str(NAME_SHELF[name])))\n INDEX_SHELF.close()\n NAME_SHELF.close()", "def do_repo_list(self):\n return StringResult(self._repo_list.format_available_repos())", "def get_repositories(\n self, *, params: Optional[dict] = None\n ) -> \"resource_types.Repositories\":\n\n return communicator.Repositories(self.__requester).fetch(parameters=params)", "def get_repositories(self) -> None:\n\n self.log.info(\"Fetching repositories for %s\", self.name)\n\n catalog = self.raw_client.get_catalog().json()\n self.log.info(\"Found the following repositories in registry %s:\", self.name)\n for repo in catalog['repositories']:\n tags = self.raw_client.get_tags(repo).json()['tags']\n if tags is None:\n tags = []\n self.log.debug(\"\\t%s with %s tags\", repo, len(tags))\n self.repositories[repo] = Repository(name=repo, registry=self, tags=tags)\n self.log.info(self.repositories[repo])", "def collect_org_repos(self):\n log.info(\"GHUB\", \"Collecting org repos.\")\n raw_repos = self._get_org_repos()\n preprocessed_repos = self._preprocess_repos(raw_repos)\n parsed_repos = json_reducer.reduce(REPOS_SCHEMA, 
preprocessed_repos)\n result = []\n for repo in parsed_repos:\n result.append(repo)\n return result" ]
[ "0.806337", "0.76858354", "0.76047444", "0.75227976", "0.7520303", "0.75177026", "0.7487321", "0.74738944", "0.7405739", "0.7387148", "0.7383653", "0.73297626", "0.7236298", "0.71387374", "0.71294105", "0.71154386", "0.7107003", "0.7050756", "0.70224655", "0.7014604", "0.70059806", "0.6981623", "0.68992704", "0.6888879", "0.68786126", "0.6854684", "0.68371284", "0.68223786", "0.68020993", "0.67951167" ]
0.7997378
1
Specifies that a property on a PlanningSolution class is a Collection of problem facts. A problem fact must not change during solving (except through a ProblemFactChange event). The constraints in a ConstraintProvider rely on problem facts for ConstraintFactory.from(Class).
def problem_fact_collection_property(fact_type): def problem_fact_collection_property_function_mapper(getter_function): ensure_init() from org.optaplanner.optapy import PythonWrapperGenerator from org.optaplanner.core.api.domain.solution import \ ProblemFactCollectionProperty as JavaProblemFactCollectionProperty getter_function.__return = PythonWrapperGenerator.getArrayClass(fact_type.__javaClass) getter_function.__optaplannerPlanningEntityCollectionProperty = { 'annotationType': JavaProblemFactCollectionProperty } return getter_function return problem_fact_collection_property_function_mapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def planning_entity_collection_property(entity_type):\n def planning_entity_collection_property_function_mapper(getter_function):\n ensure_init()\n from org.optaplanner.optapy import PythonWrapperGenerator\n from org.optaplanner.core.api.domain.solution import \\\n PlanningEntityCollectionProperty as JavaPlanningEntityCollectionProperty\n getter_function.__optaplannerPlanningEntityCollectionProperty = {\n 'annotationType': JavaPlanningEntityCollectionProperty\n }\n getter_function.__return = PythonWrapperGenerator.getArrayClass(entity_type.__javaClass)\n return getter_function\n return planning_entity_collection_property_function_mapper", "def collection(self):\r\n raise NotImplementedError", "def external_vars_validity_constraint(self, output_prop):\n return []", "def get_constraint_list(self):\n constraints = []\n for i in xrange(self.num_repeats):\n # Using start_index, start each domain at the correct index when flattening out points in COBYLA.\n constraints.extend(self._domain.get_constraint_list(start_index=self.dim * i))\n return constraints", "def problems(self):\n return self.configuration.problems", "def solutions_for_dep_problem(self, solution: 'DepAlgoSolution', found_problems: Set['DepAlgoFoundProblems'],\n installed_system: 'System', upstream_system: 'System',\n deps_to_deep_check: Set[str]) -> List['DepAlgoSolution']:\n\n def filter_solutions(solutions: Sequence['DepAlgoSolution']) -> List['DepAlgoSolution']:\n \"\"\"\n Filter given solutions so that only valid solutions are left\n or in case of no valid solutions only one invalid solution\n\n :param solutions: The solutions to filter\n :return: The filtered solutions\n \"\"\"\n return_solutions: List['DepAlgoSolution'] = []\n\n for solution in solutions:\n if not return_solutions:\n return_solutions.append(solution)\n continue\n\n first_solution = return_solutions[0]\n if first_solution.is_valid and solution.is_valid:\n return_solutions.append(solution)\n elif first_solution.is_valid:\n continue\n elif solution.is_valid:\n return_solutions = [solution]\n\n return return_solutions\n\n if self in solution.installed_solution_packages:\n return [solution.solution_copy()]\n\n # dep cycle\n # dirty... 
thanks to dep cycle between mesa and libglvnd\n if self in solution.visited_packages and not (self.type_of is PossibleTypes.REPO_PACKAGE):\n # problem only relevant\n # if the solution is not already invalid\n if solution.is_valid:\n index_of_self = solution.visited_packages.index(self)\n cycle_packages = []\n for i in range(index_of_self, len(solution.visited_packages)):\n cycle_packages.append(solution.visited_packages[i])\n cycle_packages.append(self)\n\n # create the problem\n cycle_problem = DepAlgoCycle(cycle_packages)\n for package in cycle_packages:\n cycle_problem.relevant_packages.add(package)\n cycle_problem.relevant_packages |= set(solution.dict_to_way.get(package.name, []))\n found_problems.add(cycle_problem)\n invalid_sol = solution.solution_copy()\n invalid_sol.is_valid = False\n return [invalid_sol]\n\n # pacman has to handle dep cycles between repo packages\n elif self in solution.visited_packages:\n return [solution.solution_copy()]\n\n # copy solution and add self to visited packages\n solution: 'DepAlgoSolution' = solution.solution_copy()\n is_build_available: bool = self in solution.packages_in_solution\n own_way: List['Package'] = solution.dict_to_way.get(self.name, [])\n own_not_to_delete_deps: Set[str] = set()\n solution.visited_packages.append(self)\n current_solutions: List['DepAlgoSolution'] = [solution]\n\n # filter not fulfillable deps\n relevant_deps = self.relevant_deps()\n for dep in relevant_deps[:]:\n\n # skip since already provided\n if installed_system.provided_by(dep):\n continue\n\n # skip since built package available and dep is not a normal dependency\n # so it's make and/or check dep\n if is_build_available and dep not in self.relevant_deps(only_depends=True):\n continue\n\n # dep not fulfillable, solutions not valid\n if not upstream_system.provided_by(dep):\n for solution in current_solutions:\n solution.is_valid = False\n\n # create problem\n dep_problem = DepAlgoNotProvided(dep, self)\n dep_problem.relevant_packages.add(self)\n dep_problem.relevant_packages |= set(own_way)\n found_problems.add(dep_problem)\n\n relevant_deps.remove(dep)\n\n # AND - every dep has to be fulfilled\n # we filtered the unfulfillable deps,\n # hence at least one dep provider is available\n for dep in relevant_deps:\n\n # skip since already provided\n if installed_system.provided_by(dep):\n continue\n\n # skip since built package available and dep is not a normal dependency\n # so it's make and/or check dep\n if is_build_available and dep not in self.relevant_deps(only_depends=True):\n continue\n\n # fetch dep providers\n dep_providers = upstream_system.provided_by(dep)\n dep_providers_names = [package.name for package in dep_providers]\n dep_stripped_name = strip_versioning_from_name(dep)\n\n # we only need relevant dep providers\n # deps_to_deep_check will be filled\n # when we encounter problems as dep-cycle, conflicts ...\n if dep_stripped_name in dep_providers_names and dep not in deps_to_deep_check:\n dep_providers = [package for package in dep_providers if package.name == dep_stripped_name]\n\n # OR - at least one of the dep providers needs to provide the dep\n finished_solutions = [solution for solution in current_solutions if dep in solution.visited_names]\n not_finished_solutions = [solution for solution in current_solutions if dep not in solution.visited_names]\n\n # check if dep provided by one of the packages already in a solution\n new_not_finished_solutions = []\n for solution in not_finished_solutions:\n if 
System(list(solution.installed_solution_packages)).provided_by(dep):\n finished_solutions.append(solution)\n else:\n new_not_finished_solutions.append(solution)\n not_finished_solutions = new_not_finished_solutions\n\n # track deps which may not be deleted\n for solution in current_solutions:\n if dep not in solution.not_to_delete_deps:\n solution.not_to_delete_deps.add(dep)\n own_not_to_delete_deps.add(dep)\n\n # calc and append new solutions\n current_solutions = finished_solutions\n # used for tracking problems\n new_problems_master: List[Set['DepAlgoFoundProblems']] = []\n found_problems_copy: Set['DepAlgoFoundProblems'] = set(found_problems)\n for solution in not_finished_solutions:\n\n # add dep to visited names\n # and create another container\n # for problem tracking\n solution.visited_names.add(dep)\n new_problems: List[Set['DepAlgoFoundProblems']] = []\n\n for dep_provider in dep_providers:\n # way to the package being called in the current solution\n if dep_provider.name not in solution.dict_to_way:\n way_added = True\n solution.dict_to_way[dep_provider.name] = own_way[:]\n solution.dict_to_way[dep_provider.name].append(self)\n else:\n way_added = False\n # tracking for which deps the package being called has been chosen as provider\n if dep_provider.name not in solution.dict_to_deps:\n solution.dict_to_deps[dep_provider.name] = set()\n solution.dict_to_deps[dep_provider.name].add(dep)\n\n # call this function recursively on the dep provider\n # and yield an empty found_problems set instance\n found_problems.clear()\n current_solutions.extend(\n dep_provider.solutions_for_dep_problem(\n solution, found_problems, installed_system, upstream_system, deps_to_deep_check\n )\n )\n # save the new problems\n new_problems.append(set(found_problems))\n # remove added things\n solution.dict_to_deps[dep_provider.name].remove(dep)\n if way_added:\n del solution.dict_to_way[dep_provider.name]\n\n # reset the problems to the problems\n # we had before calling the dep\n found_problems.clear()\n for problem in found_problems_copy:\n found_problems.add(problem)\n\n # if there is at least one valid solution\n # problems are not relevant\n # hence add an empty set containing no problems\n for problems in new_problems:\n if not problems:\n new_problems_master.append(set())\n break\n\n # if there are problems contained in all return values\n # show them to the user\n # will most likely be unfulfillable deps in general\n else:\n prob_in_all_ret = set.intersection(*new_problems)\n if prob_in_all_ret:\n new_problems_master.append(prob_in_all_ret)\n # otherwise append all found problems\n else:\n new_problems_master.append(set.union(*new_problems))\n\n # again - at least one valid solution\n # means new problems are not relevant\n if not_finished_solutions:\n for problems in new_problems_master:\n if not problems:\n break\n else:\n for problem in set.union(*new_problems_master):\n found_problems.add(problem)\n\n # filter solutions so that irrelevant solutions are not being\n # used anymore\n # great impact on the performance\n current_solutions = filter_solutions(current_solutions)\n\n # conflict checking\n for solution in current_solutions:\n # as with dep cycles,\n # conflicts are only relevant\n # if the solution is not already invalid\n if not solution.is_valid:\n continue\n\n # generate hypothetic system containing the packages of the current solution\n # and check for conflicts with that system\n installed_packages = list(solution.installed_solution_packages)\n conf_system = 
System(installed_packages).conflicting_with(self)\n\n # if there are no conflicts, nothing will get deleted, so we may\n # safely assume that we do not get an invalid solution\n if not conf_system:\n continue\n\n # append the whole current solution to the currently\n # installed system\n # may be empty in case of deep_search\n packages_to_append = solution.packages_in_solution[:]\n packages_to_append.append(self)\n new_system = installed_system.hypothetical_append_packages_to_system(packages_to_append)\n\n # prepare message for conflict\n additional_message = \"\"\n\n # if self cannot be added, this solution\n # is clearly not valid\n if self.name not in new_system.all_packages_dict:\n additional_message = \"Tried to install {}, but it was not possible.\".format(\n Colors.BOLD(Colors.LIGHT_MAGENTA(self.name))\n )\n is_possible = False\n else:\n is_possible = True\n\n # these deps have to remain provided,\n # since they are needed for a package which\n # has not been installed yet\n # e.g. A needs B and C, B has been solved with this algo\n # but C not, hence B must remain provided\n # otherwise A cannot be installed\n for dep in solution.not_to_delete_deps:\n if not is_possible:\n break\n if not new_system.provided_by(dep):\n additional_message = \"While trying to install {}, the needed dependency {} has been removed\".format(\n Colors.BOLD(Colors.LIGHT_MAGENTA(self.name)),\n Colors.BOLD(Colors.LIGHT_MAGENTA(dep))\n )\n is_possible = False\n break\n\n # same for packages which have to remain installed\n for package in installed_packages:\n if not is_possible:\n break\n if solution.dict_call_as_needed.get(package.name, False) \\\n and package.name not in new_system.all_packages_dict:\n additional_message = \"The package {} had to remain installed, \" \\\n \"but has been removed.\\n\" \\\n \"The package which lead to the removal is {}\" \\\n \"\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(package.name)),\n Colors.BOLD(Colors.LIGHT_MAGENTA(self.name)))\n\n break\n\n # solution possible at this point if there are no installed packages\n else:\n # check which packages have been removed\n # due to adding the packages\n for package in installed_packages:\n # remove all remainings of the package\n # besides the knowledge that the package\n # has already been built\n if package.name not in new_system.all_packages_dict:\n solution.installed_solution_packages.remove(package)\n if package.name in solution.dict_to_deps:\n for dep in solution.dict_to_deps[package.name]:\n solution.visited_names.remove(dep)\n del solution.dict_to_deps[package.name]\n if package.name in solution.dict_to_way:\n del solution.dict_to_way[package.name]\n\n # for the case that there are no installed packages\n if is_possible:\n continue\n\n # solution not possible!\n solution.is_valid = False\n conflicting_packages = set(conf_system)\n conflicting_packages.add(self)\n ways_to_conflict = []\n for package in conflicting_packages:\n way_to_conflict = solution.dict_to_way.get(package.name, [])[:]\n way_to_conflict.append(package)\n ways_to_conflict.append(way_to_conflict)\n\n # create the problem\n conflict_problem = DepAlgoConflict(conflicting_packages, ways_to_conflict)\n conflict_problem.additional_message = additional_message\n for way_to_conflict in ways_to_conflict:\n for package in way_to_conflict:\n conflict_problem.relevant_packages.add(package)\n found_problems.add(conflict_problem)\n\n # we have valid solutions left, so the problems are not relevant\n if [solution for solution in current_solutions if solution.is_valid]:\n 
found_problems.clear()\n\n # add self to packages in solution, those are always topologically sorted\n for solution in current_solutions:\n solution.not_to_delete_deps -= own_not_to_delete_deps\n solution.installed_solution_packages.add(self)\n solution.packages_in_solution.append(self)\n solution.visited_packages.remove(self)\n\n # may contain invalid solutions !!!\n # but also filtered\n return filter_solutions(current_solutions)", "def addProblems(self):\n if self.pid in Problem.problems: \n for prob in Problem.problems[self.pid]:\n subs = {'end': {'end': '2010-09-13'}}\n self._set_default_attrs(prob, subs)\n prob_string = PROBLEM.sub({\n 'onset':prob.start,\n 'resolution':prob.end,\n 'snomed':prob.snomed, \n 'name':prob.name\n }).done()\n self.data.append(SDMX.sub({'models':prob_string}, escape=False).done())", "def RestrictionOneToManyDependency(self, alphaCompId, betaCompId, noInstances):\n if self.solverTypeOptimize:\n\n bvars1 = [(self.a[alphaCompId * self.nrVM + j], noInstances) for j in range(self.nrVM)]\n bvars2 = [(self.a[betaCompId * self.nrVM + j], -1) for j in range(self.nrVM)]\n bvars = bvars1 + bvars2\n self.solver.add(PbGe(bvars, 0))\n else:\n self.solver.assert_and_track(\n PbGe(noInstances * sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) -\n sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]), 0), \"LabelOneToMany: \" + str(self.labelIdx))\n self.labelIdx += 1\n\n if self.solverTypeOptimize:\n bvars1 = [(self.a[alphaCompId * self.nrVM + j], noInstances) for j in range(self.nrVM)]\n bvars2 = [(self.a[betaCompId * self.nrVM + j], -1) for j in range(self.nrVM)]\n bvars = bvars1 + bvars2\n self.solver.add(PbLe(bvars, 1 + noInstances))\n\n\n\n else:\n self.solver.assert_and_track(\n PbLe(noInstances *\n sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) -\n sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)])-1, noInstances),\n \"LabelOneToMany: \" + str(self.labelIdx))\n self.labelIdx += 1", "def isCollection(self):\n unimplemented(self)", "def test_guess_and_set_use_collection_no_configuration(self) -> None:\n\n self.checker.guess_and_set_use_collection()\n actual = self.checker.use_collection\n expected = False\n\n self.assertEqual(expected, actual)", "def test_set_use_collection_not_bool(self) -> None:\n\n given = [\"Hello\", \"World!\"]\n\n self.assertRaises(TypeError, lambda: self.checker.set_use_collection(given))", "def constraints(self):\n ...", "def problem_list(self):\r\n return [{\r\n 'location': location, 'problem_name': name,\r\n 'num_graded': self.DUMMY_DATA['problem_list_num_graded'],\r\n 'num_pending': self.DUMMY_DATA['problem_list_num_pending'],\r\n 'num_required': self.DUMMY_DATA['problem_list_num_required']\r\n } for location, name in self.problems.items()\r\n ]", "def _validate_plurals(mapping: Mapping[str, Any],\n ref: str) -> List[SchemaError]:\n if 'classes' not in mapping:\n return []\n\n if 'properties' not in mapping:\n return []\n\n registry_property_to_class_name = dict() # type: Dict[str, str]\n\n for cls_mapping in mapping['classes']:\n if 'plural' in cls_mapping:\n plural = cls_mapping['plural']\n else:\n # Ignore a class that does not have a valid name.\n # Assume that this error will be caught by\n # the JSON schema validation.\n if 'name' not in cls_mapping:\n continue\n\n plural = mapry.naming.plural(identifier=cls_mapping['name'])\n\n plural_as_property = mapry.naming.json_plural(a_plural=plural)\n registry_property_to_class_name[plural_as_property] = (\n 
cls_mapping['name'])\n\n errs = [] # type: List[SchemaError]\n\n if isinstance(mapping['properties'], collections.OrderedDict):\n property_names = list(mapping['properties'].keys())\n else:\n property_names = sorted(mapping['properties'].keys())\n\n for property_name in property_names:\n if property_name in registry_property_to_class_name:\n errs.append(\n SchemaError(\n message=(\n 'Graph property {!r} conflicts with the plural '\n 'necessary for the registry of class {!r}').format(\n property_name,\n registry_property_to_class_name[property_name]),\n ref='{}/{}'.format(ref, property_name)))\n\n return errs", "def __init__(self, constraints: List[ConstraintExpr]):\n self.constraints = constraints", "def __init__(self, constraints: List[ConstraintExpr]):\n self.constraints = constraints", "def primitive_requirements(self):\n\t\treturn self.typemanager.primitive_list", "def objective_constraints(self, variables, mask, reservations, mpc_ene=None):\n constraint_list = []\n ice_gen = variables['ice_gen']\n on_ice = variables['on_ice']\n\n constraint_list += [cvx.NonPos(cvx.multiply(self.p_min, on_ice) - ice_gen)]\n constraint_list += [cvx.NonPos(ice_gen - cvx.multiply(self.rated_power*self.n, on_ice))]\n\n return constraint_list", "def constraints(self):\n return self._constraints", "def constraints(self):\n return self._constraints", "def updated_fixed_properties(cls, obj):\n out = super(ObjMetaschemaType, cls).updated_fixed_properties(obj)\n # Constrain dependencies for indexes into other elements\n depend_map = {'vertex_index': 'vertices', 'vertex_indices': 'vertices',\n 'texcoord_index': 'texcoords',\n 'normal_index': 'normals'}\n check_depends = {'lines': ['texcoord_index'],\n 'faces': ['texcoord_index', 'normal_index'],\n 'surfaces:vertex_indices': ['texcoord_index', 'normal_index']}\n for e, props in check_depends.items():\n sube = None\n if ':' in e:\n e, sube = e.split(':')\n if not ((e in obj) and isinstance(obj[e], (list, tuple))):\n continue\n req_flags = {k: False for k in props}\n for o in obj[e]:\n if sum(req_flags.values()) == len(props):\n break\n if isinstance(o, dict):\n assert(sube)\n if (((sube not in o) or (not isinstance(o[sube], (list, tuple)))\n or (len(o[sube]) == 0) or (not isinstance(o[sube][0], dict)))):\n continue\n for p in props:\n if p in o[sube][0]:\n req_flags[p] = True\n elif isinstance(o, (list, tuple)):\n if (len(o) == 0) or (not isinstance(o[0], dict)):\n continue\n for p in props:\n if p in o[0]:\n req_flags[p] = True\n # Set dependencies\n for p in req_flags.keys():\n if not req_flags[p]:\n continue\n if depend_map[p] not in out['dependencies'][e]:\n out['dependencies'][e].append(depend_map[p])\n # Contrain indices on number of elements refered to\n if ('vertices' in obj) and isinstance(obj['vertices'], (list, tuple)):\n out['definitions']['curve']['properties']['vertex_indices']['items'][\n 'maximum'] = len(obj['vertices']) - 1\n if ('params' in obj) and isinstance(obj['params'], (list, tuple)):\n out['definitions']['curve2D']['items']['maximum'] = len(obj['params']) - 1\n for e in ['line', 'face', 'surface']:\n if e == 'surface':\n iprop = out['definitions'][e]['properties']['vertex_indices'][\n 'items']['properties']\n else:\n iprop = out['definitions'][e]['items']['properties']\n for k, e_depends in depend_map.items():\n if k in iprop:\n if (e_depends in obj) and isinstance(obj[e_depends], (list, tuple)):\n iprop[k]['maximum'] = len(obj[e_depends]) - 1\n return out", "def external_vars_pr_one_constraint(self, output_prop):\n return []", "def 
create(\n cls: _CollectionAlias,\n parameter_handler: list,\n topology: Topology,\n bonds: Optional[SMIRNOFFBondCollection] = None,\n ) -> \"SMIRNOFFConstraintCollection\":\n if isinstance(parameter_handler, list):\n parameter_handlers = parameter_handler\n else:\n parameter_handlers = [parameter_handler]\n\n for parameter_handler in parameter_handlers:\n if type(parameter_handler) not in cls.allowed_parameter_handlers():\n raise InvalidParameterHandlerError(type(parameter_handler))\n\n collection = cls()\n collection.store_constraints(\n parameter_handlers=parameter_handlers,\n topology=topology,\n bonds=bonds,\n )\n\n return collection", "def __init__(self):\r\n self.tied_indices = []\r\n self.fixed_indices = []\r\n self.fixed_values = []\r\n self.constrained_indices = []\r\n self.constraints = []", "def validate_solutions(self, solutions: List[List['Package']], needed_packages: Sequence['Package']) -> List[\n Tuple['System', List['Package']]]:\n\n # calculate new systems\n new_systems = [self.hypothetical_append_packages_to_system(solution) for solution in solutions]\n valid_systems_tuples = []\n # find valid systems\n for i, new_system in enumerate(new_systems):\n for package in needed_packages:\n if package.name not in new_system.all_packages_dict:\n break\n else:\n valid_systems_tuples.append((new_system, solutions[i]))\n\n # no valid solutions\n if not valid_systems_tuples:\n return []\n\n # calculate the differences between the resulting systems for the valid solutions\n systems_differences = self.differences_between_systems(\n [valid_systems_tuple[0] for valid_systems_tuple in valid_systems_tuples])\n\n # delete duplicate resulting systems\n return_list = []\n already_seen_differences = set()\n for i, valid_systems_tuple in enumerate(valid_systems_tuples):\n difference_set = frozenset(set.union(systems_differences[1][i][0], systems_differences[1][i][1]))\n if difference_set not in already_seen_differences:\n already_seen_differences.add(difference_set)\n return_list.append(valid_systems_tuple)\n\n return return_list", "def getListOfConstraints(self, *args):\n return _libsbml.Model_getListOfConstraints(self, *args)", "def potential_new_obs(self) -> Iterable[GriddedPerm]:", "def problem_fact(fact_class):\n ensure_init()\n out = JImplements('org.optaplanner.optapy.OpaquePythonReference')(fact_class)\n out.__javaClass = _generate_problem_fact_class(fact_class)\n _add_deep_copy_to_class(out)\n return out", "def from_problem(problem, autopass_constraints=True):\n\n def evaluate(constraint):\n if (\n autopass_constraints\n and constraint.enforced_by_nucleotide_restrictions\n ):\n return SpecEvaluation(\n constraint,\n problem,\n score=1,\n locations=[],\n message=\"Enforced by nucleotides restrictions\",\n )\n else:\n return constraint.evaluate(problem)\n\n return ProblemConstraintsEvaluations(\n [evaluate(constraint) for constraint in problem.constraints],\n problem=problem,\n )", "def compute_clique_potentials(self,F):\r\n\r\n for i in self.nodes():\r\n self.node[i]['fac'] = factor([],[],[])\r\n \r\n for f in F.factors: # assign each factor to a clique\r\n for j,data in self.nodes_iter(data=True):\r\n if len(scipy.setdiff1d(f.var,data['clique']) ) ==0:\r\n self.node[j]['fac'] *= f\r\n self.nop += scipy.prod(self.node[j]['fac'].card)\r\n break" ]
[ "0.5206215", "0.5189513", "0.50499916", "0.5004489", "0.50003415", "0.48822853", "0.48719192", "0.48710668", "0.48444247", "0.4822283", "0.48182458", "0.47978848", "0.47941014", "0.4786141", "0.47829002", "0.47829002", "0.47618714", "0.47462758", "0.47167763", "0.47167763", "0.47056627", "0.4690654", "0.4681131", "0.46603763", "0.46354806", "0.4612052", "0.46093592", "0.4570441", "0.45638585", "0.45616207" ]
0.6688512
0
Specifies that a property on a PlanningSolution class is a Collection of planning entities. Every element in the planning entity collection should have the PlanningEntity annotation. Every element in the planning entity collection will be added to the ScoreDirector.
def planning_entity_collection_property(entity_type):
    def planning_entity_collection_property_function_mapper(getter_function):
        ensure_init()
        from org.optaplanner.optapy import PythonWrapperGenerator
        from org.optaplanner.core.api.domain.solution import \
            PlanningEntityCollectionProperty as JavaPlanningEntityCollectionProperty
        getter_function.__optaplannerPlanningEntityCollectionProperty = {
            'annotationType': JavaPlanningEntityCollectionProperty
        }
        getter_function.__return = PythonWrapperGenerator.getArrayClass(entity_type.__javaClass)
        return getter_function
    return planning_entity_collection_property_function_mapper
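Illustrative usage sketch (not part of this dataset row): how the decorator above is typically applied to a getter on a planning solution class. The Lesson and TimeTable classes, their attribute names, and the top-level optapy import path are assumptions in the style of the optapy quickstarts.

from optapy import planning_entity, planning_solution, planning_entity_collection_property

@planning_entity
class Lesson:
    # a real entity would also declare at least one @planning_variable getter
    def __init__(self, subject=None, timeslot=None, room=None):
        self.subject = subject
        self.timeslot = timeslot
        self.room = room

@planning_solution
class TimeTable:
    def __init__(self, lesson_list=None, score=None):
        self.lesson_list = lesson_list
        self.score = score

    @planning_entity_collection_property(Lesson)
    def get_lesson_list(self):
        # every Lesson returned here is added to the ScoreDirector as a planning entity
        return self.lesson_list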
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def problem_fact_collection_property(fact_type):\n def problem_fact_collection_property_function_mapper(getter_function):\n ensure_init()\n from org.optaplanner.optapy import PythonWrapperGenerator\n from org.optaplanner.core.api.domain.solution import \\\n ProblemFactCollectionProperty as JavaProblemFactCollectionProperty\n getter_function.__return = PythonWrapperGenerator.getArrayClass(fact_type.__javaClass)\n getter_function.__optaplannerPlanningEntityCollectionProperty = {\n 'annotationType': JavaProblemFactCollectionProperty\n }\n return getter_function\n return problem_fact_collection_property_function_mapper", "def collection(self):\r\n raise NotImplementedError", "def plans(self):\r\n return Plans(self)", "def isCollection(self):\n unimplemented(self)", "def plans(self):\r\n return pl.Plans(self)", "def RestrictionOneToManyDependency(self, alphaCompId, betaCompId, noInstances):\n if self.solverTypeOptimize:\n\n bvars1 = [(self.a[alphaCompId * self.nrVM + j], noInstances) for j in range(self.nrVM)]\n bvars2 = [(self.a[betaCompId * self.nrVM + j], -1) for j in range(self.nrVM)]\n bvars = bvars1 + bvars2\n self.solver.add(PbGe(bvars, 0))\n else:\n self.solver.assert_and_track(\n PbGe(noInstances * sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) -\n sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]), 0), \"LabelOneToMany: \" + str(self.labelIdx))\n self.labelIdx += 1\n\n if self.solverTypeOptimize:\n bvars1 = [(self.a[alphaCompId * self.nrVM + j], noInstances) for j in range(self.nrVM)]\n bvars2 = [(self.a[betaCompId * self.nrVM + j], -1) for j in range(self.nrVM)]\n bvars = bvars1 + bvars2\n self.solver.add(PbLe(bvars, 1 + noInstances))\n\n\n\n else:\n self.solver.assert_and_track(\n PbLe(noInstances *\n sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) -\n sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)])-1, noInstances),\n \"LabelOneToMany: \" + str(self.labelIdx))\n self.labelIdx += 1", "def assigned_plans(self):\n if \"assignedPlans\" in self._prop_dict:\n return AssignedPlansCollectionPage(self._prop_dict[\"assignedPlans\"])\n else:\n return None", "def assigned_plans(self):\n if \"assignedPlans\" in self._prop_dict:\n return AssignedPlansCollectionPage(self._prop_dict[\"assignedPlans\"])\n else:\n return None", "def assigned_plans(self):\n if \"assignedPlans\" in self._prop_dict:\n return AssignedPlansCollectionPage(self._prop_dict[\"assignedPlans\"])\n else:\n return None", "def __init__(self, collection):\n self.collection = collection", "def collection(cls):\n if not cls.__collection__:\n cls.__collection__ = AbstractModel.set_collection(\n cls.__collectionname__)\n\n return cls.__collection__", "def planning_solution(planning_solution_class):\n ensure_init()\n out = JImplements('org.optaplanner.optapy.OpaquePythonReference')(planning_solution_class)\n out.__javaClass = _generate_planning_solution_class(planning_solution_class)\n _add_deep_copy_to_class(out)\n return out", "def assignments(self):\n if \"assignments\" in self._prop_dict:\n return AssignmentsCollectionPage(self._prop_dict[\"assignments\"])\n else:\n return None", "def assignments(self):\n if \"assignments\" in self._prop_dict:\n return AssignmentsCollectionPage(self._prop_dict[\"assignments\"])\n else:\n return None", "def __init__(self, location, collection_objects, collection_zone_name, name=\"Collection_target\"):\n super().__init__(location=location, name=name, class_callable=CollectionTarget, customizable_properties=None,\n 
is_traversable=True, is_movable=False, visualize_size=0, visualize_shape=0,\n is_drop_off_target=True, visualize_colour=None, visualize_depth=None, visualize_opacity=0.0,\n collection_objects=collection_objects, collection_zone_name=collection_zone_name,\n is_invisible=True)", "def is_collection(obj):\n return type(obj) in COLLECTIONS_SET", "def evidence_from_occupancy_costmap(self) -> List[jpt.variables.LabelAssignment]:\n\n # create Occupancy costmap for the target object\n position, orientation = self.target.get_position_and_orientation()\n position = list(position)\n position[-1] = 0\n\n ocm = OccupancyCostmap(distance_to_obstacle=0.3, from_ros=False, size=200, resolution=0.02,\n origin=(position, orientation))\n # ocm.visualize()\n\n # working on a copy of the costmap, since found rectangles are deleted\n map = np.copy(ocm.map)\n\n # initialize result\n queries = []\n\n origin = np.array([ocm.height/2, ocm.width/2])\n\n # for every index pair (i, j) in the occupancy map\n for i in range(0, map.shape[0]):\n for j in range(0, map.shape[1]):\n\n # if this index has not been used yet\n if map[i][j] > 0:\n\n # get consecutive box\n width = ocm._find_consectuive_line((i, j), map)\n height = ocm._find_max_box_height((i, j), width, map)\n\n # mark box as used\n map[i:i+height, j:j+width] = 0\n\n # calculate to coordinates relative to the objects pose\n pose = np.array([i, j])\n lower_corner = (pose - origin) * ocm.resolution\n upper_corner = (pose - origin + np.array([height, width])) * ocm.resolution\n rectangle = np.array([lower_corner, upper_corner]).T\n\n # transform to jpt query\n query = self.model.bind({\"x\": list(rectangle[0]), \"y\": list(rectangle[1])})\n queries.append(query)\n\n return queries", "def add_goal(self, goal):\n self.sys_da_array = []\n self.usr_da_array = []\n self.goal = deepcopy(goal)\n \n for domain in self.belief_domains:\n if 'final' in self.goal[domain]: #If 'final' key is present in goal\n for key in self.goal[domain]['final']:\n self.goal[domain][key] = self.goal[domain]['final'][key]\n del(self.goal[domain]['final'])\n self.cur_domain = ''\n self.complete_domain = []\n # self.booked = self._init_dict_booked()", "def add_goal(self, goal):\n self.sys_da_array = []\n self.usr_da_array = []\n self.goal = deepcopy(goal)\n for domain in self.belief_domains:\n if 'final' in self.goal[domain]:\n for key in self.goal[domain]['final']:\n self.goal[domain][key] = self.goal[domain]['final'][key]\n del(self.goal[domain]['final'])\n self.cur_domain = ''\n self.complete_domain = []\n self.booked = self._init_dict_booked()", "def accept(self, visitor: Any) -> Any:\n visitor.visit_collection(self)", "def __init__(self):\n self.departments = [\n Department(name, [])\n for name in [\"Science\", \"Computer Science\", \"Art\"]\n ]", "def planning_entity(entity_class):\n ensure_init()\n out = JImplements('org.optaplanner.optapy.OpaquePythonReference')(entity_class)\n out.__javaClass = _generate_planning_entity_class(entity_class)\n _add_deep_copy_to_class(out)\n return out", "def collections(self, collections):\n\n self._collections = collections", "def collections(self, collections):\n\n self._collections = collections", "def collections(self, collections):\n\n self._collections = collections", "def collections(self, collections):\n\n self._collections = collections", "def pass_assign_for_mentor(cls):\n assignments_list = cls.list_from_sql()\n return assignments_list", "def multi_goal_given(self):\n goals = set(self.goals)\n for start in self.starts:\n yield 
Grid2DProblem(self.space, set([start]), goals)", "def assignGridInfo(self):\n\n for point in self.gridPoints:\n for house in self.houses:\n if (point.xLocation == house.xLocation and\n point.yLocation == house.yLocation):\n house.gridID = point.ID\n point.cost = [5000, 5000, 5000, 5000, 5000]\n for battery in self.batteries:\n if (point.xLocation == battery.xLocation and\n point.yLocation == battery.yLocation):\n battery.gridID = point.ID\n return True", "def test_set_use_collection_method(self) -> None:\n\n given = False\n expected = False\n\n self.checker.set_use_collection(given)\n\n actual = self.checker.use_collection\n\n self.assertEqual(expected, actual)" ]
[ "0.5543761", "0.5468283", "0.49544394", "0.49080592", "0.48011842", "0.4657647", "0.4588055", "0.4588055", "0.4588055", "0.45510972", "0.45053568", "0.44909558", "0.44828856", "0.44828856", "0.4442659", "0.44088638", "0.44076523", "0.43961158", "0.43705153", "0.43380395", "0.43277755", "0.43266708", "0.43157068", "0.43157068", "0.43157068", "0.43157068", "0.43045804", "0.43025035", "0.42991093", "0.42747802" ]
0.63169026
0
Specifies that a class is a problem fact. A problem fact must not change during solving (except through a ProblemFactChange event). The constraints in a ConstraintProvider rely on problem facts for ConstraintFactory.from(Class).
def problem_fact(fact_class):
    ensure_init()
    out = JImplements('org.optaplanner.optapy.OpaquePythonReference')(fact_class)
    out.__javaClass = _generate_problem_fact_class(fact_class)
    _add_deep_copy_to_class(out)
    return out
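Illustrative usage sketch (not part of this dataset row): a minimal problem fact class. Room and its fields are hypothetical, and the top-level optapy import path is an assumption.

from optapy import problem_fact

@problem_fact
class Room:
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return f'Room({self.name})'

Instances of Room stay constant while solving, and a ConstraintProvider can then select them with ConstraintFactory.from(get_class(Room)).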
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_problem_class(Class, problem_object, seed, user, instance_directory):\n\n random = Random(seed)\n attributes = deepcopy(problem_object)\n\n # pass configuration options in as class fields\n attributes.update(dict(deploy_config))\n\n attributes.update({\n \"random\": random,\n \"user\": user,\n \"directory\": instance_directory,\n \"server\": deploy_config.hostname\n })\n\n return challenge_meta(attributes)(Class.__name__, Class.__bases__,\n Class.__dict__)", "def test_cls(self):\n assert issubclass(Factory, immutable.Immutable)", "def __class_validation(cls):\n\n # check if this class is a subClass of Model\n if not issubclass(cls, db.Model):\n raise AttributeError(cls.__name__ + \" is not subclass of \" + db.Model.__name__)", "def _check_c_not(self, constraint, *variables):\n c = {c.__class__ for c in self._layout.solver.get(*variables)}\n self.assertFalse(constraint in c)", "def redefineProblem(self):\n self.formulation = cp.Problem(self.obj, self.constraints)", "def redefineProblem(self):\n self.formulation = cp.Problem(self.obj, self.constraints)", "def getProblem(self):\n return ProblemInstance(nDays=self.nDays,\n nSC=self.nSC,\n nGS=self.nGS,\n timewindows=self.timewindows,\n requirements=self.requirements)", "def from_problem(problem, autopass_constraints=True):\n\n def evaluate(constraint):\n if (\n autopass_constraints\n and constraint.enforced_by_nucleotide_restrictions\n ):\n return SpecEvaluation(\n constraint,\n problem,\n score=1,\n locations=[],\n message=\"Enforced by nucleotides restrictions\",\n )\n else:\n return constraint.evaluate(problem)\n\n return ProblemConstraintsEvaluations(\n [evaluate(constraint) for constraint in problem.constraints],\n problem=problem,\n )", "def XXXcheck_class_dependencies(self, node):\n # keep track of types which are used by methods arguments\n used_types = {}\n for method in node[\"methods\"]:\n self.check_function_dependencies(method, used_types)\n\n modules = {}\n for typ in used_types.values():\n if typ.f_module:\n for mname, only in typ.f_module.items():\n module = modules.setdefault(mname, {})\n if only: # Empty list means no ONLY clause\n for oname in only:\n module[oname] = True\n\n # Always add C_PTR, needed for class F_derived_member\n modules.setdefault(\"iso_c_binding\", {})[\"C_PTR\"] = True\n\n F_modules = [] # array of tuples ( name, (only1, only2) )\n for mname in sorted(modules):\n F_modules.append((mname, sorted(modules[mname])))\n node.F_module_dependencies = F_modules", "def isfixed(cls):\n return cls(\n classes={Var: ((), None), BooleanVar: ((), None)},\n data_classes={\n Var._ComponentDataClass: ((\"fixed\",), None),\n BooleanVar._ComponentDataClass: ((\"fixed\",), None),\n },\n )", "def test_cls(self):\n assert forge.cls == forge.FParameter(\n forge.FParameter.POSITIONAL_OR_KEYWORD,\n name='cls',\n interface_name='cls',\n contextual=True,\n )", "def conforms(classes):\n def decorator(cls):\n if not __debug__:\n return cls\n if not isinstance(cls, type):\n raise TypeError(\"Can only validate classes\")\n validate(cls, classes)\n return cls\n return decorator", "def _check_c(self, constraint, *variables):\n c = {c.__class__ for c in self._layout.solver.get(*variables)}\n if constraint:\n self.assertTrue(constraint in c)\n else:\n self.assertFalse(c)", "def set_problem(self, problem):\n self.problem = problem\n\n if self.problem is not None:\n self.label1.setText('Problem: ' + self.problem.get_name())\n if self.problem is not None and self.max_evaluations is not None:\n self.b.setEnabled(True)\n else:\n 
self.label1.setText('Please attach a Problem to the Input')\n self.b.setDisabled(True)", "def skip_if_invalid(self, descriptor_cls):\r\n pass", "def _validate_class(mapping: Mapping[str, Any], ref: str) -> List[SchemaError]:\n errs = [] # type: List[SchemaError]\n\n if 'id_pattern' in mapping:\n try:\n re.compile(mapping['id_pattern'])\n except Exception as err: # pylint: disable=broad-except\n errs.append(\n SchemaError(\n message='Invalid regular expression: {}'.format(err),\n ref='{}/id_pattern'.format(ref)))\n\n if 'properties' in mapping:\n if 'id' in mapping['properties']:\n errs.append(\n SchemaError(\n message=\"'id' is a reserved property of the class. \"\n \"If you want a pattern for class identifiers, \"\n \"use 'id_pattern'.\",\n ref='{}/properties'.format(ref)))\n\n return errs", "def class_is(cls: Class) -> bool:\n pass", "def opaque_class(self, classobj):\n self.restrict_class(classobj, None)", "def restrict_class(self, classobj, vars=None):\n if vars == None: vars = []\n self.instance_vars[classobj] = vars", "def problem_fact_collection_property(fact_type):\n def problem_fact_collection_property_function_mapper(getter_function):\n ensure_init()\n from org.optaplanner.optapy import PythonWrapperGenerator\n from org.optaplanner.core.api.domain.solution import \\\n ProblemFactCollectionProperty as JavaProblemFactCollectionProperty\n getter_function.__return = PythonWrapperGenerator.getArrayClass(fact_type.__javaClass)\n getter_function.__optaplannerPlanningEntityCollectionProperty = {\n 'annotationType': JavaProblemFactCollectionProperty\n }\n return getter_function\n return problem_fact_collection_property_function_mapper", "def ensure(cls, kind: 'dsl.Any') -> 'dsl.Any':\n if not cls.match(kind):\n raise _exception.GrammarError(f'{kind} is not a {cls.__name__}')\n return kind", "def del_restriction(self, RestrictionClass):\r\n self.restrictions = filter(\r\n lambda r: not isinstance(r, RestrictionClass),\r\n self.restrictions)", "def _resolve_class_min(self, class_min):\n if isinstance(class_min, int) or isinstance(class_min, float):\n return class_min\n raise TypeError('class_min has to be either non-negative int or float')", "def make_constraint(n_class):\n m = np.identity(n_class)\n m = np.vstack([m, np.ones(n_class)])\n\n lb = [epsilon] * n_class\n lb.append(1.0)\n ub = [1.0 - epsilon] * n_class\n ub.append(1.0)\n\n c = scipy.optimize.LinearConstraint(\n A=m,\n lb=lb,\n ub=ub,\n keep_feasible=True,\n )\n return c", "def test_item_class_relaxed_validation_02():\n assert not get_relaxed_validation_for_class(OPLoginItem)\n set_relaxed_validation_for_class(OPLoginItem)\n assert get_relaxed_validation_for_class(OPLoginItem)\n set_strict_validation_for_class(OPLoginItem)\n assert not get_relaxed_validation_for_class(OPLoginItem)", "def test_entities__Entity__getClass__2():\n e = Entity(None, IDummy, None)\n with pytest.raises(ValueError):\n e.getClass()", "def test_modeling__set_GP_model_fail_unit(training_data_covar_complex, tmp_modeling_class, nu):\n\n covars = training_data_covar_complex[0]\n train_X = training_data_covar_complex[1]\n train_Y = training_data_covar_complex[2]\n\n cls = tmp_modeling_class\n\n # set a few key attributes\n cls.train_X = train_X\n cls.proposed_X = train_X\n cls.train_Y = train_Y\n\n cls.model[\"model_type\"] = \"not_supported\"\n cls.model[\"response_sampled_iter\"] = cls.train_X.shape[0]\n\n # run the method\n with pytest.raises(Exception) as e:\n output_text = cls._set_GP_model(nu=nu)\n assert str(e.value) == 
\"greattunes._modeling._set_GP_model: unknown 'model_type' \" \\\n \"(\" + cls.model[\"model_type\"] + \") provided. Must be in following list \" \\\n \"['FixedNoiseGP', 'HeteroskedasticSingleTaskGP', 'SingleTaskGP', 'SimpleCustomMaternGP']\"", "def _constraints_external(self):\n pass", "def checkClass(trap:Quadrilateral):\n new = trap.checkSubClass()\n if isinstance(new, Kite):\n return new\n a,b,c,d = trap.vertices\n print(\n a.distanceTo(point=b),\n b.distanceTo(point=c),\n c.distanceTo(point=d),\n d.distanceTo(point=a)\n )\n raise ValueError(\n \"ValueError:\\tThe Quadrilateral can't \"+\n \"be constructed as a kite.\"\n )", "def __init__(self, constraints={}):\n self.constraints = constraints" ]
[ "0.5099245", "0.48803625", "0.48481956", "0.48131022", "0.47999078", "0.47999078", "0.477793", "0.47729102", "0.47412157", "0.46989408", "0.46854264", "0.46791378", "0.46720475", "0.46611643", "0.46604672", "0.46555266", "0.46472403", "0.46181303", "0.46012858", "0.4595112", "0.4587487", "0.45859522", "0.45706078", "0.45579067", "0.4537178", "0.45199847", "0.45120376", "0.45007658", "0.44957775", "0.44937962" ]
0.5774208
0
Specifies that the class is a planning solution (represents a problem and a possible solution of that problem). A possible solution does not need to be optimal or even feasible. A solution's planning variables might not be initialized (especially when delivered as a problem). A solution is mutable. For scalability reasons (to facilitate incremental score calculation), the same solution instance (called the working solution per move thread) is continuously modified. It is cloned to remember the best solution. Each planning solution must have exactly 1 PlanningScore property. Each planning solution must have at least 1 PlanningEntityCollectionProperty property. The class MUST allow passing None for all of its __init__ arguments, so that it can be cloned.
def planning_solution(planning_solution_class):
    ensure_init()
    out = JImplements('org.optaplanner.optapy.OpaquePythonReference')(planning_solution_class)
    out.__javaClass = _generate_planning_solution_class(planning_solution_class)
    _add_deep_copy_to_class(out)
    return out
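Illustrative usage sketch (not part of this dataset row): a solution class wiring together the requirements listed above, with every __init__ argument defaulting to None, exactly one score property, and one planning entity collection. It reuses the hypothetical Lesson and Room classes from the earlier sketches; the planning_score decorator and the HardSoftScore import path are assumptions in the optapy quickstart style.

from optapy import (planning_solution, planning_entity_collection_property,
                    problem_fact_collection_property, value_range_provider, planning_score)
from optapy.score import HardSoftScore  # import path assumed; it differs between optapy versions

@planning_solution
class TimeTable:
    def __init__(self, room_list=None, lesson_list=None, score=None):
        # every argument accepts None so the solver can clone the solution
        self.room_list = room_list
        self.lesson_list = lesson_list
        self.score = score

    @problem_fact_collection_property(Room)
    @value_range_provider(range_id='roomRange')
    def get_room_list(self):
        return self.room_list

    @planning_entity_collection_property(Lesson)
    def get_lesson_list(self):
        return self.lesson_list

    @planning_score(HardSoftScore)  # exactly one score property per solution; decorator name assumed
    def get_score(self):
        return self.score

    def set_score(self, score):
        self.score = score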
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, solution, **kwargs):\n self.solution = solution # set solution\n self.parameters = kwargs # set solution parameters\n self.scaling_factor = None", "def __init__(self, solution, **kwargs):\n self.solution = solution # set solution\n self.parameters = kwargs # set solution parameters", "def __init__(self, initial, goal=(3, 3, 0, 0, 0)):\n\n self.goal = goal\n Problem.__init__(self, initial, goal)", "def __init__(self, initial, goal=(1, 2, 3, 4, 5, 6, 7, 8, 0)):\n\n self.goal = goal\n Problem.__init__(self, initial, goal)", "def __init__(self, puzzle, g, path, goal):\n self.puzzle = puzzle\n self.puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n self.g = g\n self.h = self.__cost_to_goal(goal)\n self.path = path", "def __init__(self, bounds=None, feasible=None):\n if bounds is None:\n bounds = {}\n\n self.bounds = bounds\n self.feasible = feasible\n self.update()", "def __init__ (self, len_connections, station_objects, solution, max_minutes):\n\n self.len_connections = len_connections\n self.station_objects = station_objects\n self.state = copy.deepcopy(solution) \n self.K = self.state.set_K(len_connections)\n self.lining = []\n self.max_minutes = max_minutes", "def __init__(self, opts: dict, solver_opts: dict):\n self.name = opts.get(\"name\", \"Undefined\") # Name of the problem\n self.gp = opts.get(\"grid_points\") # Number of grid points\n self.nadir_p = opts.get(\"nadir_points\") # Nadir points\n self.eps = opts.get(\"penalty_weight\", 1e-3) # Penalty weight\n self.round = opts.get(\"round_decimals\", 9) # Decimal places to round to\n self.nadir_r = opts.get(\"nadir_ratio\", 1) # Nadir ratio\n self.logdir = opts.get(\"logging_folder\", \"logs\") # Folder to save logs\n self.early_exit = opts.get(\"early_exit\", True) # Whether to enable early exit\n self.bypass = opts.get(\"bypass_coefficient\", True) # Whether to enable bypass coefficient\n self.flag = opts.get(\"flag_array\", True) # Whether to use flag array\n self.cpu_count = opts.get(\"cpu_count\", cpu_count()) # Number of CPUs to use\n self.redivide_work = opts.get(\"redivide_work\", True) # Whether to redivide work\n self.model_fn = opts.get(\"pickle_file\", \"model.p\") # Pickle file name\n self.shared_flag = opts.get(\"shared_flag\", True) # Whether to use shared flag array\n self.output_excel = opts.get(\"output_excel\", True) # Whether to output to Excel\n self.process_logging = opts.get(\"process_logging\", False) # Whether to enable process logging\n self.process_timeout = opts.get(\"process_timeout\", None) # Timeout for processes\n self.solver_name = opts.get(\"solver_name\", \"gurobi\") # Name of solver\n self.solver_io = opts.get(\"solver_io\", \"python\") # IO mode of solver\n\n self.solver_opts = solver_opts # Solver options\n self.solver_opts[\"MIPGap\"] = solver_opts.get(\"MIPGap\", 0.0) # MIP gap\n self.solver_opts[\"NonConvex\"] = solver_opts.get(\"NonConvex\", 2) # Nonconvex setting\n\n # Remove None values from dict when user has overriden them\n for key, value in dict(self.solver_opts).items():\n if value is None or value:\n del self.solver_opts[key]\n\n self.time_created = time.strftime(\"%Y%m%d-%H%M%S\") # Time the options object was created\n self.log_name = self.name + \"_\" + str(self.time_created) # Name of log file", "def __solve(self) -> None:\n pyo.TransformationFactory(\"contrib.detect_fixed_vars\").apply_to(self.model) # type: ignore\n pyo.TransformationFactory(\"contrib.deactivate_trivial_constraints\").apply_to(self.model) 
# type: ignore\n\n # initialise the solver object\n self._logger.debug(\"[ModelSolver] Solver object initiated...\")\n solver = Config.OPTIMISATION_MODEL_CONFIG['SOLVER_TYPE']\n opt = pyo.SolverFactory(solver)\n if Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver) is not None:\n for k, v in Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver).items():\n opt.options[k] = v\n\n try:\n start_time = datetime.now()\n self._logger.debug(\"[ModelSolver] Solver starting...\")\n results = opt.solve(self.model, tee=True)\n self.results = results\n end_time = datetime.now()\n self._logger.info(f\"[ModelSolver] Solver completed in {end_time - start_time}.\")\n except Exception as e:\n raise Exception(f\"Model optimisation failed with {solver} with error message {e}.\")\n\n if (results.solver.status == SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal):\n self._logger.info(\"Solution is feasible and optimal\")\n results.write()\n elif results.solver.termination_condition == TerminationCondition.infeasible:\n raise ValueError(\"Model optimisation resulted into an infeasible solution\")\n\n self.model.optimised = True", "def __init__(self, initial, size, horizontalChunks, verticalChunks, goal = \"\"):\n\t\tself.initial = initial\n\t\tself.size = size\n\t\tself.horChunks = horizontalChunks\n\t\tself.verChunks = verticalChunks\n\n\t\t# Goal holds the solution, once we find it.\n\t\tself.goal = goal\n\n\t\t# For a puzzle of size n, initializes blank n x n 2d array\n\t\tself.graph = [[0 for x in range(self.size)] for x in range(self.size)] \n\t\tfor i in range (0,self.size):\n\t\t\tfor j in range (0,self.size):\n\t\t\t\tself.graph[i][j] = initial[i*self.size + j] \n\t\tself.initial = \"\"", "def solve(self):\n new_puzzle = self._puzzle.clone()\n self._solution = new_puzzle.solve_puzzle()\n del new_puzzle\n pass", "def buildSolverModel(self, lp):\n self._extract(lp)\n try:\n # Apply controls, warmstart etc. We do this here rather than in\n # callSolver() so that the caller has a chance to overwrite things\n # either using the `prepare` argument to callSolver() or by\n # explicitly calling\n # self.buildSolverModel()\n # self.callSolver()\n # self.findSolutionValues()\n # This also avoids setting warmstart information passed to the\n # constructor from actualResolve(), which would almost certainly\n # be unintended.\n model = lp.solverModel\n # Apply controls that were passed to the constructor\n for key, name in [\n (\"gapRel\", \"MIPRELSTOP\"),\n (\"timeLimit\", \"MAXTIME\"),\n (\"heurFreq\", \"HEURFREQ\"),\n (\"heurStra\", \"HEURSTRATEGY\"),\n (\"coverCuts\", \"COVERCUTS\"),\n (\"preSolve\", \"PRESOLVE\"),\n ]:\n value = self.optionsDict.get(key, None)\n if value is not None:\n model.setControl(name, value)\n\n # Apply any other controls. 
These overwrite controls that were\n # passed explicitly into the constructor.\n for option in self.options:\n if isinstance(option, tuple):\n name = optione[0]\n value = option[1]\n else:\n fields = option.split(\"=\", 1)\n if len(fields) != 2:\n raise PulpSolverError(\"Invalid option \" + str(option))\n name = fields[0].strip()\n value = fields[1].strip()\n try:\n model.setControl(name, int(value))\n continue\n except ValueError:\n pass\n try:\n model.setControl(name, float(value))\n continue\n except ValueError:\n pass\n model.setControl(name, value)\n # Setup warmstart information\n if self.optionsDict.get(\"warmStart\", False):\n solval = list()\n colind = list()\n for v in sorted(lp.variables(), key=lambda x: x._xprs[0]):\n if v.value() is not None:\n solval.append(v.value())\n colind.append(v._xprs[0])\n if _ismip(lp) and self.mip:\n # If we have a value for every variable then use\n # loadmipsol(), which requires a dense solution. Otherwise\n # use addmipsol() which allows sparse vectors.\n if len(solval) == model.attributes.cols:\n model.loadmipsol(solval)\n else:\n model.addmipsol(solval, colind, \"warmstart\")\n else:\n model.loadlpsol(solval, None, None, None)\n # Setup message callback if output is requested\n if self.msg:\n\n def message(prob, data, msg, msgtype):\n if msgtype > 0:\n print(msg)\n\n model.addcbmessage(message)\n except (xpress.ModelError, xpress.InterfaceError, xpress.SolverError) as err:\n raise PulpSolverError(str(err))", "def __init__(self, optimization: Optimization, error_on_fail: bool = False):\n\n ## Instance of the optimization problem.\n self.opt = optimization\n\n ## Initial guess for the optimization problem (set using reset_initial_seed).\n self.x0 = cs.DM.zeros(optimization.nx)\n\n ## Parameter vector.\n self.p = cs.DM.zeros(optimization.np)\n\n ## Parameter dictionary.\n self._p_dict = {}\n\n ## When True, after solve() is called, if the solver did not converge then a RuntimeError is thrown.\n self._error_on_fail = error_on_fail\n\n ## Solution container\n self._solution = None", "def __init__(self, shapes, pathways, take_first=False, relax_assignments_until=inf,\n relax_crossings_until=inf):\n self.shapes = tuple(sorted(shapes))\n self.pathways = pathways\n self.take_first = take_first # whether to raise after the first solution is found\n self.relax_assignments_until = relax_assignments_until\n self.relax_crossings_until = relax_crossings_until\n self.resources = flatten(pathways)\n\n nlevels = len(self.shapes)\n assert len(self.resources) == nlevels, (self.resources, nlevels, shapes, pathways)\n\n self.stats = { 'ncalls': 0\n , 'nlevels': nlevels\n , 'nsolutions': 0\n , 'npossible_solutions': count_possible_solutions(nlevels)\n , 'nnodes': count_nodes(nlevels)\n }\n\n # Make sure we can access shape definitions by id.\n self.s2shape = shapes\n\n # Give ourselves a way to find the pathway for a given resource.\n self.r2p = {}\n for k,v in pathways.items():\n for val in v:\n self.r2p[val] = k\n\n # And let's maintain a list of segments for each pathway.\n self.segments = {k:[] for k in pathways}\n\n # Maintain indices into shapes and resources for the current node while backtracking.\n self.pairs = [] # pairs of (shape_index, resource_index)\n self.shape_pool = set(range(nlevels))\n self.resource_pool = set(range(nlevels))\n self.siblings = [] # stack of siblings generators\n\n # We'll accumulate solutions into a list.\n self.solutions = []\n\n # Logfile!\n self.logfile = open('problem.log', 'w+')\n self.loglines = 0", "def __init__(self, 
goal):\n self.goal = None\n self.goal_state_value_dict = dict()\n self.num_goals_to_satisfy = 0\n self.set_goal(goal)", "def _initialize(self):\n self._solution = self._problem_instance.build_solution(method=\"Greedy\")\n\n while not self._problem_instance.is_admissible(self._solution):\n self._solution = self._problem_instance.build_solution(method=\"Greedy\")\n \n self._problem_instance.evaluate_solution(self._solution, feedback=self._feedback)", "def __init__(self, initial, goals, allowed):\n self.initial = initial # initial state\n self.goals = goals # list of goals that can be achieved\n self.allowed = allowed # the states we can move into", "def __init__(self, initial, goals, allowed):\n self.initial = initial # initial state\n self.goals = goals # list of goals that can be achieved\n self.allowed = allowed # the states we can move into ", "def get_solution(self):\r\n return self.solution", "def sketch_of_solution(self,sol=None):\n raise NotImplementedError", "def initialize_solution(self, graph):\n start = self.get_starting_node(graph)\n return Solution(graph, start, ant=self)", "def solution(self, solution):\n if solution is None:\n raise ValueError(\"Invalid value for `solution`, must not be `None`\") # noqa: E501\n if solution is not None and len(solution) < 1:\n raise ValueError(\"Invalid value for `solution`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._solution = solution", "def __init__(self, initial_state, puzzle, heuristic=Heuristic.MANHATTAN_DIST):\n self.initial_state = initial_state\n self.get_actions = puzzle.get_actions\n self.get_result = puzzle.get_result\n self.check_goal_state = puzzle.check_goal_state\n self.goal_coord = puzzle.goal_coord\n self.wall_coords = puzzle.wall_coords\n self.heuristic = heuristic", "def __init__(self,dimension =3,shape=(3,3,3), costFn = lambda x,y : 1, goal = None, start=None) :\r\n#self.walls = gameState.getWalls()\r\n\t\tself.dimension = dimension\r\n\t\tself.matrixWorld = matrix_generation2.generateDynamicMatrix(dimension,shape)\r\n\t\t#print(self.matrixWorld)\r\n\t\tself.startState = start\r\n\t\tif start == None or len(start)!= dimension: self.startState = tuple([1]*dimension)\r\n\t\tself.goal = goal\r\n\t\tif goal == None or len(goal)!= dimension: self.goal = tuple([dimension]*dimension)\r\n\t\tself.matrixWorld[self.startState] = False\r\n\t\tself.matrixWorld[self.goal] = False\r\n\t\tself.costFn = costFn\r\n\t\tprint(self.goal)", "def __init__(self, sparse_args=None, solve=True):\n self.solved = False\n self.sparse_args = sparse_args\n self.solved = False\n if solve: self.solve()", "def __init__(self, initial, goal=None):\n self.initial = initial\n self.goal = goal", "def __init__(self, initial, goal):\n self.initial = initial; self.goal = goal", "def objective(self) -> Optional[Union[int, float]]:\n if self.solution is not None:\n if isinstance(self.solution, list):\n return getattr(self.solution[-1], \"objective\", None)\n else:\n return getattr(self.solution, \"objective\", None)\n else:\n return None", "def __get_solver_instance(self):\n solver = self._solver\n\n # if a configured solver is not given, then build one of the given type\n from mystic.abstract_solver import AbstractSolver\n if isinstance(solver, AbstractSolver): # is a configured solver instance\n return solver\n if not hasattr(solver, \"Solve\"): # is an Error...\n raise TypeError, \"%s is not a valid solver\" % solver\n\n # otherwise, this is a solver class and needs configuring\n #from mystic.monitors import Monitor\n #stepmon = 
Monitor()\n #evalmon = Monitor()\n #maxiter = 1000\n #maxfun = 1e+6\n solver = solver(self.nDim)\n solver.SetRandomInitialPoints() #FIXME: set population; will override\n if self._useStrictRange: #XXX: always, settable, or sync'd ?\n solver.SetStrictRanges(min=self._strictMin, max=self._strictMax)\n solver.SetEvaluationLimits(self._maxiter, self._maxfun)\n solver.SetEvaluationMonitor(self._evalmon) #XXX: or copy or set?\n solver.SetGenerationMonitor(self._stepmon) #XXX: or copy or set?\n solver.SetTermination(self._termination)\n solver.SetConstraints(self._constraints)\n solver.SetPenalty(self._penalty)\n if self._reducer: #XXX: always, settable, or sync'd ?\n solver.SetReducer(self._reducer, arraylike=True)\n return solver", "def _CreateSolutionStrategy(self):\n raise Exception(\"Mesh motion solving strategy must be created by the derived class.\")" ]
[ "0.6299187", "0.6292075", "0.62344855", "0.6163733", "0.5826677", "0.581412", "0.5805651", "0.5738758", "0.5664172", "0.56512344", "0.56407416", "0.5592069", "0.55457485", "0.5484656", "0.5465259", "0.5464254", "0.54427224", "0.54386795", "0.5395744", "0.538688", "0.5372749", "0.53693837", "0.5357589", "0.53519595", "0.53307325", "0.53229856", "0.53078383", "0.5306999", "0.5295005", "0.5292622" ]
0.6527557
0
Marks a function as a ConstraintProvider. The function takes a single parameter, the ConstraintFactory, and must return a list of Constraints. To create a Constraint, start with ConstraintFactory.from(get_class(PythonClass)).
def constraint_provider(constraint_provider_function):
    ensure_init()
    constraint_provider_function.__javaClass = _generate_constraint_provider_class(constraint_provider_function)
    return constraint_provider_function
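Illustrative usage sketch (not part of this dataset row): a decorated constraint provider returning a single, deliberately toy constraint. Assumptions: the optapy import paths, that the Python wrapper exposes ConstraintFactory.from as from_ (since from is a reserved word in Python), and the hypothetical Lesson entity from the earlier sketches.

from optapy import constraint_provider, get_class
from optapy.score import HardSoftScore  # import path assumed; it differs between optapy versions

@constraint_provider
def define_constraints(constraint_factory):
    lesson_class = get_class(Lesson)  # Lesson is the hypothetical @planning_entity class above
    return [
        # a toy constraint that only shows the stream shape:
        # start from a class, optionally filter/join, then penalize or reward
        constraint_factory.from_(lesson_class)  # Python-side name for ConstraintFactory.from (assumed)
            .filter(lambda lesson: lesson.subject == 'Chemistry')
            .penalize('Discourage chemistry (toy example)', HardSoftScore.ONE_SOFT),
    ]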
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createConstraint(*argv):", "def value_range_provider(range_id):\n def value_range_provider_function_wrapper(getter_function):\n ensure_init()\n from org.optaplanner.core.api.domain.valuerange import ValueRangeProvider as JavaValueRangeProvider\n getter_function.__optaplannerValueRangeProvider = {\n 'annotationType': JavaValueRangeProvider,\n 'id': range_id\n }\n return getter_function\n return value_range_provider_function_wrapper", "def add_constraint(self, var1, var2, constraint_fn) :\n self.constraints.append(Constraint(var1, var2, constraint_fn))\n return self", "def tool_factory_function(sources, alignment_node=None, **parameters):\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.alignment_node == alignment_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(**parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n sources=sources,\n alignment_node=alignment_node)", "def createConstraint(self):\n return _libsbml.Model_createConstraint(self)", "def decorate_with_checker(func: CallableT) -> CallableT:\n assert not hasattr(func, \"__preconditions__\"), \\\n \"Expected func to have no list of preconditions (there should be only a single contract checker per function).\"\n\n assert not hasattr(func, \"__postconditions__\"), \\\n \"Expected func to have no list of postconditions (there should be only a single contract checker per function).\"\n\n assert not hasattr(func, \"__postcondition_snapshots__\"), \\\n \"Expected func to have no list of postcondition snapshots (there should be only a single contract checker \" \\\n \"per function).\"\n\n sign = inspect.signature(func)\n if '_ARGS' in sign.parameters:\n raise TypeError(\n 'The arguments of the function to be decorated with a contract checker include \"_ARGS\" which is '\n 'a reserved placeholder for positional arguments in the condition.')\n\n if '_KWARGS' in sign.parameters:\n raise TypeError(\n 'The arguments of the function to be decorated with a contract checker include \"_KWARGS\" which is '\n 'a reserved placeholder for keyword arguments in the condition.')\n\n param_names = list(sign.parameters.keys())\n\n # Determine the default argument values\n kwdefaults = resolve_kwdefaults(sign=sign)\n\n id_func = id(func)\n\n # (mristin, 2021-02-16)\n # Admittedly, this branching on sync/async is absolutely monstrous.\n # However, I couldn't find out an easier way to refactor the code so that it supports async.\n # Python expects us to explicitly colour functions as sync/async so we can not just put in an if-statement and\n # introduce an \"await\".\n #\n # The two wrappers need to be manually maintained in parallel.\n # Whenever you make a change, please inspect manually that both sync and async code exercises equivalent behavior.\n # For example, copy/paste the two blocks of code in separate files and perform a diff.\n\n if inspect.iscoroutinefunction(func):\n\n async def wrapper(*args, **kwargs): # type: ignore\n \"\"\"Wrap func by checking the preconditions and postconditions.\"\"\"\n kwargs_error = _assert_no_invalid_kwargs(kwargs)\n if kwargs_error:\n raise kwargs_error\n\n # We need to create a new 
in-progress set if it is None as the ``ContextVar`` does not accept\n # a factory function for the default argument. If we didn't do this, and simply set an empty\n # set as the default, ``ContextVar`` would always point to the same set by copying the default\n # by reference.\n in_progress = _IN_PROGRESS.get()\n if in_progress is None:\n in_progress = set()\n _IN_PROGRESS.set(in_progress)\n\n # Use try-finally instead of ExitStack for performance.\n try:\n # If the wrapper is already checking the contracts for the wrapped function, avoid a recursive loop\n # by skipping any subsequent contract checks for the same function.\n if id_func in in_progress:\n return await func(*args, **kwargs)\n\n in_progress.add(id_func)\n\n (preconditions, snapshots, postconditions) = _unpack_pre_snap_posts(wrapper)\n\n resolved_kwargs = kwargs_from_call(\n param_names=param_names, kwdefaults=kwdefaults, args=args, kwargs=kwargs)\n\n type_error = _assert_resolved_kwargs_valid(postconditions, resolved_kwargs)\n if type_error:\n raise type_error\n\n violation_error = await _assert_preconditions_async(\n preconditions=preconditions, resolved_kwargs=resolved_kwargs)\n if violation_error:\n raise violation_error\n\n # Capture the snapshots\n if postconditions and snapshots:\n resolved_kwargs['OLD'] = await _capture_old_async(\n snapshots=snapshots, resolved_kwargs=resolved_kwargs)\n\n # Ideally, we would catch any exception here and strip the checkers from the traceback.\n # Unfortunately, this can not be done in Python 3, see\n # https://stackoverflow.com/questions/44813333/how-can-i-elide-a-function-wrapper-from-the-traceback-in-python-3\n result = await func(*args, **kwargs)\n\n if postconditions:\n resolved_kwargs['result'] = result\n\n violation_error = await _assert_postconditions_async(\n postconditions=postconditions, resolved_kwargs=resolved_kwargs)\n if violation_error:\n raise violation_error\n\n return result\n finally:\n in_progress.discard(id_func)\n else:\n\n def wrapper(*args, **kwargs): # type: ignore\n \"\"\"Wrap func by checking the preconditions and postconditions.\"\"\"\n kwargs_error = _assert_no_invalid_kwargs(kwargs)\n if kwargs_error:\n raise kwargs_error\n\n # We need to create a new in-progress set if it is None as the ``ContextVar`` does not accept\n # a factory function for the default argument. 
If we didn't do this, and simply set an empty\n # set as the default, ``ContextVar`` would always point to the same set by copying the default\n # by reference.\n in_progress = _IN_PROGRESS.get()\n if in_progress is None:\n in_progress = set()\n _IN_PROGRESS.set(in_progress)\n\n # Use try-finally instead of ExitStack for performance.\n try:\n # If the wrapper is already checking the contracts for the wrapped function, avoid a recursive loop\n # by skipping any subsequent contract checks for the same function.\n if id_func in in_progress:\n return func(*args, **kwargs)\n\n in_progress.add(id_func)\n\n (preconditions, snapshots, postconditions) = _unpack_pre_snap_posts(wrapper)\n\n resolved_kwargs = kwargs_from_call(\n param_names=param_names, kwdefaults=kwdefaults, args=args, kwargs=kwargs)\n\n type_error = _assert_resolved_kwargs_valid(\n postconditions=postconditions, resolved_kwargs=resolved_kwargs)\n if type_error:\n raise type_error\n\n violation_error = _assert_preconditions(\n preconditions=preconditions, resolved_kwargs=resolved_kwargs, func=func)\n if violation_error:\n raise violation_error\n\n # Capture the snapshots\n if postconditions and snapshots:\n resolved_kwargs['OLD'] = _capture_old(\n snapshots=snapshots, resolved_kwargs=resolved_kwargs, func=func)\n\n # Ideally, we would catch any exception here and strip the checkers from the traceback.\n # Unfortunately, this can not be done in Python 3, see\n # https://stackoverflow.com/questions/44813333/how-can-i-elide-a-function-wrapper-from-the-traceback-in-python-3\n result = func(*args, **kwargs)\n\n if postconditions:\n resolved_kwargs['result'] = result\n\n violation_error = _assert_postconditions(\n postconditions=postconditions, resolved_kwargs=resolved_kwargs, func=func)\n if violation_error:\n raise violation_error\n\n return result\n finally:\n in_progress.discard(id_func)\n\n # Copy __doc__ and other properties so that doctests can run\n functools.update_wrapper(wrapper=wrapper, wrapped=func)\n\n assert not hasattr(wrapper, \"__preconditions__\"), \"Expected no preconditions set on a pristine contract checker.\"\n assert not hasattr(wrapper, \"__postcondition_snapshots__\"), \\\n \"Expected no postcondition snapshots set on a pristine contract checker.\"\n assert not hasattr(wrapper, \"__postconditions__\"), \"Expected no postconditions set on a pristine contract checker.\"\n\n # Precondition is a list of condition groups (i.e. 
disjunctive normal form):\n # each group consists of AND'ed preconditions, while the groups are OR'ed.\n #\n # This is necessary in order to implement \"require else\" logic when a class weakens the preconditions of\n # its base class.\n setattr(wrapper, \"__preconditions__\", [])\n setattr(wrapper, \"__postcondition_snapshots__\", [])\n setattr(wrapper, \"__postconditions__\", [])\n\n return wrapper # type: ignore", "def __init__(self, constraint: ConstraintExpr):\n self.constraint = constraint", "def get_constraint_func(ntype):\n\n func = None\n if ntype == 'pointConstraint':\n func = mc.pointConstraint\n\n elif ntype == 'orientConstraint':\n func = mc.orientConstraint\n\n elif ntype == 'parentConstraint':\n func = mc.parentConstraint\n\n elif ntype == 'scaleConstraint':\n func = mc.scaleConstraint\n\n elif ntype == 'aimConstraint':\n func = mc.aimConstraint\n\n elif ntype == 'normalConstraint':\n func = mc.normalConstraint\n\n elif ntype == 'poleVectorConstraint':\n func = mc.poleVectorConstraint\n\n elif ntype == 'tangentConstraint':\n func = mc.tangentConstraint\n\n elif ntype == 'geometryConstraint':\n func = mc.geometryConstraint\n\n elif ntype == 'pointOnPolyConstraint':\n func = mc.pointOnPolyConstraint\n\n return func", "def fit_custom_fx(self, custom_function, input_width, output_width, task_name):\n new_classifier = ClassifierNode(\n end_in_address=input_width,\n out_address=self.classifiers_out_address_start + self.classifiers_current_count + np.arange(output_width),\n classifier_name=task_name,\n given_predictor=custom_function\n )\n self.classifiers_current_count += output_width\n self.classifiers_list.append(new_classifier)", "def create_factory_function(tool_func):\n base = tool_function.__bases__[0]\n if base == Tool:\n def tool_factory_function(sources, alignment_node=None, **parameters):\n \"\"\"\n Factory function for creating factors inside a workflow\n\n :param sources: source nodes\n :param alignment_node: alignment node\n :return: the created factor\n :type sources: list[Node] | tuple[Node] | None\n\n \"\"\"\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.alignment_node == alignment_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(**parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n sources=sources,\n alignment_node=alignment_node)\n\n return tool_factory_function\n elif base == MultiOutputTool:\n def tool_factory_function(source, splitting_node=None, **parameters):\n \"\"\"\n Factory function for creating factors inside a workflow\n\n :param source: source node\n :param splitting_node: splitting node\n :return: the created factor\n :type source: Node\n\n \"\"\"\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if\n f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.source == source\n and m.splitting_node == splitting_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = 
full_matches[0].tool\n else:\n tool = tool_func(**parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n source=source,\n splitting_node=splitting_node)\n\n return tool_factory_function\n\n elif base == AggregateTool:\n def tool_factory_function(sources, alignment_node, aggregation_meta_data, **parameters):\n \"\"\"\n Factory function for creating factors inside a workflow\n\n :param aggregation_meta_data: the meta data to aggregate over\n :param sources: source nodes\n :param alignment_node: alignment node\n :return: the created factor\n :type sources: list[Node] | tuple[Node] | None\n\n \"\"\"\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if\n f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.alignment_node == alignment_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(aggregation_meta_data=aggregation_meta_data, **parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n sources=sources,\n alignment_node=alignment_node)\n\n return tool_factory_function\n elif base == SelectorTool:\n def tool_factory_function(sources, selector_meta_data, **parameters):\n \"\"\"\n Factory function for creating factors inside a workflow\n\n :param selector_meta_data: the meta data to select over\n :param sources: source nodes\n :return: the created factor\n :type sources: list[Node] | tuple[Node] | None\n\n \"\"\"\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if\n f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.selector_meta_data == selector_meta_data\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(selector_meta_data=selector_meta_data, **parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n sources=sources)\n\n return tool_factory_function\n elif base == PlateCreationTool:\n def tool_factory_function(source, **parameters):\n \"\"\"\n Factory function for creating factors inside a workflow\n\n :param source: source node\n :return: the created factor\n :type source: Node\n\n \"\"\"\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n return dict(\n workflow=self.current_workflow,\n tool=tool_func(**parameters),\n source=source)\n\n return tool_factory_function\n else:\n raise NotImplementedError", "def addConstraint(self, constraint: Constraint, /) -> None:\n ...", "def constraints(self, constraints):\n if constraints is None:\n constraints = []\n elif not isinstance(constraints, list):\n constraints = list(constraints)\n\n for i, constraint in enumerate(constraints):\n if isinstance(constraint, TopologicalConstraint):\n pass\n elif callable(constraint):\n constraints[i] = GenericTopological(constraint)\n else:\n raise TypeError('constraints must be callable or type TopologicalConstraint')\n\n AbstractRandomizer.constraints.__set__(self, constraints) # type: ignore", "def register(self, 
provider):\n for entry in dir(provider):\n try:\n provider_function = type(provider).__dict__[entry]\n factory_provider = getattr(provider_function, 'factory_provider', None)\n if factory_provider:\n provided_type, singleton = factory_provider\n if callable(provider_function): # A function or member function\n # if it's a bound method, this will get the bound version\n provider_member = getattr(provider, entry)\n self.add_factory(provided_type, provider_member, singleton)\n elif hasattr(provider_function, '__get__'):\n # this is a property or non-callable descriptor:\n self.add_factory(\n provided_type,\n functools.partial(provider_function.__get__, provider, provider),\n singleton,\n )\n else:\n self.add_service(provided_type, provider_function)\n except KeyError:\n pass", "def provider(name):\n def wrapper(cls):\n def wrapped(init):\n def register_event_callbacks(self):\n # NOTE(morganfainberg): A provider who has an implicit\n # dependency on other providers may utilize the event callback\n # mechanism to react to any changes in those providers. This is\n # performed at the .provider() mechanism so that we can ensure\n # that the callback is only ever called once and guaranteed\n # to be on the properly configured and instantiated backend.\n if not hasattr(self, 'event_callbacks'):\n return\n\n if not isinstance(self.event_callbacks, dict):\n msg = _('event_callbacks must be a dict')\n raise ValueError(msg)\n\n for event in self.event_callbacks:\n if not isinstance(self.event_callbacks[event], dict):\n msg = _('event_callbacks[%s] must be a dict') % event\n raise ValueError(msg)\n for resource_type in self.event_callbacks[event]:\n # Make sure we register the provider for each event it\n # cares to call back.\n callbacks = self.event_callbacks[event][resource_type]\n if not callbacks:\n continue\n if not hasattr(callbacks, '__iter__'):\n # ensure the callback information is a list\n # allowing multiple callbacks to exist\n callbacks = [callbacks]\n notifications.register_event_callback(event,\n resource_type,\n callbacks)\n\n def __wrapped_init__(self, *args, **kwargs):\n \"\"\"Initialize the wrapped object and add it to the registry.\"\"\"\n init(self, *args, **kwargs)\n REGISTRY[name] = self\n register_event_callbacks(self)\n\n resolve_future_dependencies(__provider_name=name)\n\n return __wrapped_init__\n\n cls.__init__ = wrapped(cls.__init__)\n _factories[name] = cls\n return cls\n return wrapper", "def validator_for(context_fn):\n\n def validator_for_decor(validator_fn):\n # Yes, this doesn't return a function! 
However, a Validator instance is\n # callable, so this is fine :)\n # See: https://stackoverflow.com/a/20791175 (and the other answers)\n return Validator(context_fn, validator_fn)\n return validator_for_decor", "def make_dataclass_validator(_cls: Type[Any], config: Type['BaseConfig']) -> 'CallableGenerator':\n dataclass_params = _cls.__dataclass_params__\n stdlib_dataclass_parameters = {param: getattr(dataclass_params, param) for param in dataclass_params.__slots__}\n cls = dataclass(_cls, config=config, **stdlib_dataclass_parameters)\n yield from _get_validators(cls)", "def __init__(self, constraints: List[ConstraintExpr]):\n self.constraints = constraints", "def __init__(self, constraints: List[ConstraintExpr]):\n self.constraints = constraints", "def make_constraint(constraint):\n if isinstance(constraint, str) and constraint == \"array-like\":\n return _ArrayLikes()\n if isinstance(constraint, str) and constraint == \"sparse matrix\":\n return _SparseMatrices()\n if isinstance(constraint, str) and constraint == \"random_state\":\n return _RandomStates()\n if constraint is callable:\n return _Callables()\n if constraint is None:\n return _NoneConstraint()\n if isinstance(constraint, type):\n return _InstancesOf(constraint)\n if isinstance(\n constraint, (Interval, StrOptions, Options, HasMethods, MissingValues)\n ):\n return constraint\n if isinstance(constraint, str) and constraint == \"boolean\":\n return _Booleans()\n if isinstance(constraint, str) and constraint == \"verbose\":\n return _VerboseHelper()\n if isinstance(constraint, str) and constraint == \"cv_object\":\n return _CVObjects()\n if isinstance(constraint, Hidden):\n constraint = make_constraint(constraint.constraint)\n constraint.hidden = True\n return constraint\n raise ValueError(f\"Unknown constraint type: {constraint}\")", "def tool_factory_function(source, splitting_node=None, **parameters):\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if\n f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.source == source\n and m.splitting_node == splitting_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(**parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n source=source,\n splitting_node=splitting_node)", "def addConstrs(self, constrs, name=''):\n ...", "def resolver(cls) -> Callable:\n annotations = dict(cls.__annotations__)\n annotations.pop('apply', None)\n defaults = {name: getattr(cls, name) for name in annotations}\n return functools.partial(resolve_annotations, annotations=annotations, defaults=defaults)", "def registerInParameterFactory(self) -> None:\n ...", "def create(\n cls: _CollectionAlias,\n parameter_handler: list,\n topology: Topology,\n bonds: Optional[SMIRNOFFBondCollection] = None,\n ) -> \"SMIRNOFFConstraintCollection\":\n if isinstance(parameter_handler, list):\n parameter_handlers = parameter_handler\n else:\n parameter_handlers = [parameter_handler]\n\n for parameter_handler in parameter_handlers:\n if type(parameter_handler) not in cls.allowed_parameter_handlers():\n raise InvalidParameterHandlerError(type(parameter_handler))\n\n collection = cls()\n collection.store_constraints(\n parameter_handlers=parameter_handlers,\n topology=topology,\n 
bonds=bonds,\n )\n\n return collection", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def createConstraint(schemaName, tableName, constraint):\n return constraints[constraint.kind](schemaName, tableName, constraint)", "def add(self, key: str, constraints_fcn: Callable, **kwargs: Any):\n constraints, constraints_jacobian, constraints_double_derivative = constraints_fcn(**kwargs)\n super(HolonomicConstraintsList, self)._add(\n key=key,\n constraints=constraints,\n constraints_jacobian=constraints_jacobian,\n constraints_double_derivative=constraints_double_derivative,\n )", "def constraint_for(dist=None, param=None):\n\n constraints = {\n 'atol':\n tfb.Softplus(),\n 'rtol':\n tfb.Softplus(),\n 'concentration':\n tfb.Softplus(),\n 'GeneralizedPareto.concentration': # Permits +ve and -ve concentrations.\n lambda x: tf.math.tanh(x) * 0.24,\n 'concentration0':\n tfb.Softplus(),\n 'concentration1':\n tfb.Softplus(),\n 'df':\n tfb.Softplus(),\n 'InverseGaussian.loc':\n tfb.Softplus(),\n 'JohnsonSU.tailweight':\n tfb.Softplus(),\n 'PowerSpherical.mean_direction':\n lambda x: tf.math.l2_normalize(tf.math.sigmoid(x) + 1e-6, -1),\n 'ContinuousBernoulli.probs':\n tfb.Sigmoid(),\n 'Geometric.logits': # TODO(b/128410109): re-enable down to -50\n # Capping at 15. so that probability is less than 1, and entropy is\n # defined. b/147394924\n lambda x: tf.minimum(tf.maximum(x, -16.), 15.\n ), # works around the bug\n 'Geometric.probs':\n constrain_between_eps_and_one_minus_eps(),\n 'Binomial.probs':\n tfb.Sigmoid(),\n 'NegativeBinomial.probs':\n tfb.Sigmoid(),\n 'Bernoulli.probs':\n tfb.Sigmoid(),\n 'PlackettLuce.scores':\n tfb.Softplus(),\n 'ProbitBernoulli.probs':\n tfb.Sigmoid(),\n 'RelaxedBernoulli.probs':\n tfb.Sigmoid(),\n 'cutpoints': # Permit values that aren't too large\n lambda x: tfb.Ascending().forward(10. * tf.math.tanh(x)),\n 'log_rate':\n lambda x: tf.maximum(x, -16.),\n 'mixing_concentration':\n tfb.Softplus(),\n 'mixing_rate':\n tfb.Softplus(),\n 'rate':\n tfb.Softplus(),\n 'scale':\n tfb.Softplus(),\n 'scale_diag':\n tfb.Softplus(),\n 'scale_identity_multiplier':\n tfb.Softplus(),\n 'tailweight':\n tfb.Softplus(),\n 'temperature':\n tfb.Softplus(),\n 'total_count':\n lambda x: tf.floor(tfb.Sigmoid()(x / 100.) * 100.) + 1.,\n 'Bernoulli':\n lambda d: dict(d, dtype=tf.float32),\n 'CholeskyLKJ':\n fix_lkj,\n 'LKJ':\n fix_lkj,\n 'Zipf':\n lambda d: dict(d, dtype=tf.float32),\n 'GeneralizedNormal.power':\n tfb.Softplus(),\n }\n\n if param is not None:\n return constraints.get('{}.{}'.format(dist, param),\n constraints.get(param, tfb.Identity()))\n return constraints.get(dist, tfb.Identity())", "def get_provider(provider_config):\n def get_provider_class(cid):\n for klass in PROVIDERS:\n if klass.matches(cid):\n return klass\n name = provider_config['name']\n cls = get_provider_class(name)\n if cls is None:\n raise InvalidProvider('Could not find provider with id %s' % name)\n return cls(**provider_config)" ]
[ "0.53345704", "0.4972497", "0.4919507", "0.48735815", "0.48686048", "0.48448732", "0.48336023", "0.48179156", "0.48084193", "0.4801629", "0.47201443", "0.47076195", "0.47014117", "0.46949646", "0.46941438", "0.46845543", "0.46794537", "0.46794537", "0.46768197", "0.4663105", "0.465544", "0.46505886", "0.46458822", "0.45987383", "0.4595571", "0.4595571", "0.45698825", "0.45407924", "0.4537384", "0.45328918" ]
0.8258811
0
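
The document field above is the body of a contract-checking wrapper: before delegating to the wrapped function it verifies preconditions, captures OLD snapshots, and verifies postconditions on the result, while a ContextVar-held set of function ids keeps the checks from recursing when a condition itself calls the checked function. A minimal sketch of that recursion guard follows, assuming illustrative names such as _IN_PROGRESS and checked (they are not taken from any particular library):

    import contextvars
    import functools

    # Default to None instead of set(): a mutable default would be shared by
    # reference across every context that never set the variable explicitly.
    _IN_PROGRESS: contextvars.ContextVar = contextvars.ContextVar("in_progress", default=None)


    def checked(func):
        """Skip nested contract checks for the same function within one context."""

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            in_progress = _IN_PROGRESS.get()
            if in_progress is None:
                in_progress = set()
                _IN_PROGRESS.set(in_progress)

            func_id = id(func)
            if func_id in in_progress:
                # Already checking this function further up the call stack,
                # so call it plainly and avoid a recursive check loop.
                return func(*args, **kwargs)

            in_progress.add(func_id)
            try:
                # ... precondition checks would run here ...
                result = func(*args, **kwargs)
                # ... postcondition checks on ``result`` would run here ...
                return result
            finally:
                in_progress.discard(func_id)

        return wrapper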
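
The closing comment of that document notes that __preconditions__ is stored in disjunctive normal form: each group is a conjunction of AND'ed conditions and the groups themselves are OR'ed, which is what allows a subclass to weaken ("require else") the preconditions of its base class. A hypothetical evaluator for that layout, with all helper names assumed rather than taken from the source:

    from typing import Callable, Sequence

    Condition = Callable[..., bool]


    def preconditions_hold(groups: Sequence[Sequence[Condition]], **kwargs) -> bool:
        # ``groups`` is an OR of AND'ed condition groups; an empty list means
        # "no preconditions", so the call is trivially allowed.
        if not groups:
            return True
        return any(all(cond(**kwargs) for cond in group) for group in groups)


    # Base class requires x > 0; a subclass weakens this by also allowing x == 0.
    base_group = [lambda x: x > 0]
    derived_group = [lambda x: x == 0]

    assert preconditions_hold([base_group], x=1)
    assert not preconditions_hold([base_group], x=0)
    assert preconditions_hold([base_group, derived_group], x=0)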