query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list, 30 items) | negative_scores (list, 30 items) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Creates an initial game state from a layout array (see layout.py). | def initialize( self, layout, numGhostAgents=1000 ):
self.data.initialize(layout, numGhostAgents) ##self.data is defined in the Grid() class of game.py REF112. It creates an initial game state from a layout array (see layout.py). | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, layout, player):\n self.layout = [x[:] for x in layout] #this state's layout is a copy\n self.height = len(layout[0])\n self.width = len(layout)\n self.who_played = player\n self.score = self._scoring() #score for this board",
"def initGame(width=19):\n state = np.zeros((width, width, 2))\n available = np.zeros((width, width))\n\n return state, available",
"def initial_state():\r\n return [[EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\r\n return [[EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]"
]
| [
"0.65404385",
"0.62726295",
"0.6086132",
"0.6086132",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214",
"0.6044214"
]
| 0.6842221 | 0 |
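The row above pairs the query with Berkeley-Pacman-style initialization code. As a rough sketch of how that initialize call is typically driven (the layout module, its getLayout helper and the GameState class in pacman.py are assumed from the surrounding project, not shown in this row):

```python
import layout                    # assumed: the layout.py module referenced in the query
from pacman import GameState     # assumed: GameState is defined in pacman.py

board = layout.getLayout('mediumClassic')   # parse a layout text file into a Layout object
state = GameState()
state.initialize(board, numGhostAgents=2)   # populate state.data from the layout array
```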
Processes the command used to run pacman from the command line. | def readCommand( argv ): ## argv comes from the sys module and is obtained via sys.argv. The function reads the console's command line arguments, which are passed in like so: args = sys.argv[1:]
from optparse import OptionParser ## OptionParser is a powerful library for parsing command line options (an advanced argv, if you like). It allows you to add options by defining attributes.
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python pacman.py
- starts an interactive game
(2) python pacman.py --layout smallClassic --zoom 2
OR python pacman.py -l smallClassic -z 2
- starts an interactive game on a smaller board, zoomed in
"""
parser = OptionParser(usageStr) ## This creates the Option Parser instance. It also passes the usageStr which functions as a little help-text for the user.
### In this section all the option strings are defined. Typically each option has one short option string and one long option string. For example, parser.add_option('-n', ... has '-n' as the short and '--numGames' as the long option string. Both have the same effect. The option argument will be the same and be saved as the variable 'numGames'.
parser.add_option('-n', '--numGames', dest='numGames', type='int',
help=default('the number of GAMES to play'), metavar='GAMES', default=1) ## the syntax for the options is (based on the example in this line) -n 3 or --numGames 3. This means that the value '3' would be assigned to the variable numGames.
parser.add_option('-l', '--layout', dest='layout',
help=default('the LAYOUT_FILE from which to load the map layout'), #The instance -> 'options.layout' defines the LAYOUT_FILE from which to load the map layout; DEFAULT = mediumClassic
metavar='LAYOUT_FILE', default='mediumClassic')
parser.add_option('-p', '--pacman', dest='pacman',
help=default('the agent TYPE in the pacmanAgents module to use'), #The instance -> 'options.pacman' defines which agent TYPE in the pacmanAgents module to use.
metavar='TYPE', default='KeyboardAgent')
parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
help='Generate minimal output and no graphics', default=False)
parser.add_option('-g', '--ghosts', dest='ghost',
help=default('the ghost agent TYPE in the ghostAgents module to use'),
metavar = 'TYPE', default='RandomGhost')
parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
help=default('The maximum number of ghosts to use'), default=4)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom the size of the graphics window'), default=1.0)
parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('-r', '--recordActions', action='store_true', dest='record',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', dest='gameToReplay',
help='A recorded game file (pickle) to replay', default=None)
parser.add_option('-a','--agentArgs',dest='agentArgs',
help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('--frameTime', dest='frameTime', type='float',
help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
help='Turns on exception handling and timeouts during games', default=False)
parser.add_option('--timeout', dest='timeout', type='int',
help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
#ONCE ALL THE OPTIONS HAVE BEEN DEFINED, optparse is instructed to parse the program's command line.
##> The parser.parse_args() returns two values:
### (A) OPTIONS: An object containing values for all of your options, e.g. if --file takes a single string argument, then options.file will be the filename supplied by the user, or None if the user did not supply that option
### (B) ARGS: The list of positional arguments leftover after parsing options (we call this here otherjunk)
options, otherjunk = parser.parse_args(argv) ## if the user happens to accidentally enter an argument other than those specified by parser.add_option, it is passed to otherjunk
if len(otherjunk) != 0: ## if there actually ends up being a value in otherjunk, the program raises an Exception.
raise Exception('Command line input not understood: ' + str(otherjunk))
args = dict() # ARGS IS THE VARIABLE THAT IS BEING RETURNED BY THE readCommand function.
# Fix the random seed
if options.fixRandomSeed: random.seed('cs188') # 'random.seed' is part of the random module. The random.seed([x]) call seeds the random number generator. The optional argument x can be any hashable object.
# Choose a layout
args['layout'] = layout.getLayout( options.layout ) # REF_LAYOUT111: layout.py --> This function returns the layout object that was created by the Layout class via the getLayout function. This contains the height, width, walls, food, capsules and agent positions etc.
if args['layout'] == None: raise Exception("The layout " + options.layout + " cannot be found")
# Choose a Pacman agent
noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics) ## noKeyboard is set to TRUE if no replay file is given and the user chooses the text- or quiet-graphics option.
##print noKeyboard
pacmanType = loadAgent(options.pacman, noKeyboard) ## [see REFERENCE_001]: the loadAgent function takes the agent name the user passed on the command line as the --pacman option and identifies the appropriate agent class (which may be a programmed agent or a ghost agent).
agentOpts = parseAgentArgs(options.agentArgs) ## Passes options.agentArgs, which was captured from the user's console input, into the agentOpts variable. agentArgs is: "Comma separated values sent to agent, e.g. opt1=val1,opt2,opt3=val3". The parseAgentArgs function converts the option-value pairings into a dictionary formatted opts[opt1] = val1.
if options.numTraining > 0: ## numTraining was captured from the user's console input and designates how many games are training games, which means that the output remains suppressed.
args['numTraining'] = options.numTraining ## This takes the user's input as the -x or --numTraining option and passes it to the args dictionary under the numTraining key as args['numTraining'].
if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining ## This integrates the number of training rounds into the agentOpts dictionary.
pacman = pacmanType(**agentOpts) ## REFERENCE002 ## Instantiate Pacman with agentOpts. ## The variable pacmanType contains a reference to the agent class loaded by the loadAgent function. Loading does not instantiate the class; that happens here. ## See [REFERENCE_001]: ## The * and ** will 'soak up' any remaining values not otherwise accounted for. In this case these options are basically the agent options the user can input.
## agentOpts contains the opts dictionary = {opt1:val1, opt2:val2, opt3:val3}; it also contains the numTraining variable as the ['numTraining'] key. As such it has the following structure. {opt1:val1,opt2:val2,opt3:val3, numTraining:int}.
args['pacman'] = pacman ## This passes the instantiated agent object to the args dictionary under the 'pacman' key.
# Don't display training games
if 'numTrain' in agentOpts: ## Checks whether the user has determined a certain number of training games. If they did, the number is passed on as an int to the options.numQuiet and option.numIgnore variables.
options.numQuiet = int(agentOpts['numTrain'])
options.numIgnore = int(agentOpts['numTrain'])
# Choose a ghost agent
ghostType = loadAgent(options.ghost, noKeyboard) ## The options.ghost variable contains the user's ghost type preference as specified in the console. The user can choose between -g RandomGhost, a ghost that chooses a legal action uniformly at random, OR DirectionalGhost, a ghost that prefers to rush Pacman, or flee when scared.
args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )] # instantiates as many ghost agents as the player requested by entering the desired number as -k / --numghosts in the console.
# Choose a display format ## contains whether the game output is displayed as minimal output and no graphics (-q), text only (-t) or via graphicsDisplay (standard)
if options.quietGraphics:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.textGraphics:
import textDisplay
textDisplay.SLEEP_TIME = options.frameTime
args['display'] = textDisplay.PacmanGraphics()
else:
import graphicsDisplay ## This refers to the module that is responsible for the graphical representation of the game.
args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime) ## This line instantiates the PacmanGraphics class from the graphicsDisplay module and stores the instance under args['display'].
args['numGames'] = options.numGames
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['timeout'] = options.timeout
# Special case: recorded games don't use the runGames method or args structure
if options.gameToReplay != None:
print 'Replaying recorded game %s.' % options.gameToReplay
import cPickle
f = open(options.gameToReplay)
try: recorded = cPickle.load(f)
finally: f.close()
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
return args #returns the args-dictionary which contains:
##args['pacman'] - the instantiated pacman agent, constructed with the agentOpts dictionary {opt1: val1, opt2: val2, opt3: val3, numTraining: int}
##args['layout'] - the layout object that was created by the Layout class via the getLayout function.
##args['numTraining'] - designates how many games are training games, for which the output remains suppressed
##args['ghosts'] - contains the instantiated ghost agents in line with the number the user specified
##args['display'] - contains whether the game output is displayed as minimal output and no graphics (-q), text only (-t) or via graphicsDisplay (standard)
##args['numGames'] - the number of GAMES to play
##args['record'] - Writes game histories to a file (named by the time they were played)
##args['catchExceptions'] = options.catchExceptions - Turns on exception handling and timeouts during games
##args['timeout'] = options.timeout -Maximum length of time an agent can spend computing in a single game | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle_command_line():\n commands = scan_for_commands()\n parser = argparse.ArgumentParser(\n description=\"A set of utilities to ease the installation of Modoboa.\",\n epilog=\"\"\"Available commands:\n%s\n\"\"\" % \"\\n\".join([\"\\t%s\" % c for c in sorted(commands)]))\n parser.add_argument(\"--verbose\", action=\"store_true\",\n help=\"Activate verbose output\")\n parser.add_argument(\"command\", type=str,\n help=\"A valid command name\")\n (args, remaining) = parser.parse_known_args()\n\n if args.command not in commands:\n print(\"Unknown command '%s'\" % args.command, file=sys.stderr)\n sys.exit(1)\n\n commands[args.command](commands, verbose=args.verbose).run(remaining)",
"def pacman(args, force_sudo=None, output=True):\n # Handle sudo\n cmd = ['sudo'] if _parse_bool(get('PACMAN_SUDO')) else []\n # Handle force sudo\n if force_sudo is not None:\n cmd = ['sudo'] if force_sudo else []\n # Select caller regarding to the need of output or not\n caller = check_output if output else call\n # Call pacman\n output = caller(cmd + [get('PACMAN_CMD')] + args, universal_newlines=True)\n # Return the output\n return output",
"def running_from_pacman():\n ppid = os.getppid()\n p_name = subprocess.check_output(['ps', '-p', str(ppid), '-o', 'comm='])\n p_name = p_name.decode().rstrip()\n if ARGV.get(DEBUG_OPT):\n err_print('informant: running from: {}'.format(p_name))\n return p_name == 'pacman'",
"def process_command() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--spider',\n help='Please input the scrapy spider name',\n type=str,\n required=True)\n return parser.parse_args()",
"def parse_command(self) -> None:\n\n # Verify the command length and existence\n if self.command_str is not None and len(self.command_str) >= 1:\n\n # Split the command into two parts\n tmp_list: list = self.command_str.split(\" \", 1)\n\n # Verify the command format\n if tmp_list[0][0] == \"!\":\n self.name = tmp_list[0]\n if len(tmp_list) == 2:\n self.arg = tmp_list[1]",
"def cmd_appe(args):",
"def process_cmd(config, cmd):\n # Separate command from arguments\n cmd_parts = cmd.split(' ', 1)\n head = cmd_parts[0]\n args = ''\n if len(cmd_parts) == 2:\n args = cmd_parts[1]\n\n # Call the command\n if not common.call_cmd(head, config, args):\n print(\"RabbitHole: Unknown command '{}'\".format(head))",
"def _make_cmdline(self, line):\n if isinstance(line, list):\n parts = line\n else:\n parts = line.split(\" \", 1)\n cmd = parts[0]\n exe = os.path.join(BINDIR, cmd)\n\n python_cmds = [\"samba-tool\",\n \"samba_dnsupdate\",\n \"samba_upgradedns\",\n \"script/traffic_replay\",\n \"script/traffic_learner\"]\n\n if os.path.exists(exe):\n parts[0] = exe\n if cmd in python_cmds and os.getenv(\"PYTHON\", None):\n parts.insert(0, os.environ[\"PYTHON\"])\n\n if not isinstance(line, list):\n line = \" \".join(parts)\n\n return line",
"def get_command(pid):",
"def handle_cmdline():\n\n cmdline = ArgumentParser(init_args=['address', 'arch', 'file'],\n address_required=True, address_default=None,\n file_required=True,\n file_help='Flash or memory image to inspect',\n formatter_class=RawDescriptionHelpFormatter,\n usage=_USAGE, description=_DESCRIPTION, epilog=_EPILOG)\n\n cmdline.add_argument('--longhelp',\n choices=['Y', 'N'],\n default=None,\n help=_LONGHELP_TEXT)\n\n cmdline.add_argument('--autocomplete',\n choices=['Y', 'N'],\n default=None,\n help=_AUTOCOMPLETE_TEXT)\n\n cmdline.add_argument('--threshold',\n type=int,\n default=5,\n help='Minimum table size to report. Default: 5')\n\n cmdline.add_argument('--subcmds',\n action='store_true',\n default=False,\n help='Include sub-command tables in displayed results')\n\n cmdline.add_argument('--details',\n action='store_true',\n default=False,\n help='Display more detailed output')\n\n args = cmdline.parse_args()\n\n if args.longhelp is not None:\n args.longhelp = args.longhelp == 'Y'\n\n if args.autocomplete is not None:\n args.autocomplete = args.autocomplete == 'Y'\n\n return args",
"def parse(self, message):\n # The message does not contain the command name.\n # self.args will become the parsed Namespace object.\n\n # For command aliases, add the prepend string\n message = \"{} {}\".format(self.arguments_prepend, message)\n\n parser = self.get_parser()\n\n message = message.replace(\"'\", \"<<APOS>>\")\n message = message.replace('\\\\\"', \"<<QUOT>>\") # explicit \\\"\n try:\n message = shlex.split(message, posix=False)\n # posix=False does not remove quotes\n message = [m.strip('\"') for m in message]\n except ValueError as e:\n # raised if shlex detects fucked up quotemarks\n # message = message.split()\n raise CommandParsingError(\n \"Unmatched quotemark. Use \\\\\\\" to escape a literal quotemark\"\n ) from e\n message = [w.replace(\"<<APOS>>\", \"'\") for w in message]\n message = [w.replace(\"<<QUOT>>\", '\"') for w in message]\n try:\n # Can throw ArgumentError\n self.args = parser.parse_args(message)\n except CommandParsingHelp as error:\n # The error contains the get_usage string but I'm going to just\n # ignore that\n raise (\n MyFaultError if self.__doc__ is None else CommandUsageMessage\n )(self.make_command_help_string()) from error",
"def _extract_command(self, args):\n opts = self.gopts[:]\n for cmd in self.ctable.values():\n opts.extend(cmd.opts)\n sfl, lfl, _ = self._compute_flags(opts, check_conflicts=False)\n\n lopts,largs = getopt.getopt(args, sfl, lfl)\n if not largs:\n return None\n return self._command(largs[0])",
"def _from_command_line():\n # Manim can be called from the command line in three different\n # ways. The first two involve using the manim or manimcm commands\n prog = os.path.split(sys.argv[0])[-1]\n from_cli_command = prog in [\"manim\", \"manimcm\"]\n\n # The third way involves using `python -m manim ...`. In this\n # case, the CLI arguments passed to manim do not include 'manim',\n # 'manimcm', or even 'python'. However, the -m flag will always\n # be the first argument.\n from_python_m = sys.argv[0] == \"-m\"\n\n return from_cli_command or from_python_m",
"def query_cmdline():",
"async def manga(self, ctx: commands.Context) -> None:\n if ctx.invoked_subcommand is None:\n if ctx.subcommand_passed is None:\n # There is no subcommand: inform the user about the manga reader.\n summary = \"\"\"Hi! Welcome to Bakerbot's manga reader.\n This cog houses commands for searching and reading manga.\n See `$help mangadex` for a full list of available subcommands.\"\"\"\n\n embed = utilities.Embeds.standard()\n embed.set_footer(text=\"Powered by the Mangadex API.\", icon_url=utilities.Icons.info)\n embed.description = summary\n await ctx.reply(embed=embed)\n else:\n # The subcommand was not valid: throw a fit.\n command = f\"${ctx.command.name} {ctx.subcommand_passed}\"\n summary = f\"`{command}` is not a valid command.\"\n footer = \"Try $help mangadex for a full list of available subcommands.\"\n embed = utilities.Embeds.status(False, summary)\n embed.set_footer(text=footer, icon_url=utilities.Icons.cross)\n await ctx.reply(embed=embed)",
"def verun(cmd):\n run('pew in {0} {1}'.format(package_name(), cmd))",
"def parseInputLine(self, line):\r\n if line is not None and line is not '':\r\n func = getattr(self, 'cmd_' + line.split()[0].upper(), None)\r\n if func is not None:\r\n func(line.split()[1:])\r\n else:\r\n self.terminal.write('No such command')\r\n self.showPrompt()",
"def parse_command_line(self):\n return self.command_line_parser.parse()",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\", help=\"Fasta rDNA input\")\n parser.add_argument(\"output\", help=\"GFF annotation\")\n parser.add_argument(\"kingdom\", help=\"Choose kingdom\")\n args = parser.parse_args()\n command(args)",
"def process_commands():\n # Parse and handle each different command\n args = parse_arguments()\n\n pdfsplit.pdf_split(args.file, args.pages, args.offset)",
"def handle_program_options():\n parser = argparse.ArgumentParser(description=\"Gather numeric information \\\n about the processed sequence data in an \\\n MG-RAST project.\")\n parser.add_argument('project_id',\n help=\"The project identifier (MG-RAST ID)\")\n parser.add_argument('-a', '--auth_key',\n help=\"An MG-RAST API authorization key. This is \\\n necessary to access projects marked as private.\")\n parser.add_argument('-g', '--group_by', action='append',\n help=\"A string that matches some part of the \\\n 'Metagenome Name' field. All matching project \\\n metagenomes will be grouped by this identifier \\\n and their stats will be summed. This option can \\\n be specified multiple times to create multiple \\\n groups. All non-matching metagenomes will \\\n appear separately in the table. NOTE: \\\n Strings will be matched longest first. This \\\n allows for matching names that might be a \\\n substring of another match. For example: -g S \\\n -g NS. The name field will first be matched \\\n against the longest string (NS) first and then \\\n each smaller string in order.\")\n parser.add_argument('-o', '--output_filename', default='meta_stats.txt',\n help=\"The name of the file the project summary \\\n information will be written to.\")\n\n# parser.add_argument('-v', '--verbose', action='store_true')\n\n return parser.parse_args()",
"def process_command_line():\n\n # Add the command lien arguments\n parser = argparse.ArgumentParser(description=\"test autocontext\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # Arguments\n parser.add_argument(\"--ilastik\", type=str, required=True,\n help=\"path to the file run_ilastik.sh\")\n\n parser.add_argument(\"--train\", type=str,\n help=\"path to the ilastik project that will be used for training\")\n\n parser.add_argument(\"--batch_predict\", type=str,\n help=\"path of the cache folder of a previously trained autocontext that will be used for batch \"\n \"prediction\")",
"def OnModCommand(self, message):\n\n argument = QuotedString(quoteChar='(', endQuoteChar=')', escChar='\\\\') | Regex(r'(?!--)[^\\s]+')\n arguments = ZeroOrMore(argument)\n command = Word(alphas)\n kwarg = command+Suppress(Optional(Literal('=')))+argument\n kwargs = Suppress(Literal('--')) + ZeroOrMore(kwarg.setParseAction(tuple))\n commandWithArgs = StringStart() + command + Group(arguments) + Group(Optional(kwargs)) + StringEnd()\n\n try:\n pCommand, args, kw = commandWithArgs.parseString(message)\n except ParseException as e:\n self.PutModule('Invalid command {}'.format(e))\n return znc.CONTINUE\n\n\n if not pCommand:\n self.PutModule('No command')\n return znc.CONTINUE\n\n method = getattr(self, 'cmd_' + pCommand.lower(), None)\n\n if method is None:\n self.PutModule('Invalid command {}'.format(pCommand))\n return znc.CONTINUE\n\n try:\n method(*args, **dict(list(kw)))\n except TypeError as e:\n self.PutModule('Usage: {}{}\\n{}'.format(pCommand, signature(method), e))\n return znc.CONTINUE\n\n return znc.CONTINUE",
"def OnModCommand(self, commandline):\n import shlex\n argv = shlex.split(str(commandline))\n from .cmdopts import RAWSEP\n if RAWSEP in argv:\n argv, *rest = str(commandline).partition(RAWSEP)\n argv = shlex.split(argv)\n argv.append(\"\".join(rest))\n #\n command, *args = argv\n command = self.approx.decmd(command.lower()) # these alone don't check\n mod_name = self.approx.encmd(command) # membership; see tests\n if mod_name not in self.mod_commands:\n msg = \"Invalid command\"\n if command.startswith(\"debug_\") and not self.debug:\n msg += \"; for debug-related commands, pass DEBUG=1\"\n self.put_pretty(msg)\n return\n #\n if (command == \"debug_args\" and\n args and args[0] not in (\"--help\", \"-h\")):\n args = [\"--\"] + args\n namespace = self.parse_command_args(mod_name, args)\n if namespace is None:\n return\n #\n try:\n self.mod_commands[mod_name](**vars(namespace)) # void\n except Exception:\n self.print_traceback()\n # Raising here makes znc print something about the command not\n # being registered\n return",
"def exe(self, inp):\n try:\n spl = shlex.split(inp)\n except:\n self.err_print('Mismatched quotations.')\n self.command_event.set()\n return\n\n if not spl:\n self.err_print(\"\")\n elif spl[0] in self.commands:\n self.err_print(\"\")\n self.commands[spl[0]](spl[1:])\n else:\n self.err_print('Invalid command: ' + spl[0])\n\n self.command_event.set()",
"def _process_command(self, **kwargs):\n return self.run_command(**kwargs)",
"def onecmd(self, line):\n cmd, arg, line = self.parseline(line)\n if not line:\n return self.emptyline()\n if cmd is None:\n return self.default(line)\n self.lastcmd = line\n if line == 'EOF' : #end the loop\n self.lastcmd = ''\n return True\n if cmd == '':\n return self.default(line)\n else:\n func = None\n for context in reversed(self.__class__.context_stack):\n func = context.resolve_cmd(cmd)\n if not func:\n break\n if not func:\n func = self.__class__.top_context.resolve_cmd(cmd)\n if not func:\n return self.default(line)\n args = self.cmdline_parse(arg)\n return func(args)",
"def ParseManualCommand(hc, *args):\n arg_count = len(args)\n \n hc = hc.upper()\n cmd = map(lambda s:s.upper(), args)\n \n house_code = hc[0]\n\n # Check if this is a whole address\n if IsValidUnitAddress(hc):\n conn.SendAddr(hc)\n # ...or just a house code letter (arg_count > 1 -> unit codes + command)\n elif arg_count > 1 and len(hc) == 1 and hc in \"ABCDEFGHIJKLMNOP\":\n # Parse the next part as if it were numbers which would\n # complete the address\n for n in cmd[0].split(\",\"):\n if n.isdigit() and 0 < int(n) < 17:\n conn.SendAddr(house_code + n)\n else:\n print \"Warning: %s is an invalid unit code\"%n\n \n # Make sure there is a command to process\n if arg_count > 0:\n if cmd[-1] in (\"ON\", \"OFF\", \"ALL\"):\n conn.SendFunc(house_code + \" \" + cmd[-1], 0)\n \n elif 0 < abs(int(cmd[-1])) < 101:\n dim_bright_val = int(cmd[-1])\n percent = (abs(dim_bright_val) / 100.0) * 22\n\n if dim_bright_val < 0:\n conn.SendFunc(house_code + \" dim\", percent)\n\n elif dim_bright_val > 0:\n conn.SendFunc(house_code + \" bright\", percent)\n\n else:\n print \"Error: Command must be ON or OFF and dim/bright level must be between -100% and 100%!\"",
"def parse_command(self, command):\n \n if string_find(command.cmd, 'file dump') or string_find(command.cmd, 'file cdump'):\n command.cmd = create_dump_command(command.cmd)\n\n return command",
"def command(self, cmd):\n self.lmp.command(cmd)"
]
| [
"0.60683686",
"0.6021038",
"0.5957683",
"0.57956135",
"0.57873833",
"0.5664116",
"0.5649994",
"0.5588615",
"0.555994",
"0.55404824",
"0.5530758",
"0.5511282",
"0.5501264",
"0.549662",
"0.548287",
"0.54800344",
"0.5472996",
"0.54409564",
"0.542683",
"0.5424045",
"0.5418257",
"0.5410606",
"0.5406318",
"0.540358",
"0.5399995",
"0.5396754",
"0.53923726",
"0.5377081",
"0.53702724",
"0.536872"
]
| 0.65644956 | 0 |
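The readCommand document above is the option-parsing half of pacman.py. As a hedged sketch of how it is typically wired up (runGames is only mentioned in the comments above, and the original project is Python 2):

```python
import sys

if __name__ == '__main__':
    args = readCommand(sys.argv[1:])  # parse everything after the script name into a dict
    runGames(**args)                  # unpack the dict into keyword arguments (runGames assumed)
```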
Create this CSR from a buffer | def from_buffer(data, encoding='pem'):
return X509Csr.from_open_file(io.BytesIO(data), encoding) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def frombuffer(buffer, **kwargs):\n\n return call_origin(numpy.frombuffer, buffer, **kwargs)",
"def create_from_buffer(cls, buffer: str) -> \"TensorImage\":\n image_data = image_utils.decode_image_from_buffer(buffer, len(buffer))\n return cls(image_data, is_from_numpy_array=False)",
"def from_buffer(cls, buffer: bytes):\n header = SignalHeader.from_buffer(buffer)\n if header is None:\n return None\n self = cls(header=header)\n log.debug(\n \"Received Signal PDU {} with len {}\".format(\n self.header.type, self.header.len\n )\n )\n self.items_from_buffer(buffer)\n return self",
"def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaCertificate._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)",
"def from_buffer(data):\n opcode = ustruct.unpack(ATT.struct_format, data[:ATT.struct_size])[0]\n\n # att = uctypes.struct(\n # uctypes.addressof(data[:ATT.struct_size]),\n # ATT_STRUCT,\n # uctypes.LITTLE_ENDIAN\n # )\n\n data = data[ATT.struct_size:]\n return ATT(opcode, data)",
"def load_from_buffer(self, buffer):\n loader = GazpachoObjectBuilder(buffer=buffer, app=self._app)\n self._read_from_loader(loader)",
"def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgCertificateChainDep._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)",
"def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgCertificateChain._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)",
"def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEd25519CertificateDep._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)",
"def __init__(self):\n self.buffer = bytearray()",
"def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaSignatureDepA._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)",
"def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaSignature._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)",
"def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaSignatureDepB._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)",
"def from_buffer(cls, data, codec):\n new = cls()\n new.codec = codec\n new.data = codec.parse(data)\n return new",
"def __init__( self, buffer, start_offset, bytes_reverse=False, bits_reverse=False, output_reverse=False, bytes_to_cache=1 ):\n assert is_bytes( buffer )\n assert start_offset in range( len( buffer ) )\n self.buffer = buffer\n self.bits_reverse = bits_reverse\n self.bytes_reverse = bytes_reverse\n self.output_reverse = output_reverse\n self.pos = start_offset\n self.bytes_to_cache = bytes_to_cache\n self._fill_buffer()",
"def _build_bufferview(buffer, target, byte_length, byte_offset, byte_stride):\n new_buffer_view = {\n \"buffer\": buffer,\n \"byteLength\": byte_length,\n \"byteOffset\": byte_offset\n }\n\n properties_keys = [\"target\", \"byteStride\"]\n properties_values = [target, byte_stride]\n\n for key, val in zip(properties_keys, properties_values):\n if val is not None:\n new_buffer_view[key] = target\n\n return new_buffer_view",
"def __init__(self, buffer_size: int, batch_size: int):\n self.buffer: list = list()\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.idx = 0",
"def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEd25519SignatureDepA._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)",
"def create_buf(self, num_bytes, cacheable = 0):\n if self.buf is None:\n self.buf = libxlnk.cma_alloc(num_bytes, cacheable)\n if self.buf == ffi.NULL:\n raise RuntimeError(\"Memory allocation failed.\")\n else:\n libxlnk.cma_free(self.buf)\n self.buf = libxlnk.cma_alloc(num_bytes, cacheable)\n bufPhyAddr = libxlnk.cma_get_phy_addr(self.buf)\n self._bufPtr = ffi.cast(\"uint32_t *\",bufPhyAddr)\n self.bufLength = num_bytes",
"def __init__(self, n_taps, dtype='float'):\n self.buffer = np.zeros(n_taps, dtype)\n self.n_taps = n_taps",
"def from_buffer(cls, buffer: bytes):\n if len(buffer) < cls.SIZE:\n return None\n unpacked = struct.unpack(\"!ccccHH\", buffer[: cls.SIZE])\n if unpacked[:4] != (b\"D\", b\"L\", b\"E\", b\"P\"):\n log.error(\"No valid DLEP header found\")\n return None\n return cls(SignalType(unpacked[4]), unpacked[5])",
"def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEd25519SignatureDepB._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)",
"def from_buffer(self, buf):\n with self.lock:\n # if we're on python3, convert buf to bytes\n # otherwise this string is passed as wchar*\n # which is not what libmagic expects\n if type(buf) == str and str != bytes:\n buf = buf.encode('utf-8', errors='replace')\n return magic_buffer(self.cookie, buf)",
"def __init__(self, max_entries, buffer_config):\n self.max_entries = max_entries\n \n self.data_dict = dict()\n for key, val in buffer_config.items():\n if (type(val) == int):\n shape = (self.max_entries, val)\n elif (type(val) == tuple):\n shape = (self.max_entries,) + val\n else:\n raise Exception(\"Not a valid buffer_config.\")\n self.data_dict[key] = np.zeros(shape)\n\n self.start_idx = 0\n self.num_entries = 0",
"def _load_from_buffer(self): # noqa: E501\n self._load_from_resource()",
"def from_buffer(cls, slots, buf, offset = 0, fileno = None, fileoffs = None):\n locked = fcntl is not None and fileno is not None\n size = cls.size(slots)\n try:\n if locked:\n fcntl.lockf(fileno, fcntl.LOCK_EX, size, fileoffs)\n return cls(slots, buf, offset, locked)\n finally:\n if locked:\n fcntl.lockf(fileno, fcntl.LOCK_UN, size, fileoffs)",
"def __init__(self, buf=None):\n if buf:\n self.unpack(buf)",
"def __init__(self, geo_model=None):\n self.rex_bytes = bytearray()\n self.n_bytes = 0\n\n self.data_id = 0\n self.geo_model = geo_model",
"def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))",
"def _create_bufferview(self, name, buffer, byte_length, byte_offset, byte_stride, target=None):\n new_buffer_view = self._build_bufferview(buffer=self._resolve_mapping(inp=buffer, mapping=self.buffers_map),\n target=target,\n byte_length=byte_length,\n byte_offset=byte_offset,\n byte_stride=byte_stride)\n\n self.bufferViews.append(new_buffer_view)\n\n if name:\n self.bufferViews_map[name] = self._last_index(self.bufferViews)\n\n return self._last_index(self.bufferViews)"
]
| [
"0.6514623",
"0.61291546",
"0.60954344",
"0.59913373",
"0.5929997",
"0.5730285",
"0.56947875",
"0.56856996",
"0.5677521",
"0.5608824",
"0.55828583",
"0.5568209",
"0.5565738",
"0.5550145",
"0.5483738",
"0.54315054",
"0.5386336",
"0.5300147",
"0.52866995",
"0.5259141",
"0.52512574",
"0.5250253",
"0.5226712",
"0.52073556",
"0.5115248",
"0.5093108",
"0.5084805",
"0.5067542",
"0.5057997",
"0.5043356"
]
| 0.6781523 | 0 |
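A minimal usage sketch for the from_buffer document above; X509Csr comes from the snippet itself (its import path is not shown in this row), the PEM body is placeholder content, and bytes are assumed because the data is wrapped in io.BytesIO:

```python
# X509Csr as defined in the row above; import path not shown in this dataset.
pem_data = (b"-----BEGIN CERTIFICATE REQUEST-----\n"
            b"...\n"
            b"-----END CERTIFICATE REQUEST-----\n")
csr = X509Csr.from_buffer(pem_data)   # wraps the bytes in io.BytesIO and parses as PEM
```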
Create this CSR from a file on disk | def from_file(path, encoding='pem'):
try:
with open(path, 'r') as f:
return X509Csr.from_open_file(f, encoding)
except IOError:
raise X509CsrError("Could not read file %s" % path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CreateCrtFile(keyfile, csrfile):\n crtfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'x509',\n '-req',\n '-days', '1',\n '-in', csrfile,\n '-signkey', keyfile,\n '-out', crtfile\n ]\n _RunCommand(cmd)\n return crtfile",
"def create_from_file(cls, path):\n\n with open(path, 'r') as file:\n # Possible FileNotFound.\n text = file.read()\n return cls.create_from_string(text)",
"def CreateCsrFile(keyfile):\n csrfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'req',\n '-new',\n '-key', keyfile,\n '-out', csrfile,\n '-subj', '/C=NA/ST=NA/L=NA/O=Chromium/OU=Test/CN=chromium.org'\n ]\n _RunCommand(cmd)\n return csrfile",
"def from_file(cls, fn):\n dct = store.get_dict(fn, 'trainalgorithm')\n return cls.from_dict(dct)",
"def from_file(cls, file_obj):\n kdm = cls(file_obj.read())\n return kdm",
"def from_file(self, filename):\n return EGStub.from_file(filename).to_cryptosystem()",
"def create_csr(dn):\n tmp_file = f'/tmp/{get_temp_filename()}'\n key_filename = f'{tmp_file}.key'\n csr_filename = f'{tmp_file}.csr'\n cmd = [\n \"openssl\",\n \"req\",\n \"-subj\", f'{dn}',\n \"-newkey\", f'rsa:{rsa_keysize}',\n \"-keyout\", f'{key_filename}',\n \"-out\", f'{csr_filename}',\n \"-nodes\"\n ]\n exec_cmd(cmd)\n return read_keypair(key_filename, csr_filename)",
"def create_token(filename):\n\n try:\n os.makedirs(os.path.dirname(filename))\n except Exception:\n pass\n\n sk = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)\n vk = sk.verifying_key\n if vk is not None:\n line = encode_line(\"signing-key\", sk.to_der(), vk.to_der())\n\n with open(filename, \"w\") as f:\n f.write(line)",
"def fromfile(self, path):\n\t\tdata = filetools.read_data(path)\n\t\tprint \"File read: %i lines\" % len(data)\n\t\tself.build_matrix(data)",
"def fromFile(cls, filepath):\r\n return cls(values=foamFileFromFile(filepath, cls.__name__))",
"def create(self):\n self.file = open(self.filename, \"xb\", buffering=self.bufferSize)",
"def from_file(cls, path):\n raise NotImplementedError",
"def load_file(self):\n offset = self.meta.segmentid * DATASIZE\n filename = self.meta.filename.encode('UTF-8')\n snc.snc_load_file_to_context(c_char_p(filename), offset, self.sc)",
"def from_buffer(data, encoding='pem'):\n return X509Csr.from_open_file(io.BytesIO(data), encoding)",
"def from_file(cls, filepath):\n fp = open(filepath, 'rb')\n return cls(fp)",
"def from_file(self, path, **kwargs):\n\t\twith codecs.open(path, 'r', encoding='utf-8') as file_h:\n\t\t\tsource = file_h.read()\n\t\treturn self.from_string(source, **kwargs)",
"def from_file(cls, filename):\n\n f = libc.fopen(filename, \"r\")\n if f == 0:\n raise IOError(\"No such file\")\n\n try:\n map_ptr = ipset.ipmap_load(f)\n if map_ptr == 0:\n raise IOError(\"Could not read IP map\")\n\n return cls(None, map_ptr)\n\n finally:\n libc.fclose(f)",
"def from_genbank(cls, filename):\n\t\tseq_record = SeqIO.read(filename, 'genbank')\n\t\trec = cls(seq_record=seq_record)\n\t\treturn rec",
"def load_from_file(cls, file=None, file_path=None):\n if not file:\n file = open(file_path, 'r') \n if not file_path:\n file_path = file.name\n with file:\n file_meta = cls._get_file_meta(file, file_path=file_path)\n cls_properties = dict([[p, file_meta.get(p, None)] for p in cls.properties()])\n cls(key_name=file_path, **cls_properties).put()",
"def __init__(self, file_name, key):\n try:\n self._file_name = file_name\n self._encryptor = AES(key.encode())\n self._document = open(self._file_name, \"rb+\")\n except Exception as error:\n print(error)\n sys.exit(1)",
"def create_records_from_file(path_to_file):\n fd_xml_file = open(path_to_file, \"r\")\n xml = fd_xml_file.read()\n fd_xml_file.close()\n\n return create_record(xml)",
"def init_csr(privkey, names, cert_dir):\n csr_pem, csr_der = crypto_util.make_csr(privkey.pem, names)\n\n # Save CSR\n le_util.make_or_verify_dir(cert_dir, 0o755)\n csr_f, csr_filename = le_util.unique_file(\n os.path.join(cert_dir, \"csr-letsencrypt.pem\"), 0o644)\n csr_f.write(csr_pem)\n csr_f.close()\n\n logging.info(\"Creating CSR: %s\", csr_filename)\n\n return le_util.CSR(csr_filename, csr_der, \"der\")",
"def from_file(cls, path):\n metadata = get_metadata_from_file(path)\n metadata[\"path\"] = path\n _validate(metadata)\n inst = cls(**metadata)\n return inst",
"def from_file(self, path):\n data, sr = self.loader(path)\n return self.from_array(data, sr)",
"def from_file(cls, filepath):\n fp = open(filepath, 'r')\n\n return cls(fp)",
"def fromFile(cls, filepath):\r\n with open(filepath) as f:\r\n return cls('\\n'.join(f.readlines()))",
"def fromfile(self,file):\n self.d.update(params_file(file))",
"def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)",
"def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)",
"def from_file(cls, file_path, vocab_size, character_coverage, model_type, params):\n return super().from_file(file_path, vocab_size, character_coverage,\n DE_C_INTER_SENTENCEPIECE_MODE[model_type], params)"
]
| [
"0.6182878",
"0.6152139",
"0.61361444",
"0.59857833",
"0.58985895",
"0.5867565",
"0.58489877",
"0.5822996",
"0.563695",
"0.56063586",
"0.5573587",
"0.55555207",
"0.5551064",
"0.55499285",
"0.5524422",
"0.5514425",
"0.55135655",
"0.54952925",
"0.549421",
"0.54896677",
"0.54896027",
"0.5470057",
"0.5463287",
"0.5421012",
"0.5417658",
"0.5404743",
"0.5388257",
"0.53760767",
"0.53760767",
"0.5371786"
]
| 0.68524605 | 0 |
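A similar hedged sketch for from_file, showing the X509CsrError that the snippet raises when the file cannot be read (the path is a placeholder):

```python
# X509Csr and X509CsrError as defined in the row above; import path not shown here.
try:
    csr = X509Csr.from_file('/tmp/example.csr')   # placeholder path
except X509CsrError as exc:
    print("Could not load CSR: %s" % exc)
```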
Get the public key from the CSR | def get_pubkey(self):
return self._csr['certificationRequestInfo']['subjectPublicKeyInfo'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_public_key(self):\n# _log.debug(\"get_public_key\")\n certpath, cert, certstr = self.get_own_cert()\n try:\n cert = load_pem_x509_certificate(certstr, default_backend())\n except Exception as err:\n _log.error(\"Failed to load X509 certificate from PEM, err={}\".format(err))\n raise\n return cert.public_key()",
"def get_pub_key(self):\n return \"RSA {0}\".format(self._cert.get_pubkey().bits)",
"def get_pubkey(self):\n return EVP.PKey(m2.x509_req_get_pubkey(self.req), _pyfree=1)",
"def extractPublicKey(cert):\n pk = cert.get_pubkey()\n\n b = _util.binding\n l = b.lib\n ffi = b.ffi\n rsa = l.EVP_PKEY_get1_RSA(pk._pkey)\n buf = ffi.new(\"unsigned char **\")\n length = l.i2d_RSA_PUBKEY(rsa, buf)\n pk = ffi.buffer(buf[0], length)[:]\n ffi.gc(buf[0], l.OPENSSL_free)\n return pk",
"def get_public_key(self):\n return self.public_key",
"def public_key(self):\n return self._public_key",
"def public_key(self):\n return PublicKey(self._sk.public_key().public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw))",
"def get_public_key(self):\n return self.private_key.get_verifying_key()",
"def get_public_key_in_pem(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public",
"def get_pub_rsa_key(pub_key):\n return RSA.importKey(pub_key)",
"def get_google_public_cert_key() -> RSAPublicKey:\n r = requests.get(GOOGLE_PUBLIC_CERT_URL)\n r.raise_for_status()\n\n # Load the certificate.\n certificate = x509.load_pem_x509_certificate(r.content, default_backend())\n\n # Get the certicate's public key.\n public_key = certificate.public_key()\n\n return public_key",
"def get_public_key(self) -> str:\n\t\treturn self._publicKey",
"def GetPublicKey(self):\n return self.public_key",
"def get_pub_key(priv_key: rsa.RSAPrivateKey) -> rsa.RSAPublicKey:\n return priv_key.public_key()",
"def get_pubkey(pem):\n der = ssl.PEM_cert_to_DER_cert(pem)\n\n # Extract subjectPublicKeyInfo field from X.509 certificate (see RFC3280)\n cert = DerSequence()\n cert.decode(der)\n tbsCertificate = DerSequence()\n tbsCertificate.decode(cert[0])\n subjectPublicKeyInfo = tbsCertificate[6]\n\n return subjectPublicKeyInfo",
"def test_public_key_req(self):\n csr = \"\"\"-----BEGIN CERTIFICATE REQUEST-----\nMIHcMIGDAgEAMCExDzANBgNVBAMMBkVDIDI1NjEOMAwGA1UECgwFV2ViQ0EwWTAT\nBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQiDp4E4+/kzbPgA22wm6RuKYpZfTiVqcR0\nAuxu7bE0IMFcnQgnhJ3e7gbWq+spfSYEd3vJQ8a6L7tu+nTziY1qoAAwCgYIKoZI\nzj0EAwIDSAAwRQIhAMRpKf1c6Z0qgTCNxyKXZGsc4i/qxfqxzcZ/QK7Ot9TeAiA7\nAPUerdBAf4HdigxiwcckjZ8TG1snkyp/qVuMhxSDEg==\n-----END CERTIFICATE REQUEST-----\"\"\"\n x509req = crypto.load_certificate_request(PEM, csr)\n self.assertEqual(utils.public_key_type(x509req), c.KEY_EC)",
"def get_public_key(self, kid):\n resp = self.request(self.jwks_url(), method=\"GET\")\n resp.raise_for_status()\n\n # find the proper key for the kid\n for key in resp.json()[\"keys\"]:\n if key[\"kid\"] == kid:\n return self.jwt_key_to_pem(key)\n raise DecodeError(f\"Cannot find kid={kid}\")",
"def get_public_key_in_der(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public",
"async def retrieve_public_key(self, kid: str) -> str:\n\n directory = tedious.config.CONFIG['KEYS']['public-keys']\n async with aiofiles.open(os.path.join(directory, kid), mode='r') as file:\n public_key = await file.read()\n return public_key",
"def public_key():\n if not Authorizer.__public_key:\n Authorizer.__public_key = download_public_key()\n return Authorizer.__public_key",
"def get_public_key(cert_file):\n # Use OpenSSL to extract public key\n command = 'openssl x509 -inform pem -in %s -pubkey -noout'\n command = command % cert_file\n command = shlex.split(command)\n\n pipe = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n key_ascii = pipe.stdout.read()\n\n return key_ascii",
"def serializePublicKey(public_key):\n\treturn public_key.public_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PublicFormat.SubjectPublicKeyInfo\n\t)",
"def encode_csr(self, csr):\n return csr.public_bytes(\n serialization.Encoding.PEM,\n ).decode(encoding='UTF-8')",
"def generate_rsa_public_key(private_key: RSAPrivateKeyWithSerialization) -> RSAPublicKey:\n return private_key.public_key()",
"def public_signing_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"public_signing_key\")",
"def rsa_public_key(self, modulus: int, exponent: int) -> rsa.RSAPublicKey:\n return rsa.RSAPublicNumbers(exponent, modulus).public_key(default_backend())",
"def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed",
"def get_public_key(self, uid: str) -> str:\n return self.context.get(\n \"/dsum/public_key/%s\" % uid, None, \"DSum: failed retrieving the Curve 25519 private key with uid: %s\" % uid)['key']",
"def public_key_from_private_key(privkey: bytes, compressed: bool) -> bytes:\n key = ECKey(privkey)\n return key.get_public_key(compressed)",
"def parseAsPublicKey(s):\r\n return parsePEMKey(s, public=True)"
]
| [
"0.7291749",
"0.7151937",
"0.70317656",
"0.69595",
"0.6945708",
"0.694188",
"0.69205004",
"0.6918894",
"0.6846553",
"0.6839397",
"0.68323153",
"0.6822918",
"0.6772169",
"0.6771975",
"0.67513555",
"0.67417294",
"0.6733704",
"0.6703111",
"0.6654886",
"0.6640319",
"0.66141635",
"0.65900725",
"0.6581304",
"0.6580504",
"0.65662694",
"0.655226",
"0.65476805",
"0.65238374",
"0.6514127",
"0.64879596"
]
| 0.7473664 | 0 |
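A hedged inspection sketch for get_pubkey above; the returned object is the ASN.1 subjectPublicKeyInfo structure, and prettyPrint() is assumed to be the usual pyasn1 helper for a readable dump:

```python
csr = X509Csr.from_file('/tmp/example.csr')   # placeholder path
spki = csr.get_pubkey()                       # ASN.1 subjectPublicKeyInfo structure
print(spki.prettyPrint())                     # assumed pyasn1 helper for a readable dump
```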
Get the subject name field from the CSR | def get_subject(self):
ri = self.get_request_info()
if ri['subject'] is None:
ri['subject'] = None
# setup first RDN sequence
ri['subject'][0] = None
subject = ri['subject'][0]
return name.X509Name(subject) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subject(self) -> str:\n return self[\"Sns\"][\"Subject\"]",
"def get_name(self):\n return self.load_name(self.subject)",
"def get_certificate_name(cert_data) -> str:\r\n if cert_data is None:\r\n return None\r\n\r\n cert = x509.load_pem_x509_certificate(cert_data, default_backend())\r\n for fields in cert.subject:\r\n current = str(fields.oid)\r\n if \"commonName\" in current:\r\n return fields.value",
"def subject(self):\n return self.get(\"subject\")",
"def subject(self) -> \"str\":\n return self._attrs.get(\"subject\")",
"def subject(self) -> \"str\":\n return self._attrs.get(\"subject\")",
"def subject(self):\n subject = re.sub(RE_PATTERNS, '', self.header('Subject', ''))\n subject = re.sub(FW_PATTERNS, '', subject)\n return subject.strip()",
"def getSubject(self):\r\n return self.msg[\"Subject\"]",
"def get_pretty_subject(cert):\n subject = 'subject=' + _get_pretty_name(cert.get_subject())\n issuer = 'issuer=' + _get_pretty_name(cert.get_issuer())\n return subject + '\\n' + issuer + '\\n'",
"def getSubject(self):\n\n return X501DN.from_POW(self.get_POW().getSubject())",
"def getSubject(self):\n\n return X501DN.from_POW(self.get_POW().getSubject())",
"def get_csr_common_name(self, csr):\n cns = csr.subject.get_attributes_for_oid(NameOID.COMMON_NAME)\n if cns:\n # get_attributes_for_oid returns a list, but there should only be a\n # single cn attribute, so just return the first item.\n return cns[0].value\n return None",
"def get_user_provided_subject_identifier_attrname(self):\n return None",
"def subject(self):\n return self.properties.get(\"subject\", None)",
"def subject(self):\n if \"subject\" in self._prop_dict:\n return self._prop_dict[\"subject\"]\n else:\n return None",
"def subject(self):\n return self.mail.get('Subject')",
"def get_pubkey(self):\n return self._csr['certificationRequestInfo']['subjectPublicKeyInfo']",
"def subject(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subject\")",
"def get_name(self):\n return m2.x509_extension_get_name(self.x509_ext)",
"def get_subject(self):\n return self._subject",
"def subject(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"subject\")",
"def subject_property_name(self):\n subject_property_name = 'subject'\n if 'participant' in self.schemas.keys():\n subject_property_name = 'participant'\n return subject_property_name",
"def getSubject(self, record):\n base_subject = super(CustomSMTPHandler, self).getSubject(record)\n try:\n hostname = platform.node()\n # pylint: disable=broad-except\n except Exception:\n hostname = 'Unknown'\n\n return base_subject.format(hostname)",
"def generate_x509_name(self, cn):\n name_attributes = [\n x509.NameAttribute(NameOID.COMMON_NAME, cn),\n ]\n if self.settings['csr_country_name']:\n name_attributes.append(\n x509.NameAttribute(\n NameOID.COUNTRY_NAME,\n self.settings['csr_country_name'],\n )\n )\n if self.settings['csr_state_or_province_name']:\n name_attributes.append(\n x509.NameAttribute(\n NameOID.STATE_OR_PROVINCE_NAME,\n self.settings['csr_state_or_province_name'],\n )\n )\n if self.settings['csr_locality_name']:\n name_attributes.append(\n x509.NameAttribute(\n NameOID.LOCALITY_NAME,\n self.settings['csr_locality_name'],\n )\n )\n if self.settings['csr_organization_name']:\n name_attributes.append(\n x509.NameAttribute(\n NameOID.ORGANIZATION_NAME,\n self.settings['csr_organization_name'],\n )\n )\n return x509.Name(name_attributes)",
"def subject(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subject\")",
"def get_subject_cn(self):\n subject = self.get_subject()\n cns = subject.get_entries_by_oid(name.OID_commonName)\n return [cn.get_value() for cn in cns]",
"def get_subject(self, idattr):\n return self.get_node('//Subject[@id=\"%s\"]' % idattr)",
"def subject(self):\n return self._subject",
"def name(self):\n cSld = self._element.cSld\n return cSld.get('name', default='')",
"def get_from_subject(mesid, mailbox):\n res, data = mailbox.fetch(mesid, 'BODY.PEEK[HEADER.FIELDS (SUBJECT FROM)]')\n if res != 'OK':\n raise RuntimeError('error in fetch call for {}'.format(mesid))\n # Apparently default character set for IMAP is UTF7\n myheads = data[0][1].decode('utf-7')\n name = get_from(myheads)\n\n subject = findall(r'Subject:\\s+(.*)\\r\\n', myheads)[0] # Assume match\n return ' '.join((name, ':', subject))"
]
| [
"0.7051864",
"0.70387065",
"0.69659764",
"0.6949392",
"0.6927241",
"0.6927241",
"0.67928165",
"0.6778444",
"0.6740706",
"0.6726373",
"0.6726373",
"0.6691816",
"0.6677457",
"0.66740286",
"0.6524414",
"0.6519214",
"0.65027255",
"0.6398681",
"0.6398138",
"0.6337108",
"0.6326856",
"0.6324386",
"0.6291588",
"0.62777776",
"0.62526244",
"0.6233873",
"0.61763716",
"0.6152289",
"0.6136461",
"0.6076692"
]
| 0.81360096 | 0 |
Get the CN part of subject. | def get_subject_cn(self):
subject = self.get_subject()
cns = subject.get_entries_by_oid(name.OID_commonName)
return [cn.get_value() for cn in cns] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_subject(self):\n ri = self.get_request_info()\n if ri['subject'] is None:\n ri['subject'] = None\n # setup first RDN sequence\n ri['subject'][0] = None\n\n subject = ri['subject'][0]\n return name.X509Name(subject)",
"def getSubject(self):\r\n return self.msg[\"Subject\"]",
"def getSubject(self):\n\n return X501DN.from_POW(self.get_POW().getSubject())",
"def getSubject(self):\n\n return X501DN.from_POW(self.get_POW().getSubject())",
"def _get_primary_cn(tls_cert):\n return cert_parser.get_host_names(tls_cert)['cn']",
"def get_subject(self):\n return self._subject",
"def subject(self):\n return self.properties.get(\"subject\", None)",
"def subject(self):\n return self.get(\"subject\")",
"def subject(self) -> str:\n return self[\"Sns\"][\"Subject\"]",
"def subject(self) -> \"str\":\n return self._attrs.get(\"subject\")",
"def subject(self) -> \"str\":\n return self._attrs.get(\"subject\")",
"def get_from_subject(mesid, mailbox):\n res, data = mailbox.fetch(mesid, 'BODY.PEEK[HEADER.FIELDS (SUBJECT FROM)]')\n if res != 'OK':\n raise RuntimeError('error in fetch call for {}'.format(mesid))\n # Apparently default character set for IMAP is UTF7\n myheads = data[0][1].decode('utf-7')\n name = get_from(myheads)\n\n subject = findall(r'Subject:\\s+(.*)\\r\\n', myheads)[0] # Assume match\n return ' '.join((name, ':', subject))",
"def subject(self):\n return self._subject",
"def subject(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subject\")",
"def subject(self):\n if \"subject\" in self._prop_dict:\n return self._prop_dict[\"subject\"]\n else:\n return None",
"def subject(self):\n return self.mail.get('Subject')",
"def subject(self):\n return self.__subject",
"def getSubject(self, record):\n base_subject = super(CustomSMTPHandler, self).getSubject(record)\n try:\n hostname = platform.node()\n # pylint: disable=broad-except\n except Exception:\n hostname = 'Unknown'\n\n return base_subject.format(hostname)",
"def subject(self):\n subject = re.sub(RE_PATTERNS, '', self.header('Subject', ''))\n subject = re.sub(FW_PATTERNS, '', subject)\n return subject.strip()",
"def get_pubkey(self):\n return self._csr['certificationRequestInfo']['subjectPublicKeyInfo']",
"def get_name(self):\n return self.load_name(self.subject)",
"def cc(self, name):\n return \"\".join([n[0].upper() + n[1:] for n in name.split(\".\")])",
"def subject(self):\n subject = loader.render_to_string(self.subject_template_name,\n self.get_context())\n return ''.join(subject.splitlines())",
"def subject(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subject\")",
"def subject(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"subject\")",
"def get_cid(self, cid_index, mission):\n\t\tfor cid, mission2 in cid_index.items():\n\t\t\tif mission2 is mission:\n\t\t\t\treturn cid",
"def get_cid(self):\n results = self.database.findall(text(\"select cid from cid_minter\"))\n if results:\n return results[0]\n else:\n err_msg = \"Database error: No CID was found in the cid_minter table.\"\n logging.error(err_msg)\n raise ValueError(err_msg)",
"def mcc(self):\n return self._mcc",
"def ca(self):\n\n return self._basic_constraints['ca'].native",
"def get_cid(self):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.cid"
]
| [
"0.67420936",
"0.6689823",
"0.6680702",
"0.6680702",
"0.66017765",
"0.65063447",
"0.6394712",
"0.63077354",
"0.6284451",
"0.62449497",
"0.62449497",
"0.6210434",
"0.6204365",
"0.6199062",
"0.61709285",
"0.6100831",
"0.6037614",
"0.59472805",
"0.59242284",
"0.5875864",
"0.57739663",
"0.5773045",
"0.57569075",
"0.5716124",
"0.5711671",
"0.5687189",
"0.5680749",
"0.56780386",
"0.5611886",
"0.557644"
]
| 0.7442487 | 0 |
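The `get_subject_cn` document above returns every commonName entry from a parsed subject using its own `get_subject()`/OID helpers. A minimal sketch of the same lookup, written against the `cryptography` package instead (that substitution is an assumption, not the record's API):

```python
# Illustrative sketch only: the record relies on its own name/OID helpers;
# here the CN lookup is done with the widely used `cryptography` package.
from cryptography import x509
from cryptography.x509.oid import NameOID


def get_subject_cn_from_pem(pem_bytes: bytes):
    """Return every commonName value found in the certificate subject."""
    cert = x509.load_pem_x509_certificate(pem_bytes)
    # get_attributes_for_oid returns all matching NameAttribute objects, so a
    # subject with several CN entries yields several values, as in the record.
    return [attr.value for attr in
            cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)]
```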
Get the list of all X509 V3 Extensions on this CSR | def get_extensions(self, ext_type=None):
ext_attrs = [a for a in self.get_attributes()
if a['attrType'] == OID_extensionRequest]
if len(ext_attrs) == 0:
return []
else:
exts_der = ext_attrs[0]['attrValues'][0].asOctets()
exts = decoder.decode(exts_der, asn1Spec=rfc5280.Extensions())[0]
return [extension.construct_extension(e) for e in exts
if ext_type is None or e['extnID'] == ext_type._oid] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def explicit_list(self):\n exts = []\n for ext in self.extensions.values():\n if ext.implicit:\n continue\n exts.append(ext)\n return exts",
"def extensions(self):\n return list(self._list(extension.Extension, paginated=False))",
"def list_extensions(self, **_params):\r\n return self.get(self.extensions_path, params=_params)",
"def get_request_extensions(self):\n return []",
"def extensions(self):\n return self.properties.get('extensions',\n EntityCollection(self.context, Extension,\n ResourcePath(\"extensions\", self.resource_path)))",
"def get_required_extensions(self):\n return []",
"def extract_xkey_usage(self, ext):\n oidmap = {v: k for k, v in XKU_CODE_TO_OID.items()}\n res = []\n for oid in ext:\n if oid in oidmap:\n res.append(oidmap[oid])\n else:\n raise InvalidCertificate(\"Unsupported ExtendedKeyUsage oid: %s\" % (oid,))\n return res",
"def _set_extensions(self):\n\n self._critical_extensions = set()\n\n for extension in self['tbs_cert_list']['crl_extensions']:\n name = extension['extn_id'].native\n attribute_name = '_%s_value' % name\n if hasattr(self, attribute_name):\n setattr(self, attribute_name, extension['extn_value'].parsed)\n if extension['critical'].native:\n self._critical_extensions.add(name)\n\n self._processed_extensions = True",
"def getTackExt(self):\r\n tackExt = None\r\n # Search list in backwards order\r\n for x509 in self.x509List[::-1]:\r\n tlsCert = TlsCertificate(x509.bytes)\r\n if tlsCert.tackExt:\r\n if tackExt:\r\n raise SyntaxError(\"Multiple TACK Extensions\")\r\n else:\r\n tackExt = tlsCert.tackExt\r\n return tackExt",
"def get_ext_at(self, index):\n if index < 0 or index >= self.get_ext_count():\n raise IndexError\n \n return X509_Extension(m2.x509_get_ext(self.x509, index),\n _pyfree=0)",
"def extension_attributes(self):\n return self._extension_attributes",
"def extension_attributes(self):\n return self._extension_attributes",
"def extract_gnames(self, ext):\n res = []\n for gn in ext:\n if isinstance(gn, x509.RFC822Name):\n res.append('email:' + as_unicode(gn.value))\n elif isinstance(gn, x509.DNSName):\n res.append('dns:' + as_unicode(gn.value))\n elif isinstance(gn, x509.UniformResourceIdentifier):\n res.append('uri:' + as_unicode(gn.value))\n elif isinstance(gn, x509.IPAddress):\n res.append('ip:' + str(gn.value))\n elif isinstance(gn, x509.DirectoryName):\n val = self.extract_name(gn.value)\n res.append('dn:' + render_name(val))\n else:\n raise InvalidCertificate(\"Unsupported subjectAltName type: %s\" % (gn,))\n return res",
"def iter_extensions(self) -> t.Iterator[\"Extension\"]:\n return iter(sorted(self.extensions.values(), key=lambda x: x.priority))",
"def extensions_allow_lists(self) -> Sequence['outputs.GetComputeMachineAgentConfigurationExtensionsAllowListResult']:\n return pulumi.get(self, \"extensions_allow_lists\")",
"def get_supported_extensions(ext=\".as\"):\n result = list(ext + x for x in LOADERS.keys())\n result.append(ext)\n return result",
"def critical_extensions(self):\n\n if not self._processed_extensions:\n self._set_extensions()\n return self._critical_extensions",
"def critical_extensions(self):\n\n if not self._processed_extensions:\n self._set_extensions()\n return self._critical_extensions",
"def _set_extensions(self):\n\n self._critical_extensions = set()\n\n for extension in self['crl_entry_extensions']:\n name = extension['extn_id'].native\n attribute_name = '_%s_value' % name\n if hasattr(self, attribute_name):\n setattr(self, attribute_name, extension['extn_value'].parsed)\n if extension['critical'].native:\n self._critical_extensions.add(name)\n\n self._processed_extensions = True",
"def extensions_block_lists(self) -> Sequence['outputs.GetComputeMachineAgentConfigurationExtensionsBlockListResult']:\n return pulumi.get(self, \"extensions_block_lists\")",
"def all_editable_exts():\r\n exts = []\r\n for (language, extensions) in sourcecode.ALL_LANGUAGES.items():\r\n exts.extend(list(extensions))\r\n return ['.' + ext for ext in exts]",
"def add_extensions(self, ext_stack):\n return m2.x509_req_add_extensions(self.req, ext_stack._ptr())",
"def get_optional_extensions(self):\n return []",
"def get_network_extensions(self):\n return self._neutron_extensions()",
"def get_ext_param_names(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_name(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret",
"def parse_extensions(data, allow_quoted_string=False):\n\n state = http_header_util.ParsingState(data)\n\n extension_list = []\n while True:\n extension = _parse_extension(state, allow_quoted_string)\n if extension is not None:\n extension_list.append(extension)\n\n http_header_util.consume_lwses(state)\n\n if http_header_util.peek(state) is None:\n break\n\n if not http_header_util.consume_string(state, ','):\n raise HandshakeException(\n 'Failed to parse Sec-WebSocket-Extensions header: '\n 'Expected a comma but found %r' %\n http_header_util.peek(state))\n\n http_header_util.consume_lwses(state)\n\n if len(extension_list) == 0:\n raise HandshakeException(\n 'Sec-WebSocket-Extensions header contains no valid extension')\n\n return extension_list",
"def get_extension_options(self):\n options = []\n for extension in self.extensions:\n extension_options = getattr(extension, \"OPTIONS\", None)\n if extension_options:\n options.extend(extension_options)\n return options",
"def extensions(self) -> pulumi.Output[Optional[Sequence['outputs.MachineExtensionInstanceViewResponse']]]:\n return pulumi.get(self, \"extensions\")",
"def extensions(self) -> Tuple[str, ...]:\n raise NotImplementedError",
"def get_extensions(self, param_groups):\n extensions = [\n ext_cls(subsampling=subsampling)\n for ext_cls, subsampling in self._merged_extensions.items()\n ]\n\n if not self._compute_gammas:\n extensions = [\n ext\n for ext in extensions\n if not isinstance(ext, self._extension_cls_first)\n ]\n\n return extensions"
]
| [
"0.6225858",
"0.6220105",
"0.6191462",
"0.5990831",
"0.5963183",
"0.59544784",
"0.5882228",
"0.581221",
"0.57532495",
"0.5679225",
"0.56783944",
"0.56783944",
"0.56427705",
"0.5603688",
"0.55754435",
"0.5570066",
"0.5565384",
"0.5565384",
"0.5538455",
"0.5530417",
"0.5508871",
"0.5489986",
"0.5484069",
"0.54835737",
"0.5468498",
"0.54270506",
"0.5414651",
"0.5361293",
"0.53477263",
"0.534294"
]
| 0.64339465 | 0 |
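The `get_extensions` document above unpacks the CSR's extensionRequest attribute by hand with pyasn1/rfc5280 types and an optional `ext_type` filter. For comparison (an assumption, not the record's codebase), the `cryptography` package exposes the same information directly on a loaded CSR:

```python
# Comparison sketch only: `cryptography` parses the extensionRequest attribute
# internally and surfaces the result as csr.extensions.
from cryptography import x509


def list_csr_extensions(pem_bytes: bytes):
    """Yield (dotted OID, critical flag, parsed value) for each CSR extension."""
    csr = x509.load_pem_x509_csr(pem_bytes)
    for ext in csr.extensions:
        yield ext.oid.dotted_string, ext.critical, ext.value
```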
Sorts plot data, labels, and colors in order of increasing median. | def _sort_distributions_by_median(plot_data, plot_labels, plot_colors):
sorted_data = []
for distribution, label, color in zip(plot_data, plot_labels, plot_colors):
sorted_data.append((median(distribution), distribution, label, color))
sorted_data.sort()
plot_data = []
plot_labels = []
plot_colors = []
for _, distribution, label, color in sorted_data:
plot_data.append(distribution)
plot_labels.append(label)
plot_colors.append(color)
return plot_data, plot_labels, plot_colors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _sort_distributions(plot_data, plot_labels, plot_colors, sort_type):\r\n if sort_type == 'median':\r\n sort_key = lambda item: np.median(item[0])\r\n elif sort_type == 'alphabetical':\r\n sort_key = lambda item: item[1]\r\n else:\r\n raise ValueError(\"Invalid sort type '%s'.\" % sort_type)\r\n\r\n # Taken from http://stackoverflow.com/a/9764364\r\n return zip(*sorted(zip(plot_data, plot_labels, plot_colors), key=sort_key))",
"def argsortxaxis(\n xaxis: List[str],\n xsort: List[bool],\n stats: Dict[str, np.ndarray],\n nbmatch = re.compile(r\"^\\d\\+-.*\")\n):\n axes = pd.DataFrame(dict(\n {\n str(2*i+1): (\n stats['x']\n if len(xaxis) == 1 else\n [stats['x'][k][i] for k in range(len(stats['x']))]\n )\n for i in range(len(xaxis))\n },\n value = -stats['boxcenter']\n ))\n\n for isort in xsort:\n axes.set_index(str(2*isort+1), inplace = True)\n axes[str(2*isort)] = axes.groupby(str(2*isort+1)).value.median()\n axes.reset_index(inplace = True)\n\n def _cnt(itm):\n return itm.count(INVISIBLE)\n\n for i in range(1, 2*len(xaxis)+1, 2):\n col = axes[str(i)]\n if any(np.issubdtype(col.dtype, j) for j in (np.number, np.bool_)):\n if str(i-1) in axes:\n # reverse orders: first the label, second the median value\n axes.rename(columns = {str(i): str(i-1), str(i-1): str(i)}, inplace = True)\n continue\n\n vals = col.unique()\n if all(nbmatch.match(j) for j in vals):\n # the column is of type; [\"1-track1\", \"2-track2\", ...]\n # we keep only the track index\n axes[str(i)] = [int(j.split('-')) for j in col]\n\n elif any(j.startswith(INVISIBLE) for j in vals):\n # the column has labels sorted according to the invisible character.\n # count those and set them as the main order\n col = col.apply(_cnt)\n if str(i-1) in axes:\n # reverse orders: first the label, second the median value\n axes[str(i)] = axes[str(i-1)]\n axes[str(i-1)] = col\n else:\n axes[str(i)] = col\n\n axes.sort_values(\n [*(str(i) for i in range(2*len(xaxis)+1) if str(i) in axes), 'value'],\n inplace = True\n )\n return axes.index.values",
"def main_sort():\n \n input_1 = [7, 6, 5, 4, 3, 2, 1]\n print find_median(input_1)\n \n input_2 = [5, 4, 3, 2, 1, 6, 7]\n print find_median(input_2)\n \n input_3 = [1, 2, 3, 4, 5, 7, 6]\n print find_median(input_3)\n \n input_4 = [1, 1, 3, 3, 2, 2, 4]\n print find_median(input_4)",
"def sort(self, pivot='rand'):\n\n choices = ('first', 'median', 'last', 'rand')\n assert pivot in choices\n self.choice = pivot\n return self._recursiveSort(self.obj)",
"def test_sort_distributions_median(self):\r\n exp = [([0, 0, 0, 1], [2, 1, 1], [1], [1, 2, 3]),\r\n ('bar', 'baz', 'zab', 'foo'), ('b', 'r', 'b', 'w')]\r\n obs = _sort_distributions(\r\n [[1, 2, 3], [2, 1, 1], [0, 0, 0, 1], [1]],\r\n ['foo', 'baz', 'bar', 'zab'], ['w', 'r', 'b', 'b'], 'median')\r\n self.assertEqual(obs, exp)",
"def start_sort():\n global data\n if algo_box.get() == \"Bubble Sort\":\n bubble_sort(data, display_data, speed_scale.get())\n elif algo_box.get() == \"Merge Sort\":\n merge_sort(data, display_data, speed_scale.get())",
"def start_sorting(self):\n if self.sorting:\n return None\n self.sorting = True\n\n passes = 0\n while self.sorting:\n swap_done = False\n for i in range(len(self.sort_list)-passes-1):\n if not self.sorting:\n break\n if self.sort_list[i][0] > self.sort_list[i+1][0]:\n self.sort_list[i], self.sort_list[i+1] = self.sort_list[i+1], self.sort_list[i]\n self.window.coords(self.sort_list[i][1], 4*i+50, 20, 4*i+50, self.sort_list[i][0])\n self.window.coords(self.sort_list[i+1][1], 4*(i+1)+50, 20, 4*(i+1)+50, self.sort_list[i+1][0])\n self.window.itemconfig(self.sort_list[i][1], fill='red')\n self.window.itemconfig(self.sort_list[i+1][1], fill='red')\n swap_done = True\n self.window.update()\n self.window.itemconfig(self.sort_list[i][1], fill='black')\n self.window.itemconfig(self.sort_list[i+1][1], fill='black')\n self.window.update()\n passes += 1\n\n if not swap_done:\n self.sorting = False\n for line in self.sort_list:\n self.window.itemconfig(line[1], fill='green')\n else:\n self.window.itemconfig(self.sort_list[i+1][1], fill='blue')\n self.window.update()",
"def median(self):\n # TO DO\n pass",
"def median(self):\n self.data.sort()\n\n if len(self.data) % 2 == 1:\n median = self.data[int(self.size/2)]\n else:\n median = (self.data[int(self.size/2 - 1)] + \n self.data[int(self.size/2)]) / 2\n return median",
"def plotStatsDict(statsDict, name='', proteins=None, offset=0.0, markerSize=12, color='#e31a1c', yMax=1.5, alpha=1.0,\r\n median=False, figSize = (22,5), noFill=False, mew=1, yMin=-0.05, highlightMed=False, hms=2, hmFilled=True,\r\n xTickLabels=None):\r\n if proteins is None:\r\n proteins = qMS.sort_nicely(statsDict.keys())\r\n\r\n if xTickLabels is None:\r\n xTickLabels = [item for item in proteins]\r\n\r\n if hmFilled:\r\n medColor=color\r\n else:\r\n medColor='none'\r\n \r\n if noFill:\r\n edgeColor=color\r\n color='none'\r\n else:\r\n edgeColor='black'\r\n \r\n xAxis = range(1,len(proteins)+1)\r\n fig = pylab.figure(figsize=figSize)\r\n ax = fig.add_subplot(111)\r\n\r\n xs = []\r\n ys = []\r\n for x in xAxis:\r\n p = proteins[x-1]\r\n if p in statsDict.keys():\r\n if median:\r\n xs.append(x+offset)\r\n ys.append(numpy.median(statsDict[p]))\r\n else:\r\n for v in statsDict[p]:\r\n xs.append(x+offset)\r\n ys.append(v)\r\n\r\n pylab.grid(b=True, which='major', color='grey', linestyle='--', axis='y', linewidth=1.5, alpha=0.5)\r\n pylab.grid(b=True, which='major', color='grey', linestyle='-', axis='x', linewidth=1.5, alpha=0.75)\r\n ax.plot(xs, ys, 'o', mfc=color, markeredgecolor=edgeColor, mew=mew, markersize=markerSize, label=name, alpha=alpha)\r\n\r\n if highlightMed:\r\n mx = []\r\n my = []\r\n for x in xAxis:\r\n p = proteins[x-1]\r\n if p in statsDict.keys():\r\n mx.append(x+offset)\r\n my.append(numpy.median(statsDict[p]))\r\n ax.plot(mx, my, '_', color='black', markeredgecolor='black', mew=2, markersize=markerSize*hms)\r\n\r\n\r\n pylab.xticks(xAxis, xTickLabels, rotation=45)\r\n pylab.xlim(1, len(proteins)+1)\r\n ####################################\r\n ####################################\r\n if yMin == -0.05:\r\n sub = 0.0\r\n else:\r\n sub = yMin\r\n pylab.yticks([0, (yMax-sub)/5.0, 2*(yMax-sub)/5.0, 3*(yMax-sub)/5.0, 4*(yMax-sub)/5.0, (yMax-sub)])\r\n pylab.ylim(yMin, yMax)\r\n ax.set_axisbelow(True)\r\n return ax",
"def calc_median(numbers):\n middle_index = len(numbers) // 2\n return sorted(numbers[middle_index]) # sorted returns the numbers sorted without changing",
"def plot_discordance_by_cohort(discordance_dictionary, cohort_names, ylabel,\n title, ylimit, plot_filename, sample_size_threshold,\n medians_file_object):\n cohort_medians = []\n cohort_means = []\n cohorts_with_data = []\n minimum = float(\"inf\")\n maximum = float(\"-inf\")\n \n for cohort_name in cohort_names:\n cohort_values = discordance_dictionary[cohort_name]\n if len(cohort_values) >= sample_size_threshold:\n median = statistics.median(cohort_values)\n mean = statistics.mean(cohort_values)\n cohorts_with_data.append(cohort_name)\n cohort_medians.append(median)\n cohort_means.append(mean)\n if min(cohort_values) < minimum:\n minimum = min(cohort_values)\n if max(cohort_values) > maximum:\n maximum = max(cohort_values)\n cohorts_sorted_by_median = [cohort for _, cohort in sorted(zip(cohort_medians, cohorts_with_data))]\n \n mean_of_means = statistics.mean(cohort_means)\n medians_file_object.write(\"#mean of means = \" + str(round(mean_of_means, 2)) + \"\\n\")\n medians_file_object.write(\"#minimum discordance = \" + str(round(minimum, 2)) + \"\\n\")\n medians_file_object.write(\"#maximum discordance = \" + str(round(maximum, 2)) + \"\\n\")\n \n sorted_cohort_medians = sorted(cohort_medians)\n for i in range(len(cohorts_sorted_by_median)):\n print(cohorts_sorted_by_median[i], sorted_cohort_medians[i])\n medians_file_object.write(cohorts_sorted_by_median[i] + \" \" + str(sorted_cohort_medians[i]) + \"\\n\")\n \n median_of_medians = statistics.median(cohort_medians)\n plt.figure(figsize=(12, 6))\n plt.axhline(y = median_of_medians, color = \"blue\")\n plt.text(1, ylimit, \"median of medians = \" + str(round(median_of_medians, 2)),\n fontsize = 14)\n values_in_sorted_key_order = []\n for cohort_name in cohorts_sorted_by_median:\n values_in_sorted_key_order.append(discordance_dictionary[cohort_name])\n \n box_data = plt.boxplot(values_in_sorted_key_order, patch_artist = True)\n \n for item in ['boxes', 'whiskers', 'fliers', 'medians', 'caps']:\n plt.setp(box_data[item], color=\"blue\")\n plt.setp(box_data[\"boxes\"], facecolor=\"springgreen\")\n plt.setp(box_data[\"fliers\"], markeredgecolor=\"springgreen\")\n \n #setting axis labels\n plt.xticks(range(1, len(cohorts_sorted_by_median) + 1),\n cohorts_sorted_by_median, rotation = 90, fontsize = 14)\n plt.yticks(fontsize = 14)\n plt.ylim(0 - ylimit * 0.1, ylimit * 1.1)\n plt.xlabel(\"Cohort\", fontsize = 16)\n plt.ylabel(ylabel, fontsize = 16)\n plt.title(title, fontsize = 18)\n plt.savefig(plot_filename, bbox_inches = \"tight\")\n plt.show()",
"def array_sort():\n to_concat = []\n for centroid_rgb, cluster in itertools.izip(centroids_rgb, self.clusters):\n # no need to revisit ratio\n new_idxed_arr = tf.concat(1,[tf.slice(cluster, [0,0], [-1,2]),\n tf.tile(tf.expand_dims(\n tf.constant(centroid_rgb), 0),\n multiples=[len(cluster.eval()), 1])])\n to_concat.append(new_idxed_arr)\n\n concated = tf.concat(0, to_concat)\n sorted_arr = np.array(sorted(concated.eval().tolist()), dtype=np.uint8)[:, 2:]\n\n new_img = Image.fromarray(sorted_arr.reshape([self.m, self.n, self.chann]))\n if save:\n new_img.save(outfile, format=format_)\n os.popen(\"open '{}'\".format(outfile))\n else:\n new_img.show()",
"def sort_animals(all_animals):\n def get_key(a):\n return a.row + 0.001 * a.col\n\n all_animals.sort(key=get_key)",
"def perm_plot(obs, perm, p, fig_title, tails = 1):\n plot_rows = len(perm.keys())\n \n fig, axes = plt.subplots(plot_rows, 1)\n\n for n, term in enumerate(perm.keys()):\n\n if plot_rows > 1:\n sns.distplot(perm[term], ax = axes[n], norm_hist = True)\n\n #Formatting\n axes[n].axvline(obs[term], 0, 1, linestyle = '--', color = [1, 0, 0], label = 'Observed')\n \n if tails == -1:\n thresh = np.percentile(perm[term], 5, interpolation = 'nearest')\n axes[n].axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n \n if tails == 1:\n thresh = np.percentile(perm[term], 95, interpolation = 'nearest')\n axes[n].axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n elif tails == 2:\n thresh = np.percentile(perm[term], [2.5, 97.5], interpolation = 'nearest')\n axes[n].axvline(thresh[0], 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n axes[n].axvline(thresh[1], 0, 1, linestyle = '-', color = [0, 0, 0])\n \n axes[n].set_title(term, fontsize = 16, x = 0.1, y = 1.05)\n axes[n].set_xlabel('Permuted Test Value', fontsize = 15)\n if p[term] < 0.001:\n axes[n].text(0.6, 0.5, 'p < 0.001', fontsize = 20, transform = axes[n].transAxes)\n else:\n axes[n].text(0.6, 0.5, 'p = ' + str(np.round(p[term], decimals = 5)), fontsize = 20, transform = axes[n].transAxes) \n \n\n for tick in axes[n].xaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n for tick in axes[n].yaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n \n if n == np.around(plot_rows / 2, decimals = 0) - 1:\n axes[n].legend(fontsize = 20, loc = \"center left\", bbox_to_anchor = (1, 0.5), numpoints = 1)\n\n\n else:\n sns.distplot(perm[term], ax = axes, norm_hist = True)\n\n #Formatting\n axes.axvline(obs[term], 0, 1, linestyle = '--', color = [1, 0, 0], label = 'Observed')\n \n if tails == -1:\n thresh = np.percentile(perm[term], 5, interpolation = 'nearest')\n axes.axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n \n if tails == 1:\n thresh = np.percentile(perm[term], 95, interpolation = 'nearest')\n axes.axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n elif tails == 2:\n thresh = np.percentile(perm[term], [2.5, 97.5], interpolation = 'nearest')\n axes.axvline(thresh[0], 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n axes.axvline(thresh[1], 0, 1, linestyle = '-', color = [0, 0, 0])\n \n \n axes.set_title(term, fontsize = 16, x = 0.1, y = 1.05)\n axes.set_xlabel('Permuted Test Value', fontsize = 15)\n if p[term] < 0.001:\n axes.text(0.6, 0.5, 'p < 0.001', fontsize = 20, transform = axes.transAxes)\n else:\n axes.text(0.6, 0.5, 'p = ' + str(np.round(p[term], decimals = 5)), fontsize = 20, transform = axes.transAxes) \n \n for tick in axes.xaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n for tick in axes.yaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n\n axes.legend(fontsize = 20, loc = \"center left\", bbox_to_anchor = (1, 0.5), numpoints = 1)\n\n if fig_title != None: \n fig.suptitle(fig_title, fontsize = 24, y = 1.05) \n \n plt.tight_layout() \n plt.show()\n \n return(fig, axes)",
"def on_sort(self, param, state):\n if state > 0: # From unchecked to checked\n self.grouped = False\n self.uncheck_group()\n if param not in self.param_list:\n self.param_list.append(param)\n else: # From checked to unchecked\n if param in self.param_list:\n if len(self.param_list) == 1:\n self.param_list = []\n else:\n self.param_list.remove(param)\n self.sorted_labels = utils.sort_by_param(self.nwb_path_list.values(), self.param_list)\n if self.param_list:\n self.sorted = True\n else:\n self.sorted = False\n self.musketeers_widget.session_widget.update_text_filter()\n self.musketeers_widget.session_widget.populate(self.sorted_labels)",
"def addStatsDictToPlot(statsDict, ax, name='', offset=0.0, markerSize=12, color='#377db8', median=False, noFill=False, mew=1, \r\n highlightMed=False, hms=2, hmFilled=True, alpha=1.0, proteins=None):\r\n\r\n if proteins is None:\r\n a = ax.get_xmajorticklabels()\r\n proteins = [t.get_text() for t in a]\r\n\r\n if hmFilled:\r\n medColor=color\r\n else:\r\n medColor='none'\r\n \r\n if noFill:\r\n edgeColor=color\r\n color='none'\r\n else:\r\n edgeColor='black'\r\n\r\n xAxis = range(1,len(proteins)+1)\r\n\r\n xs = []\r\n ys = []\r\n for x in xAxis:\r\n p = proteins[x-1]\r\n if p in statsDict.keys():\r\n if median:\r\n xs.append(x+offset)\r\n ys.append(numpy.median(statsDict[p]))\r\n else:\r\n for v in statsDict[p]:\r\n xs.append(x+offset)\r\n ys.append(v)\r\n\r\n ax.plot(xs, ys, 'o', mfc=color, markeredgecolor=edgeColor, mew=mew, markersize=markerSize, label=name, alpha=alpha)\r\n \r\n if highlightMed:\r\n mx=[]\r\n my=[]\r\n for x in xAxis:\r\n p = proteins[x-1]\r\n if p in statsDict.keys():\r\n mx.append(x+offset)\r\n my.append(numpy.median(statsDict[p]))\r\n ax.plot(mx, my, '_', color='black', markeredgecolor='black', mew=2, markersize=markerSize*hms)\r\n\r\n ax.set_axisbelow(True)\r\n return ax",
"def click_timed_sorting_button(self):\n self.my_sorted_list = self.sorting.sorting_alg(self.my_list)\n self.label_2[\"text\"] = self.set_my_sorted_list_label()",
"def _sort_measurements(self):\n if self._unsorted:\n sorted_ndxs = np.argsort(self._angles)\n self._distances = self._distances[sorted_ndxs]\n self._angles = self._angles[sorted_ndxs]\n self._intensities = self._intensities[sorted_ndxs]\n self._error_codes = self._error_codes[sorted_ndxs]\n self._unsorted = False",
"def median(p, col, row):\n neighbors = []\n # Put the 8 surrounding pixels into neighbors\n for i in range(col-1, col+2):\n for j in range(row-1, row+2):\n try:\n neighbor = newimg.getPixel(i, j)\n neighbors.append(neighbor)\n except:\n continue\n nlen = len(neighbors)\n if nlen:\n red = [neighbors[i][0] for i in range(nlen)]\n green = [neighbors[i][1] for i in range(nlen)]\n blue = [neighbors[i][2] for i in range(nlen)]\n # Sort the lists so we can later find the median.\n for i in [red, green, blue]:\n i.sort()\n # If the list has an odd number of items in it.\n if nlen % 2:\n p.red = red[len(red)/2]\n p.green = green[len(green)/2]\n p.blue = blue[len(blue)/2]\n else:\n p.red = (red[len(red)/2] + red[len(red)/2-1])/2\n p.green = (green[len(green)/2] + green[len(green)/2-1])/2\n p.blue = (blue[len(blue)/2] + blue[len(blue)/2-1])/2 \n\n return p",
"def plot_sorted_accuracies(results):\n ###TODO\n #print(results)\n \n #step 1 -> sort accuracies and get x and y\n # x = setting\n # y = sorted list of accuracies\n #results.sort(key=lambda x:(x['accuracy'])) \n # don't use it ->it will change results from main as well\n \n #print(results)\n\n acc = []\n \n x = list(range(len(results)))\n \n for d in results:\n #print('dict=',d)\n acc.append(d['accuracy'])\n \n acc.sort(key=lambda x:(x))\n #print('acc = ',acc)\n \n #step 2 -> plot figure\n fig1 = plt.figure(1) \n plt.plot(x,acc)\n plt.ylabel('accuracy')\n plt.xlabel('settings')\n \n plt.show()\n \n fig1.savefig('accuracies.png')",
"def heap_sort(data_list, draw_data, time_value):\n\n # heapifies the list\n for i in range((len(data_list) // 2) - 1, -1, -1):\n heapify(data_list, len(data_list), i, draw_data, time_value)\n\n # draw the heapified list as blue before starting the popping from the heap\n draw_data(data_list, [\"blue\" for i in range(len(data_list))])\n time.sleep(time_value)\n\n for i in range(len(data_list) - 1, 0, -1):\n _swap(data_list, i, 0)\n\n # generate the color list to be visualized\n color_list = [\"red\" for x in range(len(data_list))]\n\n # color the two elements being swapped green\n for x in range(len(color_list)):\n if (x == i) or (x == 0):\n color_list[x] = \"green\"\n\n # visualize the swap and wait the specified amount of time\n draw_data(data_list, color_list)\n time.sleep(time_value)\n\n # heapify the remaining portion of the list\n heapify(data_list, i, 0, draw_data, time_value)\n\n # color the whole list as green after the sort\n draw_data(data_list, [\"green\" for i in range(len(data_list))])",
"def median_link(clusters, i, j, dendrogram):\n update_fn = lambda d_ik,d_jk: 0.5*d_ik + 0.5*d_jk + -0.25*clusters[i,j]\n return _general_link(clusters, i, j, update_fn)",
"def median(self, name, **kwargs):\n data = self.get(name,**kwargs)\n return np.percentile(data,[50])",
"def plot_sorted_accuracies(results):\n acc = []\n for comb in results:\n acc.append(comb[\"accuracy\"])\n sorted_list = sorted(acc)\n plt.plot(range(42),sorted_list,'bo-')\n plt.ylabel(\"Accuracy\")\n plt.xlabel(\"Setting\")\n plt.savefig(\"accuracies.png\")",
"def sort_simulations(df_ts, dyn_dend_order):\n\n # Create a dictionary with the order of each simulation row in the plot \n dyn_dend_order_dict = { dyn_name : dyn_dend_order.index(dyn_name) for dyn_name in dyn_dend_order }\n\n # Adding column based in new order recieved from clustering\n df_ts['clust_order'] = df_ts['Id'].apply(lambda x: dyn_dend_order_dict[x])\n\n #Sorting by ballesteros Id's (helixloop column) and clustering order\n df_ts['helixloop'] = df_ts['Position'].apply(lambda x: re.sub(r'^(\\d)x',r'\\g<1>0x',x)) \n df_ts = df_ts.sort_values([\"helixloop\",'clust_order'])\n\n #Drop sort columns once used\n df_ts.drop(['helixloop','clust_order'], axis = 1, inplace = True)\n \n return df_ts",
"def make_median(s, q2):\n return graph_objs.Scatter(\n x=[s],\n y=[q2],\n text=['median: ' + '{:0.2f}'.format(q2)],\n mode='markers',\n marker=dict(symbol='square',\n color='rgb(255,255,255)'),\n hoverinfo='text'\n )",
"def _sort_phot(self, verbose=False):\n if hasattr(self, \"data\") and hasattr(self, \"data_filters\"):\n ## This looks fugly.\n newkeys = np.array([i for i in self.data_filters.keys()])[np.argsort([self.data_filters[i].lambda_effective.value for i in self.data_filters])]\n\n sorted_data = OrderedDict()\n sorted_data_filters = OrderedDict()\n\n for newkey in newkeys:\n\n if verbose: print(newkey)\n\n sorted_data[newkey] = self.data[newkey]\n sorted_data_filters[newkey] = self.data_filters[newkey]\n\n self.data = sorted_data\n self.data_filters = sorted_data_filters\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass",
"def sort(self, Ncol, order):\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol)) \n if order == Qt.DescendingOrder:\n self.arraydata.reverse()\n self.emit(SIGNAL(\"layoutChanged()\"))",
"def plot_raw_data(ratings, pl = True):\n # do statistics.\n num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten()\n num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten()\n sorted_num_movies_per_user = np.sort(num_items_per_user)[::-1]\n sorted_num_users_per_movie = np.sort(num_users_per_item)[::-1]\n\n if pl:\n # plot\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(sorted_num_movies_per_user, color='blue')\n ax1.set_xlabel(\"users\")\n ax1.set_ylabel(\"number of ratings (sorted)\")\n ax1.grid()\n\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(sorted_num_users_per_movie)\n ax2.set_xlabel(\"items\")\n ax2.set_ylabel(\"number of ratings (sorted)\")\n #ax2.set_xticks(np.arange(0, 2000, 300))\n ax2.grid()\n\n plt.tight_layout()\n plt.savefig(\"../results/stat_ratings\")\n plt.show()\n # plt.close()\n return num_items_per_user, num_users_per_item"
]
| [
"0.67502403",
"0.61370903",
"0.5812121",
"0.55651194",
"0.5400599",
"0.5361287",
"0.5318083",
"0.52853227",
"0.52681786",
"0.52646977",
"0.519699",
"0.51744735",
"0.51643634",
"0.5154642",
"0.51214653",
"0.50933886",
"0.5091326",
"0.5087971",
"0.50839293",
"0.5041131",
"0.50262123",
"0.50184447",
"0.5000204",
"0.4990621",
"0.49781775",
"0.4971028",
"0.494655",
"0.49291265",
"0.49112839",
"0.49068704"
]
| 0.802016 | 0 |
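The `_sort_distributions_by_median` document above keeps three parallel lists aligned while reordering them by median. A compact equivalent (a sketch, not the project's code) sorts the zipped triples on a key and unzips the result; sorting on the key alone also means tied medians never fall back to comparing the distributions themselves:

```python
# Sketch: one sorted() call with a key instead of the build-tuples-then-rebuild
# loop in the record's document. numpy is assumed for the median, as the
# surrounding negatives suggest.
import numpy as np


def sort_by_median(plot_data, plot_labels, plot_colors):
    triples = sorted(zip(plot_data, plot_labels, plot_colors),
                     key=lambda t: np.median(t[0]))
    data, labels, colors = zip(*triples) if triples else ((), (), ())
    return list(data), list(labels), list(colors)


# The distribution with the smallest median comes first.
data, labels, colors = sort_by_median([[5, 6, 7], [1, 2, 3]], ['b', 'a'], ['red', 'blue'])
assert labels == ['a', 'b']
```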
Colors one field by another. Returns a list of matplotlib-compatible colors, one for each of the input field_states. Also returns a dictionary mapping color_by_field states to colors (useful for building a legend, for example). If there are not enough colors available (they are drawn from qiime.colors.data_colors), an error will be raised as the color mapping (and legend) will be ambiguous. A one-to-one mapping must exist between each field_state and its corresponding color_by field state (otherwise it is unclear which corresponding color_by field state should be used to color it by). An error will be raised if this one-to-one mapping does not exist. | def _color_field_states(map_f, samp_ids, field, field_states, color_by_field):
colors = []
color_pool = [matplotlib_rgb_color(data_colors[color].toRGB())
for color in data_color_order]
metadata_map = MetadataMap.parseMetadataMap(map_f)
for field_to_check in field, color_by_field:
if field_to_check not in metadata_map.CategoryNames:
raise ValueError("The field '%s' is not in the metadata mapping "
"file's column headers." % field_to_check)
all_field_states = metadata_map.getCategoryValues(samp_ids, field)
all_color_by_states = metadata_map.getCategoryValues(samp_ids,
color_by_field)
if len(set(field_states) - set(all_field_states)) != 0:
raise ValueError("Encountered unrecognizable field state(s) in %r "
"for field '%s'." % (field_states, field))
# Build mapping from one field to the other.
field_mapping = defaultdict(list)
for field_state, color_by_state in zip(all_field_states,
all_color_by_states):
if field_state in field_states:
field_mapping[field_state].append(color_by_state)
# For each of the specified input field states, find its corresponding
# "color by" field state and give it a color if it hasn't been assigned one
# yet. Make sure we have enough colors and there is a one-to-one mapping.
color_mapping = {}
for field_state in field_states:
color_by_states = set(field_mapping[field_state])
if len(color_by_states) != 1:
raise ValueError("The field '%s' to color by does not have a "
"one-to-one mapping with field '%s'. Coloring "
"would be ambiguous." % (color_by_field, field))
color_by_state = list(color_by_states)[0]
if color_by_state not in color_mapping:
if len(color_pool) > 0:
color_mapping[color_by_state] = color_pool.pop(0)
else:
raise ValueError("There are not enough available QIIME colors "
"to color each of the field states in field "
"'%s'. Coloring would be ambiguous." %
color_by_field)
colors.append(color_mapping[color_by_state])
return colors, color_mapping | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _color_field_states(map_f, samp_ids, field, field_states, color_by_field):\r\n colors = []\r\n color_pool = [matplotlib_rgb_color(data_colors[color].toRGB())\r\n for color in data_color_order]\r\n metadata_map = MetadataMap.parseMetadataMap(map_f)\r\n\r\n for field_to_check in field, color_by_field:\r\n if field_to_check not in metadata_map.CategoryNames:\r\n raise ValueError(\"The field '%s' is not in the metadata mapping \"\r\n \"file's column headers.\" % field_to_check)\r\n\r\n all_field_states = metadata_map.getCategoryValues(samp_ids, field)\r\n all_color_by_states = metadata_map.getCategoryValues(samp_ids,\r\n color_by_field)\r\n\r\n if len(set(field_states) - set(all_field_states)) != 0:\r\n raise ValueError(\"Encountered unrecognizable field state(s) in %r \"\r\n \"for field '%s'.\" % (field_states, field))\r\n\r\n # Build mapping from one field to the other.\r\n field_mapping = defaultdict(list)\r\n for field_state, color_by_state in zip(all_field_states,\r\n all_color_by_states):\r\n if field_state in field_states:\r\n field_mapping[field_state].append(color_by_state)\r\n\r\n # For each of the specified input field states, find its corresponding\r\n # \"color by\" field state and give it a color if it hasn't been assigned one\r\n # yet. Make sure we have enough colors and there is a one-to-one mapping.\r\n color_mapping = {}\r\n for field_state in field_states:\r\n color_by_states = set(field_mapping[field_state])\r\n\r\n if len(color_by_states) != 1:\r\n raise ValueError(\"The field '%s' to color by does not have a \"\r\n \"one-to-one mapping with field '%s'. Coloring \"\r\n \"would be ambiguous.\" % (color_by_field, field))\r\n\r\n color_by_state = list(color_by_states)[0]\r\n if color_by_state not in color_mapping:\r\n if len(color_pool) > 0:\r\n color_mapping[color_by_state] = color_pool.pop(0)\r\n else:\r\n raise ValueError(\"There are not enough available QIIME colors \"\r\n \"to color each of the field states in field \"\r\n \"'%s'. Coloring would be ambiguous.\" %\r\n color_by_field)\r\n\r\n colors.append(color_mapping[color_by_state])\r\n\r\n return colors, color_mapping",
"def test_color_field_states(self):\r\n # All sample IDs and field states.\r\n exp = ([(1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (1.0, 0.0, 0.0)],\r\n {'y': (0.0, 0.0, 1.0), 'x': (1.0, 0.0, 0.0)})\r\n obs = _color_field_states(self.map_f, ['1', '2', '3', '4', '5', '6'],\r\n 'Foo', ['a', 'b', 'c'], 'Bar')\r\n self.assertEqual(exp[0], obs[0])\r\n assert_almost_equal(obs[1]['x'], exp[1]['x'])\r\n assert_almost_equal(obs[1]['y'], exp[1]['y'])\r\n\r\n # Subset of sample IDs and field states.\r\n exp = ([(1.0, 0.0, 0.0)], {'x': (1.0, 0.0, 0.0)})\r\n obs = _color_field_states(self.map_f, ['1', '2'], 'Foo', ['a'], 'Bar')\r\n self.assertEqual(exp[0], obs[0])\r\n assert_almost_equal(obs[1]['x'], exp[1]['x'])\r\n\r\n # Color field by itself (useless but still allowed).\r\n exp = ([(1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.9490196078431372,\r\n 0.45098039215686275, 0.01568627450980392)], {'a':\r\n (1.0, 0.0, 0.0),\r\n 'c': (0.9490196078431372, 0.45098039215686275,\r\n 0.01568627450980392), 'b': (0.0, 0.0, 1.0)})\r\n obs = _color_field_states(self.map_f, ['1', '2', '3', '4', '5', '6'],\r\n 'Foo', ['a', 'b', 'c'], 'Foo')\r\n self.assertEqual(exp[0], obs[0])\r\n assert_almost_equal(obs[1]['a'], exp[1]['a'])\r\n assert_almost_equal(obs[1]['b'], exp[1]['b'])\r\n assert_almost_equal(obs[1]['c'], exp[1]['c'])",
"def get_field_state_comparisons(dist_matrix_header, dist_matrix,\r\n mapping_header, mapping, field,\r\n comparison_field_states,\r\n suppress_symmetry_and_hollowness_check=False):\r\n _validate_input(dist_matrix_header, dist_matrix, mapping_header, mapping,\r\n field)\r\n\r\n # Make sure each comparison group field state is in the specified field.\r\n if not comparison_field_states:\r\n raise ValueError(\"You must provide at least one field state to \"\r\n \"compare to all of the other field states.\")\r\n mapping_data = [mapping_header]\r\n mapping_data.extend(mapping)\r\n groups = group_by_field(mapping_data, field)\r\n for field_state in comparison_field_states:\r\n if field_state not in groups:\r\n raise ValueError(\"The comparison group field state '%s' is not in \"\r\n \"the provided mapping file's field '%s'.\"\r\n % (field_state, field))\r\n\r\n # Grab a list of all other field states (besides the ones in\r\n # comparison_field_states). These will be the field states that the states\r\n # in comparison_field_states will be compared against.\r\n field_states = [group for group in groups.keys()\r\n if group not in comparison_field_states]\r\n\r\n # Get between distance groupings for the field of interest.\r\n between_groupings = get_grouped_distances(dist_matrix_header, dist_matrix,\r\n mapping_header, mapping, field, within=False,\r\n suppress_symmetry_and_hollowness_check=\r\n suppress_symmetry_and_hollowness_check)\r\n\r\n # Build up our 2D dictionary giving the distances between a field state and\r\n # a comparison group field state by filtering out the between_groupings\r\n # list to include only the comparisons that we want.\r\n result = {}\r\n for field_state in field_states:\r\n result[field_state] = {}\r\n for comp_field_state in comparison_field_states:\r\n result[field_state][comp_field_state] = []\r\n for group in between_groupings:\r\n if ((group[0] == field_state or group[1] == field_state)\r\n and (group[0] == comp_field_state or\r\n group[1] == comp_field_state)):\r\n # We've found a group of distances between our comparison\r\n # field state and the current field state, so keep the\r\n # data.\r\n result[field_state][comp_field_state] = group[2]\r\n return result",
"def get_state_colors():\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit(\n \"colors\", {\"colors\": state_colors, \"cases\": state_cases, \"active\": state_active}\n )",
"def fl_mapcolor(colr, red, green, blue):\n _fl_mapcolor = library.cfuncproto(\n library.load_so_libforms(), \"fl_mapcolor\",\\\n cty.c_ulong, [xfdata.FL_COLOR, cty.c_int, cty.c_int, cty.c_int],\n \"\"\"unsigned long fl_mapcolor(FL_COLOR col, int r, int g, int b)\"\"\")\n library.check_if_flinitialized()\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n i_red = library.convert_to_intc(red)\n i_green = library.convert_to_intc(green)\n i_blue = library.convert_to_intc(blue)\n library.keep_elem_refs(colr, ul_colr, red, green, blue, i_red, \\\n i_green, i_blue)\n retval = _fl_mapcolor(ul_colr, i_red, i_green, i_blue)\n return retval",
"def fl_get_object_color(ptr_flobject):\n _fl_get_object_color = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_object_color\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), cty.POINTER(xfdata.FL_COLOR),\n cty.POINTER(xfdata.FL_COLOR)], \\\n \"\"\"void fl_get_object_color(FL_OBJECT * ob, FL_COLOR * col1,\n FL_COLOR * col2)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n ul_fgcolr, ptr_fgcolr = library.make_FL_COLOR_and_pointer()\n ul_bgcolr, ptr_bgcolr = library.make_FL_COLOR_and_pointer()\n library.keep_elem_refs(ptr_flobject, ul_fgcolr, ptr_fgcolr, \\\n ul_bgcolr, ptr_bgcolr)\n _fl_get_object_color(ptr_flobject, ptr_fgcolr, ptr_bgcolr)\n return ul_fgcolr.value, ul_bgcolr.value",
"def test_color_field_states_invalid_input(self):\r\n # Field to color not in mapping file.\r\n self.assertRaises(ValueError, _color_field_states, self.map_f,\r\n ['1', '2', '3', '4', '5'], 'Fooz', ['a', 'b'], 'Bar')\r\n\r\n # Field to color by not in mapping file.\r\n self.assertRaises(ValueError, _color_field_states, self.map_f,\r\n ['1', '2', '3', '4', '5'], 'Foo', ['a', 'b'], 'Barz')\r\n\r\n # Field states are not found in field (due to subset of sample IDs).\r\n self.assertRaises(ValueError, _color_field_states, self.map_f,\r\n ['1', '2', '3', '4', '5'], 'Foo', ['a', 'c'], 'Bar')\r\n\r\n # Field states are not found in field (not in column at all).\r\n self.assertRaises(ValueError, _color_field_states, self.map_f,\r\n ['1', '2', '3', '4', '5', '6'], 'Foo', ['a', 'c', 'z'], 'Bar')\r\n\r\n # Not enough colors.\r\n samp_ids = [str(i) for i in range(1, 31)]\r\n self.assertRaises(ValueError, _color_field_states,\r\n self.too_many_colors_map_f, samp_ids, 'Description', samp_ids,\r\n 'Description')\r\n\r\n # No one-to-one mapping.\r\n self.assertRaises(ValueError, _color_field_states, self.map_f,\r\n ['1', '2', '3', '4', '5', '6'], 'Foo', ['a', 'c', 'b'], 'Baz')",
"def map_colorings(palette, colorings):\n mapped_colorings = []\n for coloring in colorings:\n mapped_colorings.append(map_coloring(palette, coloring))\n return mapped_colorings",
"def umap_color(metadata, color_field, n_colors, dtype=None, palette=magma):\n palette = palette(n_colors)\n if color_field is None:\n metadata_cf = metadata\n else:\n metadata_cf = metadata[color_field]\n\n if dtype is None:\n dtype = type(metadata_cf[0])\n field = [f if isinstance(f, dtype) else dtype()\n for f in metadata_cf]\n n_colors = n_colors if len(set(field)) >= n_colors else len(set(field))\n field_rank = {f: i / len(field) for i, f in enumerate(sorted(field))}\n return [palette[int(field_rank[f] * n_colors)] for f in field]",
"def color(self, values, ids=(), key_on='feature.id', palette='YlOrBr', **kwargs):\n # Set values and ids to both be simple sequences by inspecting values\n id_name, value_name = 'IDs', 'values'\n if isinstance(values, collections.abc.Mapping):\n assert not ids, 'IDs and a map cannot both be used together'\n if hasattr(values, 'columns') and len(values.columns) == 2:\n table = values\n ids, values = table.columns\n id_name, value_name = table.labels\n else:\n dictionary = values\n ids, values = list(dictionary.keys()), list(dictionary.values())\n if len(ids) != len(values):\n assert len(ids) == 0\n # Use indices as IDs\n ids = list(range(len(values)))\n\n m = self._create_map()\n data = pandas.DataFrame({id_name: ids, value_name: values})\n attrs = {\n 'geo_data': json.dumps(self.geojson()),\n 'data': data,\n 'columns': [id_name, value_name],\n 'key_on': key_on,\n 'fill_color': palette,\n }\n kwargs.update(attrs)\n folium.Choropleth(\n **kwargs,\n name='geojson'\n ).add_to(m)\n colored = self.format()\n colored._folium_map = m\n return colored",
"def get_colors(self):\n colors = [\"#244486\", \"#A6A6A6\", \"#B12122\"]\n cmap = LinearSegmentedColormap.from_list(\"mycmap\", colors)\n\n color_palette=[cmap(i) for i in np.linspace(0, 1, len(set(self.nodes_list)))]\n return dict(zip(list(set(self.nodes_list)), color_palette))",
"def colors_for_voronoi_polys_and_points(poly_shapes, poly_to_pt_assignments, cmap_name='tab20'):\n vor_colors = generate_n_colors(len(poly_shapes), cmap_name=cmap_name)\n\n pt_colors = [vor_colors[i_vor] for i_vor in get_points_to_poly_assignments(poly_to_pt_assignments)]\n\n assert len(vor_colors) <= len(pt_colors)\n\n return vor_colors, pt_colors",
"def _create_color_map(self):\n unique_labels = np.unique(self.out_labels)\n color_map = {}\n for unique_label in unique_labels:\n color_map[unique_label] = self._random_color()\n\n return color_map",
"def colors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetFunnelColorArgs']]]]:\n return pulumi.get(self, \"colors\")",
"def colors(self) -> dict:\n raise NotImplementedError",
"def test_get_field_state_comparisons(self):\r\n comparison_groupings = get_field_state_comparisons(\r\n self.dist_matrix_header, self.dist_matrix, self.mapping_header,\r\n self.mapping, self.field, ['Control'])\r\n expected = {'Fast': {'Control': [0.72899999999999998,\r\n 0.80000000000000004, 0.72099999999999997, 0.76500000000000001,\r\n 0.77600000000000002, 0.74399999999999999, 0.749,\r\n 0.67700000000000005, 0.73399999999999999, 0.77700000000000002,\r\n 0.73299999999999998, 0.72399999999999998, 0.69599999999999995,\r\n 0.67500000000000004, 0.65400000000000003, 0.69599999999999995,\r\n 0.73099999999999998, 0.75800000000000001, 0.73799999999999999,\r\n 0.73699999999999999]}}\r\n self.assertDictEqual(comparison_groupings, expected)\r\n\r\n comparison_groupings = get_field_state_comparisons(\r\n self.dist_matrix_header, self.dist_matrix, self.mapping_header,\r\n self.mapping, self.field, ['Fast'])\r\n expected = {'Control': {'Fast': [0.72899999999999998,\r\n 0.80000000000000004, 0.72099999999999997, 0.76500000000000001,\r\n 0.77600000000000002, 0.74399999999999999, 0.749,\r\n 0.67700000000000005, 0.73399999999999999, 0.77700000000000002,\r\n 0.73299999999999998, 0.72399999999999998, 0.69599999999999995,\r\n 0.67500000000000004, 0.65400000000000003, 0.69599999999999995,\r\n 0.73099999999999998, 0.75800000000000001, 0.73799999999999999,\r\n 0.73699999999999999]}}\r\n self.assertDictEqual(comparison_groupings, expected)",
"def populate_color(cls, values):\n\n color_name = values.get(\"color_name\")\n color_index = values.get(\"color_index\")\n palette = values.get(\"palette\")\n\n # Set a default if needed\n if palette is None:\n palette = \"vtint\"\n\n if (color_name is None) and (color_index is None):\n return values\n\n elif color_name is None:\n raise ValueError(\"'color_name' can't be null when 'color_index' is not null\")\n\n elif color_index is None:\n raise ValueError(\"'color_index' can't be null when 'color_name' is not null\")\n\n values[\"color\"] = get_colors((color_name, color_index), palette=palette)\n return values",
"def fl_color(colr):\n _fl_color = library.cfuncproto(\n library.load_so_libforms(), \"fl_color\",\\\n None, [xfdata.FL_COLOR],\\\n \"\"\"void fl_color(FL_COLOR col)\"\"\")\n library.check_if_flinitialized()\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n library.keep_elem_refs(colr, ul_colr)\n _fl_color(ul_colr)",
"def colors(self):\n unique, counts = np.unique(self.arr, return_counts=True)\n return {k: v for (k, v) in zip(unique, counts)}",
"def my_color_function(field):\n if field > 100000000:\n return \"#ff0000\"\n else:\n return \"#008000\"",
"def reduce_field_abrs(values):\n if \"c\" in values:\n values[\"color\"] = parse_color(values.pop(\"c\"))\n if \"ls\" in values:\n values[\"linestyle\"] = values.pop(\"ls\")\n if \"lw\" in values:\n values[\"linewidth\"] = values.pop(\"lw\")\n return values",
"def field_mapping(self):\n fields = self.fields\n if self.target_field is not None:\n del fields[self.target_field.get('name')]\n field_labels = list(self.fields.keys())\n\n field_mapping = {\n name: (\n field_labels.index(name),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DataField'\n }\n\n field_mapping.update({\n name: (\n field_labels.index(self.find(e, 'FieldRef').get('field')),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DerivedField'\n })\n\n return field_mapping",
"def colors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetLogTableColorArgs']]]]:\n return pulumi.get(self, \"colors\")",
"def colors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetTableColorArgs']]]]:\n return pulumi.get(self, \"colors\")",
"def color(self, *args):\n if args:\n l = len(args)\n if l == 1:\n pcolor = fcolor = args[0]\n elif l == 2:\n pcolor, fcolor = args\n elif l == 3:\n pcolor = fcolor = args\n pcolor = self._colorstr(pcolor)\n fcolor = self._colorstr(fcolor)\n self.pen(pencolor=pcolor, fillcolor=fcolor)\n else:\n return self._color(self._pencolor), self._color(self._fillcolor)",
"def state_style(person, color):\r\n return lambda x: {'fillColor': color if x['id']\r\n in person['States'] else 'white',\r\n 'color': 'black',\r\n 'weight': 0.3,\r\n 'fillOpacity': 0.5 if x['id']\r\n in person['States'] else 0.0\r\n }",
"def get_coloring(self):\n colors = dict()\n colors[-1] = 0\n if not self.sorting_active: # Don't want any normal colors on the last frame\n new_list = [int(i) for i in self.lst]\n if self.sorted_lst == new_list: # The list is sorted, color it green\n colors[-1] = 1\n return colors\n # Last read\n last_read_key = ThreadManagment.sort_data_by_thread[self.thread.ident].last_read_key\n if last_read_key >= 0:\n colors[last_read_key] = 2\n # Last write\n last_write_key = ThreadManagment.sort_data_by_thread[self.thread.ident].last_write_key\n if last_write_key >= 0:\n colors[last_write_key] = 2\n # Last lhs comparison\n last_cmp_left_value = ThreadManagment.sort_data_by_thread[self.thread.ident].last_cmp_left\n for i in range(len(self.lst)):\n if int(self.lst.getitem_no_count(i)) == int(last_cmp_left_value):\n colors[i] = 3\n break\n # Last rhs comparison\n last_cmp_right_value = ThreadManagment.sort_data_by_thread[self.thread.ident].last_cmp_right\n for i in range(len(self.lst)):\n if int(self.lst.getitem_no_count(i)) == int(last_cmp_right_value):\n colors[i] = 3\n break\n return colors",
"def colors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetLineColorArgs']]]]:\n return pulumi.get(self, \"colors\")",
"def getColorDict():\n scribus.statusMessage(\"Reading existing colors...\")\n colornames = scribus.getColorNames()\n scribus.progressTotal(len(colornames))\n i=0\n colordict={}\n for name in colornames:\n colordict[name]=None\n i=i+1\n scribus.progressSet(i)\n return colordict #we can ask this dict if the color already exists",
"def iter_colors(self):\n return itervalues(self)"
]
| [
"0.79582155",
"0.65347934",
"0.5298748",
"0.50596595",
"0.49272713",
"0.48494363",
"0.48474175",
"0.4675003",
"0.4637813",
"0.46354294",
"0.46199623",
"0.4567978",
"0.45625272",
"0.45435685",
"0.4524946",
"0.44895437",
"0.44807377",
"0.44734833",
"0.44628522",
"0.44232404",
"0.44162634",
"0.4410436",
"0.44053647",
"0.4398237",
"0.43945947",
"0.43644884",
"0.43619883",
"0.43597367",
"0.4333689",
"0.4331692"
]
| 0.79464936 | 1 |
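The `_color_field_states` document above is tied to QIIME's metadata map and color tables; its core is assigning colors from a finite pool to the distinct color_by states while enforcing a one-to-one mapping. A stripped-down sketch of that core (names and palette are illustrative, not QIIME's):

```python
# Minimal sketch of the color-assignment core: each wanted field state must map
# to exactly one color_by state, which is then given a color from a finite pool.
from collections import defaultdict


def assign_colors(field_states, color_by_states, wanted_states, palette):
    pairs = defaultdict(set)
    for fs, cbs in zip(field_states, color_by_states):
        if fs in wanted_states:
            pairs[fs].add(cbs)

    color_map, colors, pool = {}, [], list(palette)
    for fs in wanted_states:
        if len(pairs[fs]) != 1:
            raise ValueError("no one-to-one mapping for state %r" % fs)
        cbs = next(iter(pairs[fs]))
        if cbs not in color_map:
            if not pool:
                raise ValueError("palette exhausted")
            color_map[cbs] = pool.pop(0)
        colors.append(color_map[cbs])
    return colors, color_map
```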
Change the sort order of the query set, depending on the form field [sortOrder]. This function is used by EntryListView. The value of [sortOrder] is 'woord' by default. [sortOrder] is a hidden field inside the "adminsearch" HTML form in the template admin_gloss_list.html. Its value is changed by clicking the up/down buttons in the second row of the search result table. | def order_queryset_by_sort_order(get, qs):
def get_string_from_tuple_list(lstTuples, number):
"""Get the string value corresponding to a number in a list of number-string tuples"""
sBack = [tup[1] for tup in lstTuples if tup[0] == number]
return sBack
# Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]
def order_queryset_by_tuple_list(qs, sOrder, sListName):
"""Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]"""
# Get a list of tuples for this sort-order
tpList = build_choice_list(sListName)
# Determine sort order: ascending is default
bReversed = False
if (sOrder[0:1] == '-'):
# A starting '-' sign means: descending order
sOrder = sOrder[1:]
bReversed = True
# Order the list of tuples alphabetically
# (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)
tpList = sorted(tpList, key=operator.itemgetter(1))
# Order by the string-values in the tuple list
return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)
# Set the default sort order
sOrder = 'woord' # Default sort order if nothing is specified
# See if the form contains any sort-order information
if ('sortOrder' in get and get['sortOrder'] != ''):
# Take the user-indicated sort order
sOrder = get['sortOrder']
# The ordering method depends on the kind of field:
# (1) text fields are ordered straightforwardly
# (2) fields made from a choice_list need special treatment
if (sOrder.endswith('handedness')):
ordered = order_queryset_by_tuple_list(qs, sOrder, "Handedness")
elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):
ordered = order_queryset_by_tuple_list(qs, sOrder, "Handshape")
elif (sOrder.endswith('locprim')):
ordered = order_queryset_by_tuple_list(qs, sOrder, "Location")
else:
# Use straightforward ordering on field [sOrder]
ordered = qs.order_by(sOrder)
# return the ordered list
return ordered | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def OnReorder( self, event ):\n column = self.columns[event.GetColumn()]\n if column.sortOn:\n # multiple sorts for the click...\n columns = [ self.columnByAttribute( attr ) for attr in column.sortOn ]\n diff = [ (a,b) for a,b in zip( self.sortOrder, columns ) if b is not a[1]]\n if not diff:\n self.sortOrder[0] = (not self.sortOrder[0][0], column)\n else:\n self.sortOrder = [\n (c.defaultOrder,c) for c in columns \n ] + [ (a,b) for (a,b) in self.sortOrder if b not in columns]\n else:\n if column is self.sortOrder[0][1]:\n # reverse current major order\n self.sortOrder[0] = (not self.sortOrder[0][0], column)\n else:\n self.sortOrder = [(column.defaultOrder,column)] + [\n (a,b) \n for (a,b) in self.sortOrder if b is not column \n ]\n # TODO: store current selection and re-select after sorting...\n self.reorder()\n self.Refresh()",
"def changeOrder(self):\n order = self.orderSpinBox.value()\n nfilter = int(str(self.filterComboBox.currentText()))\n if order > nfilter - 2:\n order = nfilter - 2\n if order < 1:\n order = 1\n self.orderSpinBox.setValue(order)\n self.order = order",
"def sort(self, column, order=Qt.AscendingOrder):\n if(column == Columns.Date):\n self.sorting = Sorting.Date\n elif(column == Columns.Code):\n self.sorting = Sorting.Code\n elif(column == Columns.User):\n self.sorting = Sorting.User\n elif(column == Columns.Tags):\n self.sorting = Sorting.Priviledges\n elif(column == Columns.TimesRequested):\n self.sorting = Sorting.TimesRequested\n\n if(order == Qt.DescendingOrder):\n self.sorting |= Sorting.Reversed\n\n self._reset_view()",
"def sort_field():\n _id = request.form['_id']\n old_index = request.form['old_index']\n new_index = request.form['new_index']\n data, code, message = FIELD_SERVICE.sort_field(_id, old_index, new_index)\n return __result(data, code, message)",
"def get_queryset(self):\n qs = super(SortForm, self).get_queryset()\n\n qs = self.pre_sort(qs)\n\n # Ensure that the form is valid\n if not self.is_valid():\n return qs\n\n # Do Sorting\n sorts = self.cleaned_data.get('sort', [])\n order_by = []\n for sort in sorts:\n param = self.HEADERS[abs(sort) - 1]['column']\n if sort < 0:\n param = '-' + param\n order_by.append(param)\n\n if order_by:\n qs = qs.order_by(*order_by)\n\n qs = self.post_sort(qs)\n\n return qs",
"def test_ordering_with_overridden_field_name_and_descending(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status', '-status']\n order_by_field = 'order'\n\n f = F().form\n self.assertNotIn('o', f.fields)\n self.assertIn('order', f.fields)\n self.assertEqual(f.fields['order'].choices, [('status', 'Status'), ('-status', 'Status (descending)')])",
"def get_sort_query(self, kind, order, is_number):\n pass",
"def sort_results(self, sort_option):\r\n self.model.sort_data(sort_option)",
"def _sortHandler(self) -> None:\n response, columnIndex, ascending = self._sortDialog()\n order = Qt.AscendingOrder if ascending else Qt.DescendingOrder\n if response:\n self._mainFileView.sortByColumn(columnIndex, order)",
"def ordering(self, qs):\n request = self.request\n # Number of columns that are used in sorting\n try:\n i_sorting_cols = int(request.REQUEST.get('iSortingCols', 0))\n except ValueError:\n i_sorting_cols = 0\n\n order = []\n order_columns = self.get_order_columns()\n for i in range(i_sorting_cols):\n # sorting column\n try:\n i_sort_col = int(request.REQUEST.get('iSortCol_%s' % i))\n except ValueError:\n i_sort_col = 0\n # sorting order\n s_sort_dir = request.REQUEST.get('sSortDir_%s' % i)\n\n sdir = '-' if s_sort_dir == 'desc' else ''\n\n sortcol = order_columns[i_sort_col]\n if isinstance(sortcol, list):\n for sc in sortcol:\n order.append('%s%s' % (sdir, sc))\n else:\n order.append('%s%s' % (sdir, sortcol))\n if order:\n return qs.order_by(*order)\n return qs",
"def change_sort(self, sorting_choice):\r\n self.message = \"place have been sorted by: {}\".format(sorting_choice)\r\n self.place_list.sort(sorting_choice)\r\n self.root.ids.entriesBox.clear_widgets()\r\n self.create_widget()\r\n sort_index = self.sort_choices.index(sorting_choice)\r\n self.current_sort = self.sort_choices[sort_index]",
"def test_ordering_with_overridden_field_name(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status']\n order_by_field = 'order'\n\n f = F().form\n self.assertNotIn('o', f.fields)\n self.assertIn('order', f.fields)\n self.assertEqual(f.fields['order'].choices, [('status', 'Status')])",
"def on_combo_sort_col_names_currentIndexChanged(self, index):\n if self.ui.sort_radio_asc.isChecked():\n self.model.setSort(index, Qt.AscendingOrder)\n else:\n self.model.setSort(index, Qt.DescendingOrder)\n self.model.select()",
"def post_sort(self, qs):\n return qs",
"def get_ordering(self):\n self.ordering = \"-fecha_vista\"\n return self.ordering",
"def get_selected_ordering(self):\n return self.request.GET.get(self.ordering_kwarg)",
"def get_sort_field(self, kind, order, is_number):\n pass",
"def sort_order(self, sort_order: int):\n\n self._sort_order = sort_order",
"def sort(self, col, order):\r\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\r\n self.mylist = sorted(self.mylist,\r\n key=operator.itemgetter(col))\r\n if order == QtCore.Qt.DescendingOrder:\r\n self.mylist.reverse()\r\n self.emit(SIGNAL(\"layoutChanged()\"))",
"def sort(self, order):\r\n params = base.get_params(None, locals())\r\n url = '{0}/sort'.format(self.get_url())\r\n\r\n request = http.Request('PUT', url, params)\r\n\r\n return request, parsers.parse_json",
"def sort_order(self, sort_order):\n\n self._sort_order = sort_order",
"def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".format(value)\n )",
"def sort(self, col, order):\r\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\r\n self.mylist = sorted(self.mylist,\r\n key=operator.itemgetter(col))\r\n if order == Qt.DescendingOrder:\r\n self.mylist.reverse()\r\n self.emit(SIGNAL(\"layoutChanged()\"))",
"def testSortNoDbAscending(self):\n self.request.GET['sort'] = \"custom\"\n self.datagrid.load_state()\n self.assertEqual(self.datagrid.sort_list, [\"custom\"])\n self.assertEqual(len(self.datagrid.rows), self.datagrid.paginate_by)\n self.assertEqual(self.datagrid.rows[0]['object'].name, \"Group 04\")\n self.assertEqual(self.datagrid.rows[1]['object'].name, \"Group 08\")\n self.assertEqual(self.datagrid.rows[2]['object'].name, \"Group 12\")\n\n # Exercise the code paths when rendering\n self.datagrid.render_listview()",
"def sort(self, col, order):\n self.layoutAboutToBeChanged.emit()\n self.mylist = sorted(self.mylist,\n key=operator.itemgetter(col))\n if order == Qt.DescendingOrder:\n self.mylist.reverse()\n self.layoutChanged.emit()",
"def testSortAscending(self):\n self.request.GET['sort'] = \"name,objid\"\n self.datagrid.load_state()\n\n self.assertEqual(self.datagrid.sort_list, [\"name\", \"objid\"])\n self.assertEqual(len(self.datagrid.rows), self.datagrid.paginate_by)\n self.assertEqual(self.datagrid.rows[0]['object'].name, \"Group 01\")\n self.assertEqual(self.datagrid.rows[1]['object'].name, \"Group 02\")\n self.assertEqual(self.datagrid.rows[2]['object'].name, \"Group 03\")\n\n # Exercise the code paths when rendering\n self.datagrid.render_listview()",
"def pre_sort(self, qs):\n return qs",
"def set_document_order(self, order):\n self.set_value_into_input_field(self.order_text_field_locator, order)",
"def __init__(self, data=None, *args, **kwargs):\n super().__init__(data=data, *args, **kwargs)\n\n # if a keyword search term is not present, relevance sort is disabled\n if not data or not data.get(\"q\", None):\n self.fields[\"sort\"].widget.choices[0] = (\n self.SORT_CHOICES[0][0],\n {\"label\": self.SORT_CHOICES[0][1], \"disabled\": True},\n )",
"def _augment_order(self, stmt, key, reverse):\n sqlstmt = stmt + \" ORDER BY %s\" % key\n if reverse:\n sqlstmt += \" DESC\"\n else:\n sqlstmt += \" ASC\"\n return sqlstmt"
]
| [
"0.66559404",
"0.65055186",
"0.63866353",
"0.63482296",
"0.6306466",
"0.6230435",
"0.6193875",
"0.60701925",
"0.5975241",
"0.59398514",
"0.58977914",
"0.58893275",
"0.58859015",
"0.5883438",
"0.58178324",
"0.58157855",
"0.5789593",
"0.57827884",
"0.5777034",
"0.5771956",
"0.5758289",
"0.57569736",
"0.57558995",
"0.5751626",
"0.5729712",
"0.57160896",
"0.57097346",
"0.5702781",
"0.56703454",
"0.5667723"
]
| 0.65601 | 1 |
Get the string value corresponding to a number in a list of number-string tuples | def get_string_from_tuple_list(lstTuples, number):
sBack = [tup[1] for tup in lstTuples if tup[0] == number]
return sBack | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def number_to_name(number):\n for name, value in item_dict.items():\n if number == value:\n return name",
"def convert_label_num2string(number, num_types):\n dictionary = empty_label_dictionary(num_types)\n all_labels = list(dictionary.keys())\n return all_labels[number]",
"def num(number: int) -> str:\n numbers = {1: \"one\", 2: \"two\", 3: \"three\", 4: \"four\", 5: \"five\",\n 6: \"six\", 7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"ten\"}\n if number in numbers:\n return numbers[number]\n else:\n return f\"{number:,}\"",
"def numbers(n):\n if n == 0:\n return 'zero'\n elif n == 1:\n return 'one'\n elif n == 2:\n return 'two'\n else:\n return 'unknown number'",
"def species_tuple_to_string(species_tuple, roman_numerals=True):\n atomic_number, ion_number = species_tuple\n element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]\n if roman_numerals:\n roman_ion_number = int_to_roman(ion_number+1)\n return '{0} {1}'.format(str(element_symbol), roman_ion_number)\n else:\n return '{0} {1:d}'.format(element_symbol, ion_number)",
"def apnumber(value):\r\n try:\r\n value = int(value)\r\n except (TypeError, ValueError):\r\n return value\r\n if not 0 < value < 10:\r\n return str(value)\r\n return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'),\r\n _('seven'), _('eight'), _('nine'))[value - 1]",
"def numList2String(l):\n\treturn ''.join(map(chr, l))",
"def getNameFromTuple(tuple, index):\n for row in tuple:\n if row[0] is int(index):\n return row[1]\n else:\n return None",
"def _number_finder(s, regex, numconv, py3_safe):\n\n # Split. If there are no splits, return now\n s = regex.split(s)\n if len(s) == 1:\n return tuple(s)\n\n # Now convert the numbers to numbers, and leave strings as strings\n s = remove_empty(s)\n for i in py23_range(len(s)):\n try:\n s[i] = numconv(s[i])\n except ValueError:\n pass\n\n # If the list begins with a number, lead with an empty string.\n # This is used to get around the \"unorderable types\" issue.\n # The _py3_safe function inserts \"\" between numbers in the list,\n # and is used to get around \"unorderable types\" in complex cases.\n # It is a separate function that needs to be requested specifically\n # because it is expensive to call.\n if not isinstance(s[0], py23_basestring):\n return _py3_safe([''] + s) if py3_safe else [''] + s\n else:\n return _py3_safe(s) if py3_safe else s",
"def error_num_to_name(num):\r\n for t in ERROR_CODES:\r\n if t[0] == num:\r\n return t[1]",
"def convert(number: int) -> str:\n\n sounds = [(3, \"Pling\"), (5, \"Plang\"), (7, \"Plong\")]\n result = [sound for divisor, sound in sounds if number % divisor == 0]\n\n return \"\".join(result) or f\"{number}\"",
"def getStrs(pre,num):\n result = []\n for i in range(num):\n result.append(pre+str(i))\n return result",
"def string(self, x):\n if isinstance(x, tuple):\n index = self.tuple_to_index[x]\n elif isinstance(x, int):\n index = x\n else:\n raise ValueError('x should be tuple or int; received {}'.format(x))\n return self.strings[index]",
"def processed(N:int)->tuple:\n l1= str(N)\n a,b = '',''\n for i in range(len(l1)):\n if l1[i] == '4':\n a+='2'\n b+='2'\n else:\n a+=str(l1[i])\n b+='0'\n return int(a), int(b)",
"def num_to_str(numList):\n\n\tresult = ''\n\tfor num in numList:\n\t\tresult += str(num)\n\treturn result",
"def translate_number(number):\n return NUMBER_TRANSLATOR[number]",
"def get_number(x):\n\n return re.findall(r'\\d+', x)[0]",
"def number_to_symbol(number):\n return [x for x in _atomic_number if _atomic_number[x] == number][0]",
"def decode(self, number=int) -> str:\n try:\n for key, value in self.index.table.items():\n if value == number:\n return(key)\n except Exception as error:\n print(f\"Error: self.decode({number}) -> {error}\")",
"def error_num_to_desc(num):\r\n for t in ERROR_CODES:\r\n if t[0] == num:\r\n try:\r\n return t[2]\r\n except IndexError:\r\n return \"\"",
"def extractDigits(key):\n text = \"\"\n digits = \"\"\n for c in key:\n if c in \"0123456789\":\n digits += c\n else:\n text += c\n return (text, 0 if not digits else int(digits))",
"def find_value_at(list, indx):\n row = list[indx]\n val = row.find_all(\"td\")[1].string[:-1]\n return float(val.replace(',', '.'))",
"def translateNumber(n):\r\n if type(n) != str:\r\n return None\r\n else:\r\n translation = \"\"\r\n word = \"\"\r\n for c in n:\r\n if c != ' ':\r\n word += c\r\n elif word in Numbers:\r\n translation += Numbers[word] + \" \"\r\n else:\r\n translation += word + \" \"\r\n return translation",
"def spell_number(num):\n tens, units = num / 10, num % 10\n tens_str = NUMBERS_10[tens]\n units_str = NUMBERS_1[units]\n if tens == 1:\n return NUMBERS_TEEN[units]\n elif tens:\n if units:\n return \"{t} {u}\".format(t=tens_str, u=units_str)\n return \"{t}\".format(t=tens_str)\n else:\n return units_str",
"def value(name):\r\n return sum(alpha.index(str(l)) + 1 for l in name)",
"def get_value(value: str, registers: dict):\n\n if value in registers:\n return registers[value]\n\n return int(value)",
"def print_ans(N, sr):\n idx_list = []\n for s in sr:\n tmp = np.where(base_list == s)[0]\n idx_list.append(int(tmp))\n ans = ''.join(dict_list[N][idx_list])\n print(ans)",
"def greater_than_index(numlist, singnum):\r\n try:\r\n for elem in numlist:\r\n if elem >= singnum:\r\n e_val = numlist.index(elem)\r\n return e_val\r\n except ValueError:\r\n return 'None. Try a value contained within the list.'",
"def get(self, position):\n return self.numbers[position[0]][position[1]]",
"def find_first_int(*, instr) -> tuple:\n try:\n r = re.compile(r\"\\D*((?:\\d+\\.?)+)\\D*\")\n m = r.search(instr)\n key = f\"{int(m.group(1).replace('.', '')):>64}\"\n except AttributeError:\n key = instr.split(\".\")[0].lower()\n\n return key"
]
| [
"0.6241433",
"0.6169379",
"0.59133387",
"0.5674001",
"0.56030846",
"0.55849",
"0.55705535",
"0.5559834",
"0.5558215",
"0.5555174",
"0.55171454",
"0.55084604",
"0.5500799",
"0.5490005",
"0.54363096",
"0.5423369",
"0.5394054",
"0.5364714",
"0.53266186",
"0.5296184",
"0.5288965",
"0.5269076",
"0.52583057",
"0.52371925",
"0.5218879",
"0.52159375",
"0.51964074",
"0.51959944",
"0.5189877",
"0.5174388"
]
| 0.80886817 | 0 |
Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName] | def order_queryset_by_tuple_list(qs, sOrder, sListName):
# Get a list of tuples for this sort-order
tpList = build_choice_list(sListName)
# Determine sort order: ascending is default
bReversed = False
if (sOrder[0:1] == '-'):
# A starting '-' sign means: descending order
sOrder = sOrder[1:]
bReversed = True
# Order the list of tuples alphabetically
# (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)
tpList = sorted(tpList, key=operator.itemgetter(1))
# Order by the string-values in the tuple list
return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def order_queryset(self, queryset):\n if ordering := self.request.query_params.get(\"ordering\"):\n order_by = []\n regex = re.compile(r\"-?annotations__(?P<field_id>\\d+)\")\n fields = [field.strip() for field in ordering.split(\",\")]\n for match in filter(None, map(regex.match, fields)):\n field_id = match.group(\"field_id\")\n annotation_value = AnnotationValue.objects.filter(\n entity_id=OuterRef(\"pk\"), field_id=field_id\n ).values(\"_value__value\")\n annotate = {f\"_order_{field_id}\": Subquery(annotation_value)}\n queryset = queryset.annotate(**annotate)\n sign = \"-\" if match.string.startswith(\"-\") else \"\"\n order_by.append(f\"{sign}_order_{field_id}\")\n if order_by:\n queryset = queryset.order_by(*order_by)\n return queryset",
"def order_queryset_by_sort_order(get, qs):\n\n def get_string_from_tuple_list(lstTuples, number):\n \"\"\"Get the string value corresponding to a number in a list of number-string tuples\"\"\"\n sBack = [tup[1] for tup in lstTuples if tup[0] == number]\n return sBack\n\n # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\n def order_queryset_by_tuple_list(qs, sOrder, sListName):\n \"\"\"Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\"\"\"\n\n # Get a list of tuples for this sort-order\n tpList = build_choice_list(sListName)\n # Determine sort order: ascending is default\n bReversed = False\n if (sOrder[0:1] == '-'):\n # A starting '-' sign means: descending order\n sOrder = sOrder[1:]\n bReversed = True\n\n # Order the list of tuples alphabetically\n # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)\n tpList = sorted(tpList, key=operator.itemgetter(1))\n # Order by the string-values in the tuple list\n return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)\n\n # Set the default sort order\n sOrder = 'woord' # Default sort order if nothing is specified\n # See if the form contains any sort-order information\n if ('sortOrder' in get and get['sortOrder'] != ''):\n # Take the user-indicated sort order\n sOrder = get['sortOrder']\n\n # The ordering method depends on the kind of field:\n # (1) text fields are ordered straightforwardly\n # (2) fields made from a choice_list need special treatment\n if (sOrder.endswith('handedness')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handedness\")\n elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handshape\")\n elif (sOrder.endswith('locprim')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Location\")\n else:\n # Use straightforward ordering on field [sOrder]\n ordered = qs.order_by(sOrder)\n\n # return the ordered list\n return ordered",
"def order_by(self, field_paths, order=None):\n raise NotImplementedError(\"This should have been implemented.\")",
"def order_by(self, field_name, direction=ASCENDING):\n\n from jetengine.fields.base_field import BaseField\n from jetengine.fields.list_field import ListField\n\n if isinstance(field_name, (ListField,)):\n raise ValueError(\n \"Can't order by a list field. If you meant to order by the size of the list, please use either an Aggregation Pipeline query (look for Document.objects.aggregate) or create an IntField with the size of the list field in your Document.\"\n )\n\n if isinstance(field_name, (BaseField,)):\n field_name = field_name.name\n\n if field_name not in self.__klass__._fields:\n raise ValueError(\n \"Invalid order by field '%s': Field not found in '%s'.\" % (field_name, self.__klass__.__name__)\n )\n\n field = self.__klass__._fields[field_name]\n self._order_fields.append((field.db_field, direction))\n return self",
"def ordering(self):\r\n if hasattr(self, \"queryset\"):\r\n aliases = {}\r\n for bound_column in self.table.columns:\r\n aliases[bound_column.order_by_alias] = bound_column.order_by\r\n try:\r\n return next(segment(self.queryset.query.order_by, aliases))\r\n except StopIteration:\r\n pass",
"def get_order(self, order, fields_name, many_to_many_fields):\n next_direction = '' if order[:1] == '-' else '-'\n real_order = ''\n field = ''\n if order[1:] == 'pk' or order == 'pk':\n real_order = order\n field = 'pk'\n else:\n if order[1:] in fields_name or order in fields_name:\n if order[1:] in many_to_many_fields or order in many_to_many_fields:\n real_order = 'pk'\n field = 'pk'\n else:\n real_order = order\n field = order if next_direction == '-' else order[1:]\n else:\n real_order = 'pk'\n field = 'pk'\n return next_direction, field, real_order",
"def ordering(self, qs):\n request = self.request\n # Number of columns that are used in sorting\n try:\n i_sorting_cols = int(request.REQUEST.get('iSortingCols', 0))\n except ValueError:\n i_sorting_cols = 0\n\n order = []\n order_columns = self.get_order_columns()\n for i in range(i_sorting_cols):\n # sorting column\n try:\n i_sort_col = int(request.REQUEST.get('iSortCol_%s' % i))\n except ValueError:\n i_sort_col = 0\n # sorting order\n s_sort_dir = request.REQUEST.get('sSortDir_%s' % i)\n\n sdir = '-' if s_sort_dir == 'desc' else ''\n\n sortcol = order_columns[i_sort_col]\n if isinstance(sortcol, list):\n for sc in sortcol:\n order.append('%s%s' % (sdir, sc))\n else:\n order.append('%s%s' % (sdir, sortcol))\n if order:\n return qs.order_by(*order)\n return qs",
"def order_query(self, query):\n\n direction = desc if self.direction == 'desc' else asc\n if self.order in inspect(self.model_class).columns.keys():\n attribute = getattr(self.model_class, self.order)\n elif self.order == 'group.name':\n attribute = func.coalesce(UserGroup.name, '')\n elif self.order == 'user.realname':\n attribute = func.coalesce(User.realname, '')\n elif self.order == 'user.username':\n attribute = func.coalesce(User.username, '')\n elif self.order == 'user.name':\n attribute = func.coalesce(User.realname, User.username, '')\n else:\n attribute = self.model_class.first_issue\n\n return query.order_by(None).order_by(direction(attribute))",
"def _parse_order_spec(self, spec):\n if not isinstance(spec, (list, tuple)):\n spec = [spec]\n \n res = []\n for field in spec:\n direction = pymongo.ASCENDING\n if field.startswith('-'):\n direction = pymongo.DESCENDING\n field = field[1:]\n \n field = self.document._meta.get_field_by_name(field).db_name\n res.append((field, direction))\n \n return res",
"def get_sort_query(self, kind, order, is_number):\n pass",
"def pre_sort(self, qs):\n return qs",
"def sortByName(requestContext, seriesList):\n def compare(x,y):\n return cmp(x.name, y.name)\n\n seriesList.sort(compare)\n return seriesList",
"def order_by(self, *fields):\n self.query = self.query.sort(self._parse_order_spec(fields))\n return self",
"def update_order():",
"def update_order():",
"def get_ordering(self, request, queryset, view):\n ordering = []\n params = get_datatables_ordering(request.query_params)\n if params:\n fields = [param.strip() for param in params.split(',')]\n ordering = self.remove_invalid_fields(queryset, fields, view, request)\n if ordering:\n return ordering\n\n # No ordering was included, or all the ordering fields were invalid\n return self.get_default_ordering(view)",
"def order_by(cls, *args):\n return cls.query.order_by(*args)",
"def orderList(dataSource,**kwargs):\n\treturn sorted(dataSource)",
"def user_order_by(self, field):\n # Get ordering model.\n model_label = order.utils.resolve_labels('.'.join(\\\n [self.model._meta.app_label, self.model._meta.object_name]))\n orderitem_set = getattr(self.model, \\\n order.utils.resolve_order_item_related_set_name(model_label))\n order_model = orderitem_set.related.model\n\n # Resolve ordering model table name.\n db_table = order_model._meta.db_table\n\n # Add ordering field as extra queryset fields.\n pk_name = self.model._meta.pk.attname\n\n # If we have a descending query remove '-' from field name when quering.\n sanitized_field = field.lstrip('-')\n\n extra_select = {\n sanitized_field: '(SELECT %s from %s WHERE item_id=%s.%s)' % \\\n (sanitized_field, db_table, self.model._meta.db_table, pk_name)\n }\n\n # Use original field name when ordering to allow for descending.\n return self.extra(select=extra_select).all().order_by(field)",
"def post_sort(self, qs):\n return qs",
"def Order(self) -> int:",
"def _sort_by_query_string_param(self, songs):\n orderable_fields_dict = {\n 'name': Lower('name'),\n 'artist': Lower('artist__name'),\n 'avgRating': 'average_rating',\n 'year': 'year'\n }\n\n order_by = self.request.query_params.get('orderBy', None)\n\n if order_by is not None and order_by in orderable_fields_dict:\n order_field = orderable_fields_dict[order_by]\n\n # sort in direction indicated by `direction` query string param\n # or ascending, by default\n direction = self.request.query_params.get('direction', 'asc')\n if direction == 'desc':\n if order_by == 'name' or order_by == 'artist':\n order_field = order_field.desc()\n else:\n order_field = '-' + order_field\n\n # add annotation for average_rating to sort by computed property\n if order_by == 'avgRating':\n songs = songs.annotate(\n average_rating=Avg('ratings__rating')\n )\n\n songs = songs.order_by(order_field)\n\n return songs",
"def order(self):\n raise NotImplementedError()",
"def order_data(self, data, order):\n return data",
"def get_ordering(self):\n self.ordering = \"-fecha_vista\"\n return self.ordering",
"def object_list(self):\n\n def _sort(ob, ol):\n reverse = ob.startswith(\"-\")\n ob = ob[1:] if reverse else ob\n for column in self.columns:\n if column.sort_key_fn is not None and column.name == ob:\n return sorted(ol, key=column.sort_key_fn, reverse=reverse)\n if self._meta.order_by and hasattr(ol, \"order_by\"):\n return ol.order_by(*self._meta.order_by.split(\"|\"))\n return ol\n\n ol = self._object_list\n ob = self._meta.order_by\n if not ob: return ol\n if isinstance(ob, basestring):\n return _sort(ob, ol)\n elif isinstance(ob, list):\n ob.reverse()\n for fn in ob:\n ol = _sort(fn, ol)\n return ol",
"def test_ordering_with_overridden_field_name(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status']\n order_by_field = 'order'\n\n f = F({'order': 'status'}, queryset=self.qs)\n self.assertQuerysetEqual(\n f.qs, ['carl', 'alex', 'jacob', 'aaron'], lambda o: o.username)",
"def sort_queryset(queryset):\n objects = list(queryset)\n my_cmp = lambda x, y: cmp(str(x), str(y))\n objects.sort(my_cmp)\n return objects",
"def __order_queryset(self, queryset):\n if self.get_paginate_by(queryset) and \\\n self.request.method == \"POST\" and self.__has_initially_selected_items():\n current_order_by = list(queryset.query.order_by)\n whenqueries = []\n max_index = 0\n for index, value in enumerate(self.get_selected_values_queryset().order_by(*current_order_by)):\n whenqueries.append(models.When(pk=value.pk, then=models.Value(index)))\n max_index = index\n queryset = queryset.annotate(\n cradmin_multiselect2_ordering=models.Case(\n *whenqueries,\n default=max_index + 1,\n output_field=models.IntegerField()\n )\n )\n order_by = ['cradmin_multiselect2_ordering']\n order_by.extend(current_order_by)\n queryset = queryset.order_by(*order_by)\n return queryset",
"def order_by(self, *fields):\n self._evaluated = False\n if self._order is None:\n self._order = []\n\n for field in fields:\n direction = \"asc\"\n if field.startswith('-'):\n direction = \"desc\"\n field = field[1:]\n\n self._order.append({ field : direction })\n\n return self"
]
| [
"0.68426776",
"0.68241733",
"0.6443088",
"0.64092714",
"0.635529",
"0.6322692",
"0.6254462",
"0.60782796",
"0.6071513",
"0.6053254",
"0.60431546",
"0.6017769",
"0.60118747",
"0.59976304",
"0.59976304",
"0.5987454",
"0.59871304",
"0.5966309",
"0.59529895",
"0.5945509",
"0.5930996",
"0.5919372",
"0.5917066",
"0.5902061",
"0.5896425",
"0.5863598",
"0.5843358",
"0.584148",
"0.5828631",
"0.58105755"
]
| 0.73274845 | 0 |
Paginate by specified value in querystring, or use default class property value. | def get_paginate_by(self, queryset):
return self.request.GET.get('paginate_by', self.paginate_by) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_pagination_from_request(\n base_class=Pagination, base_class_constructor_kwargs=None,\n default_size=None\n):\n kwargs = base_class_constructor_kwargs or dict()\n\n get_arg = flask.request.args.get\n return base_class.from_request(get_arg, default_size, **kwargs)",
"def paginate(self, request, queryset, max_results):\n page = request.GET.get('page')\n paginator = Paginator(queryset, max_results)\n try:\n return paginator.page(page)\n except PageNotAnInteger:\n return paginator.page(1)\n except EmptyPage:\n return paginator.page(paginator.num_pages)",
"def get_page(self):\n # This code is taken from within the GenericAPIView#paginate_queryset method.\n # We need need access to the page outside of that method for our paginate_search_results method\n page_kwarg = self.kwargs.get(self.paginator.page_query_param)\n page_query_param = self.request.query_params.get(self.paginator.page_query_param)\n return page_kwarg or page_query_param or 1",
"def paginator_class(self):\n if self.request.QUERY_PARAMS.get(\"with_extra\", False):\n return PaginatorWithExtraItem\n return Paginator",
"def paginate_queryset(self, queryset, request, view=None):\n # page_size = self.get_page_size(request)\n page_size = request.GET.get(\"page_size\")\n if not page_size:\n return None\n\n paginator = self.django_paginator_class(queryset, page_size)\n page_number = request.query_params.get(self.page_query_param, 1)\n if page_number in self.last_page_strings:\n page_number = paginator.num_pages\n\n try:\n self.page = paginator.page(page_number)\n except InvalidPage as exc:\n msg = self.invalid_page_message.format(\n page_number=page_number, message=str(exc)\n )\n raise NotFound(msg)\n\n if paginator.num_pages > 1 and self.template is not None:\n # The browsable API should display pagination controls.\n self.display_page_controls = True\n\n self.request = request\n return list(self.page)",
"def paginate_queryset(self, queryset, request, view=None):\n self.request = request\n\n try:\n self.page_number = int(request.query_params.get(\n self.page_query_param, 1\n ))\n except ValueError:\n self.page_number = 1\n\n if self.page_number > self.max_page:\n raise NotFound('Result page number too high.')\n\n offset = (self.page_number - 1) * self.page_size\n queryset = queryset[offset:offset + self.page_size]\n self.results = queryset.execute()\n\n self.page = self.results[:self.page_size]\n\n return self.results, self.page",
"def get_paginate_data(self, *args, **kwargs):\n pass",
"def paginate(self, data):\n page = self.request.GET.get('page', None)\n if page:\n return self.paginate_data(data, page)\n\n return data, {}",
"def paginate_view(request, query_set, page=None, num_items=None):\n if page is None:\n page = request.GET.get('page', default=1)\n if num_items is None:\n num_items = request.GET.get('num_items', default=10)\n paginator = Paginator(query_set, num_items)\n try:\n data_set = paginator.page(page)\n except EmptyPage:\n data_set = paginator.page(paginator.num_pages)\n return data_set, num_items, page",
"def __paginate__(data, req):\n uri = req.uri\n\n links = list()\n links.append(dict(href=req.uri, rel='self'))\n\n if 'offset' in data:\n uri = re.sub('offset=.*?(&|$)', '', uri)\n index = uri.find('?')\n if index > 0 and len(req.query_context.keys()) > 0:\n uri = uri[:index + 1] + 'offset=' + str(data.pop('offset')) + '&' + uri[index + 1:]\n elif index > 0:\n uri = uri[:index + 1] + 'offset=' + str(data.pop('offset')) + uri[index + 1:]\n else:\n uri = uri[:index + 1] + '?offset=' + str(data.pop('offset')) + uri[index + 1:]\n links.append(dict(href=uri, rel='next'))\n\n data['links'] = links",
"def get_default_paginate_by(self, queryset):\n return self.paginate_by",
"def paginate_queryset(self, queryset, request, view=None):\n page_size = self.get_page_size(request)\n if not page_size:\n return None\n\n paginator = self.django_paginator_class(queryset, page_size)\n page_number = self.page_query_param\n if page_number in self.last_page_strings:\n page_number = paginator.num_pages\n\n try:\n self.page = paginator.page(page_number)\n except Exception as exc:\n msg = self.invalid_page_message.format(page_number=page_number,\n message=str(exc))\n raise NotFound(msg)\n\n if paginator.num_pages > 1 and self.template is not None:\n # The browsable API should display pagination controls.\n self.display_page_controls = True\n\n self.request = request\n return list(self.page)",
"def paginate_queryset(self, queryset, request, view=None):\n page_size = self.get_page_size(request)\n if not page_size:\n return None\n\n paginator = self.django_paginator_class(queryset, page_size)\n page_number = self.page_query_param\n if page_number in self.last_page_strings:\n page_number = paginator.num_pages\n\n try:\n self.page = paginator.page(page_number)\n except Exception as exc:\n msg = self.invalid_page_message.format(page_number=page_number,\n message=str(exc))\n raise NotFound(msg)\n\n if paginator.num_pages > 1 and self.template is not None:\n # The browsable API should display pagination controls.\n self.display_page_controls = True\n\n self.request = request\n return list(self.page)",
"def get_current_page(request, objects, **kwargs):\n # Show 25 items per page by default\n paginator = Paginator(objects, kwargs.get('slice', 25))\n page = request.GET.get('page')\n paginator._count = kwargs.get('count')\n try:\n objects = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n objects = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n objects = paginator.page(paginator.num_pages)\n\n return objects",
"def build_page(self):\n try:\n page_no = int(self.request.GET.get('page', 1))\n except (TypeError, ValueError):\n raise Http404(\"Not a valid number for page.\")\n\n if page_no < 0:\n raise Http404(\"Pages should be 0 or greater.\")\n\n # Suppress pagination by creating a single page with all results\n if page_no == 0:\n paginator = Paginator(self.results, self.results.count())\n return (paginator, paginator.page(1))\n else:\n return super(JsonFacetedSearchView, self).build_page()",
"def paginate_queryset(self, queryset, request, view=None):\n self.page_number_pagination = None\n if request.GET.get('q'):\n self.page_number_pagination = CustomPageNumberPagination()\n return self.page_number_pagination.paginate_queryset(\n queryset, request, view=view\n )\n\n self.base_url = request.build_absolute_uri()\n self.ordering = self.get_ordering(request, queryset, view)\n\n self.cursor = self.decode_cursor(request)\n if self.cursor is None:\n (offset, reverse, current_position) = (0, False, None)\n else:\n (offset, reverse, current_position) = self.cursor\n\n # Cursor pagination always enforces an ordering.\n if reverse:\n queryset.add_sort(*_reverse_ordering(self.ordering))\n else:\n queryset.add_sort(*self.ordering)\n\n # If we have a cursor with a fixed position then filter by that.\n if current_position is not None:\n order = self.ordering[0]\n is_reversed = order.startswith('-')\n order_attr = order.lstrip('-')\n\n # Test for: (cursor reversed) XOR (queryset reversed)\n if self.cursor.reverse != is_reversed:\n kwargs = {order_attr: {'lt': current_position}}\n else:\n kwargs = {order_attr: {'gt': current_position}}\n\n queryset.add_pagination_filter(kwargs)\n\n # If we have an offset cursor then offset the entire page by that amount.\n # We also always fetch an extra item in order to determine if there is a\n # page following on from this one.\n queryset = queryset[offset:offset + self.page_size + 1]\n logger.info('ES query: %s', json.dumps(queryset._s.to_dict()))\n results = queryset.execute()\n\n self.page = results[:self.page_size]\n if reverse:\n self.page = list(reversed(self.page))\n\n # Determine the position of the final item following the page.\n if len(results) > len(self.page):\n has_following_position = True\n following_position = self._get_position_from_instance(\n results[-1], self.ordering\n )\n else:\n has_following_position = False\n following_position = None\n\n if reverse:\n # If we have a reverse queryset, then the query ordering was in reverse\n # so we need to reverse the items again before returning them to the user.\n\n # Determine next and previous positions for reverse cursors.\n self.has_next = (current_position is not None) or (offset > 0)\n self.has_previous = has_following_position\n if self.has_next:\n self.next_position = current_position\n if self.has_previous:\n self.previous_position = following_position\n else:\n # Determine next and previous positions for forward cursors.\n self.has_next = has_following_position\n self.has_previous = (current_position is not None) or (offset > 0)\n if self.has_next:\n self.next_position = following_position\n if self.has_previous:\n self.previous_position = current_position\n\n # Display page controls in the browsable API if there is more\n # than one page.\n if (self.has_previous or self.has_next) and self.template is not None:\n self.display_page_controls = True\n\n return results, self.page",
"def get_page(request, queryset, per_page=None):\n if per_page is None:\n per_page = get_site().preferences.stream_count\n paginator = Paginator(queryset, per_page)\n try:\n pagenum = int(request.GET.get('page', 1))\n if pagenum < 1:\n pagenum = 1\n except ValueError:\n pagenum = 1\n try:\n page = paginator.page(pagenum)\n except InvalidPage:\n page = paginator.page(paginator.num_pages)\n return page",
"def pagination(self, lastValue=None, sortKey=\"_id\", limit=10, asc=\"ASC\"):\n comparison = \"\"\n if lastValue is not None:\n comparison = sortKey + \" > \" + sanitize_value(lastValue)\n limit = int(limit)\n if asc != \"ASC\" and asc != \"DESC\":\n asc = \"ASC\"\n results = self.__run(\n pagination_template.substitute(\n tablename=self.tablename,\n comparison=comparison,\n sortKey=sortKey,\n asc=asc,\n limit=limit\n ),\n )\n return results",
"def get_querystring_for_page(request, page_number, querystring_key,\n default_number=1, prefix=\"?\"):\n querydict = request.GET.copy()\n querydict[querystring_key] = page_number\n # for page number 1 there is no need for querystring\n if page_number == default_number:\n del querydict[querystring_key]\n if \"querystring_key\" in querydict:\n del querydict[\"querystring_key\"]\n if querydict:\n return \"%s%s\" % (prefix, querydict.urlencode())\n return \"\"",
"def paginate_queryset(self, queryset, request, view=None):\n self.count = self.get_count(queryset)\n self.start_index = 0\n self.end_index = self.start_index + self.page_size - 1\n\n # TODO: this logic is repeated below...\n if self.end_index > self.count - 1:\n self.end_index = self.count - 1 if self.count else 0\n\n range_string = request.GET.get(self.range_query_param)\n\n if range_string:\n try:\n page_range = json.loads(range_string)\n except json.JSONDecodeError:\n return None\n\n if len(page_range) != 2:\n return None\n\n self.start_index, self.end_index = [pagination._positive_int(x) for x in page_range]\n\n if self.end_index > self.count - 1:\n self.end_index = self.count - 1 if self.count else 0\n\n if self.start_index > self.end_index:\n self.start_index = self.end_index\n\n return list(queryset[self.start_index:self.end_index + 1])",
"def mk_paginator(request, items, num_items):\n paginator = Paginator(items, num_items)\n try: page = int(request.GET.get(\"page\", '1'))\n except ValueError: page = 1\n\n try:\n items = paginator.page(page)\n except (InvalidPage, EmptyPage):\n items = paginator.page(paginator.num_pages)\n return items",
"def mk_paginator(request, items, num_items):\n paginator = Paginator(items, num_items)\n try:\n page = int(request.GET.get(\"page\", '1'))\n except ValueError:\n page = 1\n\n try:\n items = paginator.page(page)\n except (InvalidPage, EmptyPage):\n items = paginator.page(paginator.num_pages)\n return items",
"def paginate_queryset(self, queryset):\n if self.paginator is None:\n return None\n return self.paginator.paginate_queryset(queryset, self.request, view=self)",
"def paginate_queryset(self, queryset):\n if self.paginator is None:\n return None\n return self.paginator.paginate_queryset(queryset, self.request, view=self)",
"def _other_page_querystring(self, page_number):\n if self.paginator.request:\n self.base_queryset['page'] = page_number\n return self.base_queryset.urlencode()\n\n # raise Warning(\"You must supply Paginator() with the request object for a proper querystring.\")\n return 'page=%s' % page_number",
"def paginated(self) -> global___Snippet.Paginated:",
"def get_page_number_from_request(request, querystring_key, default=1):\n try:\n return int(request.REQUEST[querystring_key])\n except (KeyError, TypeError, ValueError):\n return default",
"def paginate(query, count, total, offset):\n # type: (str, int, int, int) -> None\n if count < total and count == iwm.SEARCH_MAX_RESULTS:\n offset += 1\n if offset > 1:\n add_menu_item(search, \"[{} 1]\".format(ku.localize(32011)), {\"q\": query, \"offset\": 1})\n add_menu_item(search, \"[{} {}]\".format(ku.localize(32011), offset), {\"q\": query, \"offset\": offset})\n add_menu_item(index, \"[{}]\".format(ku.localize(32012)))",
"def paginate(self, klass=Paginator, per_page=None, page=1, *args, **kwargs):\r\n per_page = per_page or self._meta.per_page\r\n self.paginator = klass(self.rows, per_page, *args, **kwargs)\r\n self.page = self.paginator.page(page)",
"def page(self, request):\n draw = request.GET.get('draw', 0)\n length = int(request.GET.get('length', 5))\n start = int(request.GET.get('start', 0))\n order_column = int(request.GET.get('order[0][column]', 0))\n order_direction = request.GET.get('order[0][dir]', 'asc')\n search_keyword = request.GET.get('search[value]', '')\n raise NotImplementedError"
]
| [
"0.6643049",
"0.644725",
"0.6387844",
"0.612937",
"0.60254294",
"0.6023745",
"0.5997623",
"0.5988467",
"0.59765965",
"0.596013",
"0.59556097",
"0.5948403",
"0.5948403",
"0.5920509",
"0.5831184",
"0.581555",
"0.5793978",
"0.57869935",
"0.5746473",
"0.5735289",
"0.57138014",
"0.5711912",
"0.57044125",
"0.57044125",
"0.5683673",
"0.5679021",
"0.5675001",
"0.5661279",
"0.5649068",
"0.56202036"
]
| 0.6571558 | 1 |
This function saves the image to a designated directory | def save_image(dirname, filename, img):
    if not os.path.exists(dirname):
os.makedirs(dirname)
cv2.imwrite(dirname+filename+".bmp", img) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_output_image_to_directory(self):\n curr_directory = os.path.dirname(os.path.abspath(__file__))\n images_dir = curr_directory + \"/images/\"\n if not os.path.exists(images_dir):\n os.makedirs(images_dir)\n self.output_image_name = md5(str(uuid4()).encode()).hexdigest() + \".png\"\n image_file_name = images_dir + self.output_image_name\n self.output_image.save(image_file_name)\n logger.info(\"Image file saved locally : %s\", image_file_name)",
"def _save_image(self, image_name, image, output_dir):\n dst = '{}/{}'.format(output_dir, self._image_filename(image_name))\n os.makedirs(output_dir, exist_ok=True)\n try:\n with open(dst, 'wb') as f:\n for chunk in image.save(named=self.image_registry_name(image_name)):\n f.write(chunk)\n log.info('Image {} saved as {}'.format(image_name, dst))\n except Exception as err:\n if os.path.isfile(dst):\n os.remove(dst)\n raise err",
"def save_image(image):\n if config['save_images']['enabled']:\n directory = config['save_images']['destination']\n filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S%f\") + '.jpg'\n destination = os.path.join(directory, filename)\n logging.debug('saving image to %s', destination)\n f = open(destination, 'wb')\n f.write(image)\n f.close",
"def save(img, path, file_name):\n\n name = os.path.join(path,file_name).replace('/', os.sep)\n\n io.imsave(name,img)",
"def _save(filename, img):\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n # filename = filename+'.png'\n filename = os.path.join(OUTPUT_DIR, filename)\n # print(filename, img.shape)\n cv.imwrite(filename, img)",
"def write(img, path):\n create_directories_for_file_name(path)\n writer = sitk.ImageFileWriter()\n writer.Execute(img, path, True)",
"def save_image(image, file_name):\n io.imsave(file_name,image)",
"def save_image(image, output_folder, output_name):\n\n\tfolder_path = compute_path(output_folder, 'dataset')\n\tos.makedirs(folder_path, exist_ok=True)\n\n\tfile_path = os.path.join(folder_path, output_name + '.png')\n\timage.save(file_path)",
"def save_image(img, path):\n cv2.imwrite(path, img)",
"def save(self, path):\n dirname = osp.dirname(osp.abspath(path))\n if not osp.isdir(dirname):\n os.mkdir(dirname)\n image = self.build()\n LOGGER.info(\"Save image '%s'\", path)\n image.save(path)\n return image",
"def save_image(img: Image, filename: str) -> None:\r\n img.save(filename)",
"def save(im, output_dir: Path):\n if not hasattr(save, \"counter\"):\n save.counter = 0 # type: ignore\n fname = f\"{save.counter:05d}.jpg\" # type: ignore\n cv2.imwrite(str(output_dir / fname), im)\n print(\"Saved\", fname)\n save.counter += 1 # type: ignore",
"def save(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n\n if not name:\n raise ValueError, \"name is required\"\n\n if extension:\n format = Image.image_format(extension)\n else:\n format = self.format\n filename = name + self.extension\n path = os.path.join(folder, filename)\n\n image = self.convert(format)\n if image._contents:\n f = open(path, \"wb\")\n f.write(image._contents)\n f.close()\n else:\n image.pil_image.save(path, format)\n\n return path",
"def save_image(image: FileStorage):\n base_path = Path(current_app.config[\"USER_DIR\"]) / \"images\"\n fileparts = image.filename.rsplit(\".\", 1)\n sanitized_filename = secure_filename(fileparts[0])\n dest_path = base_path / f\"{sanitized_filename}.{fileparts[1]}\"\n i = 1\n while dest_path.exists():\n dest_path = base_path / f\"{sanitized_filename}-{i}.{fileparts[1]}\"\n i += 1\n image.save(str(dest_path))\n return dest_path.parts[-1]",
"def save_image(file, filename):\n\n # create folder for storing images if not exist\n if not os.path.exists(app.config['UPLOAD_FOLDER']):\n os.makedirs(app.config['UPLOAD_FOLDER'])\n\n #img = file.read()\n #img = Image.open(io.BytesIO(img))\n #img = img.resize((299, 299))\n\n\n # save image to local directory\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n file.save(file_path)\n\n return file_path",
"def save_file(self, input_path, output_path):\n try:\n im = Image.open(input_path)\n im.save(output_path)\n return output_path\n except Exception as e:\n return '!ERROR' + str(e)",
"def img_save(self):\n file_name, extension = return_folder_file_extension(self.img_name)[1:]\n image_name_save = \"%s_D=%s_Rs=%s_size=%s_offset=%i%s\" % (file_name, self.D, self.Rs, self.axe_X, self.offset_X+self.offset_X2, extension)\n\n if self.img2 is not None:\n self.img2.save(image_name_save)\n print(\"Saved \"+image_name_save)\n else:\n print(\"No image to save\")",
"def save_to_image(img, filename):\n filename = os.path.join(datadir, filename + '.png')\n print('Saving: ' + filename)\n img.to_pil().save(filename)",
"def imwrite(img, file_path, params=None, auto_mkdir=True):\n if auto_mkdir:\n dir_name = os.path.abspath(os.path.dirname(file_path))\n os.makedirs(dir_name, exist_ok=True)\n ok = cv2.imwrite(file_path, img, params)\n if not ok:\n raise IOError('Failed in writing images.')",
"def save_pic(self, path_pic, pic):\n path_dir = os.path.dirname(path_pic)\n if not os.path.exists(path_dir):\n print(\"[INFO] Directory \\\"{}\\\" does not exist, creating...\"\n .format(path_dir))\n os.makedirs(path_dir)\n\n cv2.imwrite(path_pic, pic)",
"def __save_to_dir(self, imagelist, prefix, PATH):\n for pair in imagelist:\n directory = os.path.join(PATH, pair[1])\n if not os.path.exists(directory):\n os.mkdir(directory)\n filename = prefix + pair[2]\n pair[0].save(os.path.join(directory, filename))\n print(\"Saved \" + os.path.join(directory, filename))",
"def save_image(self):\n self.save()",
"def save(self, filepath):\n self.drawer.flush()\n self.img.save(filepath)",
"def save(self, path):\n (folder, filename) = os.path.split(path)\n if not filename:\n filename = _clean_filename(self.name)\n path = os.path.join(folder, filename)\n return self.image.save(path)",
"def save_image(filename):\n subprocess(\"camera_save_image(%r)\" % filename)\n ##image = acquire_image()\n ##image.save(filename)",
"def save_image(path, data):\n misc.imsave(path, data)",
"def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")",
"def archive_image(self, img):\n \n try:\n imgname = \"roboimg\" + str(int(time.time())) + \".png\"\n imgpath = os.path.join(self.imgdir, imgname)\n # print(\"Pic name \" + imgpath)\n\n cv2.imwrite(imgpath, img)\n except:\n self.logger.error(\"archive_image failed %s\" % (imgpath))",
"def img_save(name,img):\n cv2.imwrite(name,img)",
"def save_photo():\n save_name = filedialog.asksaveasfilename(initialdir=\"10.APOD Viewer/\", title=\"Save Image\", filetype=((\"JPEG\", \"*.jpg\"), (\"All Files\", \"*.*\")))\n img.save(save_name + \".jpg\")"
]
| [
"0.7956009",
"0.78306276",
"0.76816463",
"0.7545857",
"0.7493665",
"0.74181014",
"0.73043764",
"0.7270899",
"0.7195625",
"0.71778727",
"0.7177214",
"0.71288425",
"0.7123138",
"0.70690674",
"0.7051356",
"0.7048735",
"0.7046836",
"0.7028891",
"0.70088464",
"0.69998574",
"0.69866985",
"0.6954471",
"0.6908166",
"0.68938106",
"0.6892136",
"0.6876007",
"0.68573195",
"0.68458366",
"0.6836399",
"0.6826137"
]
| 0.79338574 | 1 |
Prints a sentence describing the battery capacity | def descritivo_bateria(self) -> None:
print('Este carro tem bateria de ' + str(self.tamanho_bateria) + '-KWh.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def primera_palabra_mayuscula(cadena):\n palabras = cadena.split(\" \")\n frase_final = \"\"\n for palabra in palabras: # recorro la palabra separada \n frase_final += palabra.capitalize() + \" \" # agarro la palabra separado y la primera letra la pongo en mayuscula \n return frase_final",
"def abbrev_help_more_text(self):\n pass",
"async def badman(self, ctx):\n await ctx.message.edit(content=\"̿̿ ̿̿ ̿̿ ̿'̿'\\̵͇̿̿\\з= ( ▀ ͜͞ʖ▀) =ε/̵͇̿̿/’̿’̿ ̿ ̿̿ ̿̿ ̿̿\")",
"def banner_ascii():\n print(\"\")\n print(f\"\\n{RED} Steganography Tool{RESET}\")\n print(f\"{RED} Made By {RESET}\")\n print(f\"{RED} Ehthe Samul Islam Laskar USN:1DS16CS712 {RESET}\")\n print(f\"{RED} B Padma USN:1DS19CS420{RESET}\")\n print(f\"{RED} Nikhil D Kanyal USN:1DS17CS731{RESET}\")\n print(f\"{YELLOW}Type 'help' to see commands{RESET}\")",
"def __fCap__(self, word, position):\n if word[0].isupper():\n if position == 0:\n return self.__return_feature_index__((\"fCapStart\", \"fCapStart\"))\n elif position > 0:\n return self.__return_feature_index__((\"fCapNoStart\", \"fCapNoStart\"))\n else:\n return False",
"async def bluetext(bte):\n if not bte.text[0].isalpha() and bte.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n if await bte.get_reply_message():\n await bte.edit(\n \"`BLUETEXT MUST CLICK.`\\n\"\n \"`Are you a stupid animal which is attracted to colours?`\"\n )",
"def bienvenida():\n print(\"=\"*100)\n print(\"Bienvenido rey Arturito, con este programa lograran realizar los calculos, para cerrar\",\n \"la puerta \\ny mantener seguro el castillo.\")\n print(\"=\"*100)",
"def explode(self):\n potency = self.flammability * self.weight\n if potency < 10:\n return '...fizzle.'\n elif potency < 50:\n return '...boom!'\n else:\n return '...BABOOM!!'",
"def explode(self):\n\n comb = self.flammability * self.weight\n if comb < 10:\n return \"...fizzle.\"\n elif comb >= 10 and comb < 50:\n return \"...boom!\"\n return \"...BABOOM!!\"",
"def verse_2():\n print(\"Old MacDonald had a farm\")\n print(\"E-I-E-I-O\")",
"async def bluetext(bt_e):\n if await bt_e.get_reply_message() and bt_e.is_group:\n await bt_e.edit(\n \"/BLUETEXT /MUST /CLICK.\\n\"\n \"/ARE /YOU /A /STUPID /ANIMAL /WHICH /IS /ATTRACTED /TO /COLOURS?\")",
"def abrevia(self, summary, sumlenght):\n bb = ''\n\n if sumlenght < len(summary):\n bb = summary[:sumlenght]\n\n lastspace = bb.rfind(' ')\n cutter = lastspace\n precut = bb[0:cutter]\n\n if precut.count('<b>') > precut.count('</b>'):\n cutter = summary.find('</b>', lastspace) + 4\n bb = summary[0:cutter]\n if precut.count('<strong>') > precut.count('</strong>'):\n cutter = summary.find('</strong>', lastspace) + 9\n bb = summary[0:cutter]\n\n if bb.count('<p') > precut.count('</p'):\n bb += '...</p>'\n else:\n bb = bb + '...'\n else:\n bb = summary\n\n return bb",
"def capacitance(self):\n return None",
"def poem(desc: Any) -> str:\n desc = str(desc)\n\n if len(desc) < 23:\n return desc + ' ' * (23 - len(desc))\n else:\n return desc[:20] + '...'",
"def caps(self, text):\n\n cap_finder = re.compile(r\"\"\"\n ( # Start group capture\n (?=(:?\\d*[A-Z]){2}) # Positive look ahead: At least two caps interspersed with any amount of digits must exist\n (?:[A-Z\\d']*) # Any amount of caps, digits or dumb apostrophes\n | # Or\n (?:[A-Z]+\\.\\s??){2,} # Caps followed by '.' must be present at least twice (note \\s?? which is non-greedy)\n ) # End group capture\n \"\"\", re.VERBOSE)\n \n replace_function = lambda match: \"\"\"<span class=\"caps\">%s</span>\"\"\" % match.group(1)\n text = cap_finder.sub(replace_function, text)\n\n return text",
"def get_abbreviated_description(self):\n word_array = str(self.description).split()[:25]\n abbreviated_description = \" \".join(word_array)\n return abbreviated_description",
"def just_do_it(text):\n from string import capwords\n return capwords(text)",
"def input_desc():\r\n desc = input(\"Entrez ici les remarques générales du directeur du tournoi: \")\r\n return desc",
"async def fivedollar(self, ctx):\n await ctx.message.edit(content=\"[̲̅$̲̅(̲̅5̲̅)̲̅$̲̅]\")",
"def description() -> str:\n content = \"Demonstrates usage of blackbord remappings.\\n\"\n content += \"\\n\"\n content += \"Demonstration is via an exemplar behaviour making use of remappings..\\n\"\n\n if py_trees.console.has_colours:\n banner_line = console.green + \"*\" * 79 + \"\\n\" + console.reset\n s = banner_line\n s += console.bold_white + \"Blackboard\".center(79) + \"\\n\" + console.reset\n s += banner_line\n s += \"\\n\"\n s += content\n s += \"\\n\"\n s += banner_line\n else:\n s = content\n return s",
"def correct_cap(title):\n try:\n fl = fln[title]\n return title\n except:\n #capitalize first letter only\n try:\n fl = fln[title[0].upper() + title[1:]]\n return title[0].upper() + title[1:]\n except:\n #try title case\n try:\n fl = fln[title.title()]\n return title.title()\n except KeyError:\n return \"\"",
"def is_cap_word(self, word):\n try:\n return word[0].isupper()\n except:\n return False",
"def salom_ber(ism):\n print(f\"Assalomu alaykum, hurmatli {ism.title()}!\")",
"def my_banner(bannerString):\n print(len(bannerString) * \"!\")\n print(bannerString)\n print(len(bannerString) * \"!\")",
"def testFalseCapTitle(self):\n val = capTitles(\"victor Ifezue\") \n self.assertNotEqual(val, \"victor Ifezue\")",
"async def copypasta(cp_e):\n if not cp_e.text[0].isalpha() and cp_e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n textx = await cp_e.get_reply_message()\n message = cp_e.pattern_match.group(1)\n\n if message:\n pass\n elif textx:\n message = textx.text\n else:\n await cp_e.edit(\"`😂🅱️IvE👐sOME👅text👅for✌️Me👌tO👐MAkE👀iT💞funNy!💦`\")\n return\n\n reply_text = random.choice(EMOJIS)\n # choose a random character in the message to be substituted with 🅱️\n b_char = random.choice(message).lower()\n for owo in message:\n if owo == \" \":\n reply_text += random.choice(EMOJIS)\n elif owo in EMOJIS:\n reply_text += owo\n reply_text += random.choice(EMOJIS)\n elif owo.lower() == b_char:\n reply_text += \"🅱️\"\n else:\n if bool(random.getrandbits(1)):\n reply_text += owo.upper()\n else:\n reply_text += owo.lower()\n reply_text += random.choice(EMOJIS)\n await cp_e.edit(reply_text)",
"def explode(self):\n return \"...it's a glove.\"",
"def Prints_banana_when_removing_z_from_banana():\n check50.run(\"python3 remove_letter.py\"\n ).stdin(\"banana\", prompt=True\n ).stdin(\"z\", prompt=True\n ).stdout(\"banana\", regex=False\n ).exit()",
"def bulbs_finale():\n check50.run(\"./bulbs\").stdin(\"In my younger and more vulnerable years my father gave me some advice that I've been turning over in my mind ever since.\").stdout(\"⚫🟡⚫⚫🟡⚫⚫🟡\\n⚫🟡🟡⚫🟡🟡🟡⚫\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫🟡🟡⚫🟡\\n⚫🟡🟡🟡🟡⚫⚫🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡🟡🟡⚫⚫🟡\\n⚫🟡🟡⚫🟡🟡🟡🟡\\n⚫🟡🟡🟡⚫🟡⚫🟡\\n⚫🟡🟡⚫🟡🟡🟡⚫\\n⚫🟡🟡⚫⚫🟡🟡🟡\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫🟡🟡🟡⚫⚫🟡⚫\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫⚫⚫⚫🟡\\n⚫🟡🟡⚫🟡🟡🟡⚫\\n⚫🟡🟡⚫⚫🟡⚫⚫\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫🟡🟡⚫🟡\\n⚫🟡🟡⚫🟡🟡🟡🟡\\n⚫🟡🟡🟡⚫⚫🟡⚫\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡🟡⚫🟡🟡⚫\\n⚫🟡🟡🟡⚫🟡⚫🟡\\n⚫🟡🟡⚫🟡🟡⚫⚫\\n⚫🟡🟡⚫🟡🟡🟡⚫\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫🟡🟡🟡⚫⚫🟡⚫\\n⚫🟡🟡⚫⚫⚫⚫🟡\\n⚫🟡🟡⚫⚫⚫🟡⚫\\n⚫🟡🟡⚫🟡🟡⚫⚫\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡🟡🟡⚫⚫🟡\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫🟡🟡⚫⚫⚫⚫🟡\\n⚫🟡🟡🟡⚫⚫🟡⚫\\n⚫🟡🟡🟡⚫⚫🟡🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫🟡🟡⚫🟡\\n⚫🟡🟡🟡🟡⚫⚫🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫⚫🟡🟡⚫\\n⚫🟡🟡⚫⚫⚫⚫🟡\\n⚫🟡🟡🟡⚫🟡⚫⚫\\n⚫🟡🟡⚫🟡⚫⚫⚫\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫🟡🟡🟡⚫⚫🟡⚫\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫⚫🟡🟡🟡\\n⚫🟡🟡⚫⚫⚫⚫🟡\\n⚫🟡🟡🟡⚫🟡🟡⚫\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫🟡🟡⚫🟡\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡🟡⚫⚫🟡🟡\\n⚫🟡🟡⚫🟡🟡🟡🟡\\n⚫🟡🟡⚫🟡🟡⚫🟡\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫⚫⚫⚫🟡\\n⚫🟡🟡⚫⚫🟡⚫⚫\\n⚫🟡🟡🟡⚫🟡🟡⚫\\n⚫🟡🟡⚫🟡⚫⚫🟡\\n⚫🟡🟡⚫⚫⚫🟡🟡\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡🟡⚫🟡⚫⚫\\n⚫🟡🟡⚫🟡⚫⚫⚫\\n⚫🟡🟡⚫⚫⚫⚫🟡\\n⚫🟡🟡🟡⚫🟡⚫⚫\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡⚫⚫🟡⚫⚫🟡\\n⚫⚫🟡⚫⚫🟡🟡🟡\\n⚫🟡🟡🟡⚫🟡🟡⚫\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫⚫⚫🟡⚫\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫🟡🟡⚫🟡🟡🟡⚫\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡🟡⚫🟡⚫⚫\\n⚫🟡🟡🟡⚫🟡⚫🟡\\n⚫🟡🟡🟡⚫⚫🟡⚫\\n⚫🟡🟡⚫🟡🟡🟡⚫\\n⚫🟡🟡⚫🟡⚫⚫🟡\\n⚫🟡🟡⚫🟡🟡🟡⚫\\n⚫🟡🟡⚫⚫🟡🟡🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫🟡🟡🟡🟡\\n⚫🟡🟡🟡⚫🟡🟡⚫\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫🟡🟡🟡⚫⚫🟡⚫\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫🟡⚫⚫🟡\\n⚫🟡🟡⚫🟡🟡🟡⚫\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫🟡🟡⚫🟡\\n⚫🟡🟡🟡🟡⚫⚫🟡\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫🟡🟡⚫🟡\\n⚫🟡🟡⚫🟡⚫⚫🟡\\n⚫🟡🟡⚫🟡🟡🟡⚫\\n⚫🟡🟡⚫⚫🟡⚫⚫\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫🟡🟡🟡⚫🟡🟡⚫\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫🟡🟡🟡⚫⚫🟡⚫\\n⚫⚫🟡⚫⚫⚫⚫⚫\\n⚫🟡🟡🟡⚫⚫🟡🟡\\n⚫🟡🟡⚫🟡⚫⚫🟡\\n⚫🟡🟡⚫🟡🟡🟡⚫\\n⚫🟡🟡⚫⚫⚫🟡🟡\\n⚫🟡🟡⚫⚫🟡⚫🟡\\n⚫⚫🟡⚫🟡🟡🟡⚫\\n\").exit(0)",
"def get_description(lines):\n # najdeme zacatek obsahu\n begin = [idx for idx, l in enumerate(lines) if MATERIALY_BEGIN_RE.match(l)]\n if not begin:\n raise Neco(u'Nepodařilo se mi nalézt počátek bloku s obsahem.')\n begin = begin[0]\n\n # najdeme konec obsahu\n end = [idx for idx, l in enumerate(lines) if MATERIALY_END_RE.match(l)]\n if not end:\n raise Neco(u'Nepodařilo se mi nalézt konec bloku s obsahem.')\n end = end[0]\n\n # vyzobneme si obsah\n return lines[begin+1:end]"
]
| [
"0.5560311",
"0.55506593",
"0.548226",
"0.5430165",
"0.534019",
"0.5291365",
"0.527517",
"0.5225864",
"0.5213224",
"0.5199035",
"0.5184053",
"0.51668406",
"0.51542085",
"0.51403135",
"0.513504",
"0.5109156",
"0.51080704",
"0.51006496",
"0.50771993",
"0.50429213",
"0.5034565",
"0.50273174",
"0.5023682",
"0.50110847",
"0.49899858",
"0.49748504",
"0.49602273",
"0.4950958",
"0.49421737",
"0.49378324"
]
| 0.61784256 | 0 |
Function to add padding to an image | def add_padding(im, pad):
return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def padding(image, padded_size):\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n\n padded_image = np.zeros((image_row + padded_size*2, image_col + padded_size*2)) #matriz de imagen con padding en zeros\n print(\"Padded image zeros:\")\n print(padded_image)\n\n padded_image[padded_size:padded_size + image_row, padded_size:padded_size + image_col] = image #matriz de imagen con padding\n print(\"Padded image:\")\n print(padded_image)\n\n \n return padded_image",
"def add_padding(img, x_padding):\n w = img.shape[1] + x_padding * 2\n img_with_padding = np.zeros((img.shape[0], w, 3), dtype=img.dtype)\n img_with_padding[:, x_padding:img.shape[1] + x_padding] = img\n return img_with_padding",
"def _pad_image(self, img: ndarray, pad_width: int = 10) -> ndarray:\n self.padded_img = np.zeros(\n (img.shape[0] + pad_width*2, img.shape[1]+pad_width*2))\n self.padded_img[pad_width:-pad_width, pad_width:-pad_width] = img\n return self.padded_img",
"def insert_padding(img, pad_h, pad_w):\n global frame_height, frame_width\n padding_3_dims = ((pad_h, pad_h), (pad_w, pad_w), (0, 0))\n # apply padding in the above dimensions with values 0\n padded_img = numpy.pad(img, padding_3_dims, 'constant', constant_values=0)\n return padded_img",
"def add_white_padding(img, width=WIDTH, height=HEIGHT):\n top = max(0, height)\n right = max(0, width)\n \n result = np.full((top, right), 255)\n\n result[result.shape[0]-img.shape[0]:result.shape[0],:img.shape[1]] = img\n \n return result",
"def run_padding(self):\n\n image_padded, mask, self.pad_to_right, self.pad_to_bottom = gen_padded_image_and_mask (os.path.join('utils_dfn/temp', self.file_name_with_ext),\n self.new_height, self.new_width)\n cv2.imwrite(os.path.join('utils_dfn/img', self.file_name + '_padded_resized.png'), image_padded)\n cv2.imwrite(os.path.join('utils_dfn/mask', self.file_name + '_mask.png'), mask)",
"def padding(img, n):\n img = np.pad(img, [(n, n), (n, n)], mode='constant', constant_values=0)\n\n return img",
"def pad_img(image, label):\n paddings = [[2,2],[2,2],[0,0]]\n return tf.pad(image, paddings, mode=\"CONSTANT\", constant_values=0.0), label",
"def pad_image(img, output_path, pad_size=[8,8,8,8], buckets=None):\n top, left, bottom, right = pad_size\n old_im = Image.open(img)\n old_size = (old_im.size[0] + left + right, old_im.size[1] + top + bottom)\n new_size = get_new_size(old_size, buckets)\n new_im = Image.new(\"RGB\", new_size, (255,255,255))\n new_im.paste(old_im, (left, top))\n new_im.save(output_path)",
"def expand_rect_padding(img_path, padding_x, padding_top, padding_bottom, out_path):\n pil_image_frame = Image.open(img_path)\n im_width, im_height = pil_image_frame.size \n \n n_width = im_width + 2 * padding_x\n n_height = im_height + padding_top + padding_bottom\n \n old_size = (im_width, im_height)\n new_size = (n_width, n_height)\n new_im = Image.new(\"RGB\", new_size, \"white\") \n new_im.paste(pil_image_frame, ((new_size[0]-old_size[0])/2, padding_top)) # insert image into center of new canvas with vertical shift = padding_top \n\n new_im.save(out_path, \"JPEG\")",
"def pad(self, *args, **kwargs):\n return _image.image_pad(self, *args, **kwargs)",
"def padImage(image, padList):\r\n\r\n #pad along far x:<---->\r\n padFarX= np.zeros((image.shape[0], image.shape[1], padList[0]))\r\n image= np.concatenate((image, padFarX), axis=2)\r\n\r\n #pad along far y\r\n padFarY= np.zeros((image.shape[0], padList[1], image.shape[2]))\r\n image= np.concatenate((image, padFarY), axis=1)\r\n\r\n #pad along far z\r\n padFarZ= np.zeros((padList[2], image.shape[1], image.shape[2]))\r\n image= np.concatenate((image, padFarZ), axis=0)\r\n\r\n #pad along close x, adjust center\r\n padCloseX= np.zeros((image.shape[0], image.shape[1], padList[3]))\r\n image= np.concatenate((padCloseX, image), axis=2)\r\n\r\n #pad along close y adjust center\r\n padCloseY= np.zeros((image.shape[0], padList[4], image.shape[2]))\r\n image= np.concatenate((padCloseY, image), axis=1)\r\n\r\n #pad along close z, adjust center\r\n padCloseZ= np.zeros((padList[5], image.shape[1], image.shape[2]))\r\n image= np.concatenate((padCloseZ, image), axis=0)\r\n\r\n\r\n #print \"PADDED IMAGE SHAPE: \" + str(image.shape)\r\n return image",
"def pad(img, pad_size=32):\n\n if pad_size == 0:\n return img\n\n height, width = img.shape[:2]\n\n if height % pad_size == 0:\n y_min_pad = 0\n y_max_pad = 0\n else:\n y_pad = pad_size - height % pad_size\n y_min_pad = int(y_pad / 2)\n y_max_pad = y_pad - y_min_pad\n\n if width % pad_size == 0:\n x_min_pad = 0\n x_max_pad = 0\n else:\n x_pad = pad_size - width % pad_size\n x_min_pad = int(x_pad / 2)\n x_max_pad = x_pad - x_min_pad\n\n img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_REFLECT_101)\n\n return img, (x_min_pad, y_min_pad, x_max_pad, y_max_pad)",
"def pad_to(image,w,h):\n iw,ih = image.shape\n wd = int(w-iw)\n assert wd>=0\n w0 = wd/2\n w1 = wd-w0\n hd = int(h-ih)\n assert hd>=0\n h0 = hd/2\n h1 = hd-h0\n result = zeros((w,h))\n result[w0:w0+iw,h0:h0+ih] = image\n return result",
"def pad_images(_input_image_paths : list[str], _output_image_dir : str, \\\n _pad_colour : tuple[int,int,int]) -> None:\n for image in _input_image_paths:\n with Image.open(image) as image_object:\n\n #Rotate the image based on the EXIF data's orientation tag.\n #Ensures that images taller than they are wide are kept as such when padding\n image_object = PIL.ImageOps.exif_transpose(image_object)\n\n old_x,old_y = image_object.size\n bigger_dimension = max(old_x,old_y)\n\n #Figure out how much extra should be added to each of the four sides\n x_additive = y_additive = 0\n if old_x > old_y:\n y_additive = (old_x - old_y)//2\n\n elif old_y > old_x:\n x_additive = (old_y - old_x)//2\n\n #Create a new, larger image with the requested padding colour,\n # and then paste the original image overtop in the correct position\n new_canvas = Image.new(\"RGB\", (bigger_dimension,bigger_dimension), _pad_colour)\n new_canvas.paste(image_object, (x_additive, y_additive))\n new_canvas.save(_output_image_dir + os.path.basename(image))",
"def pad(img: torch.Tensor, new_size: Union[int, Tuple[int, int]]) ->torch.Tensor:\n new_size = to_tuple(new_size)\n old_size = img.shape[-2:]\n pad_size = (torch.tensor(new_size) - torch.tensor(old_size)) / 2\n padding = torch.cat((torch.floor(pad_size), torch.ceil(pad_size)))\n padding[padding < 0] = 0\n padding = [int(x) for x in padding]\n return F.pad(img, padding=padding, padding_mode='edge')",
"def _do_adaptive_padding(self, im):\n im_sz = list(im.shape)\n dim = len(im_sz)\n dim_to_pad = [dim_sz%self.adaptive_padding!=0 and dim_sz>3 for dim_sz in im_sz]\n dim_rem = [dim_sz//self.adaptive_padding for dim_sz in im_sz]\n new_dim_sz = [(dim_rem[i]+1)*self.adaptive_padding if dim_to_pad[i] else im_sz[i] for i in range(dim)]\n before_id = [(new_dim_sz[i] -im_sz[i]+1)//2 for i in range(dim)]\n after_id = [new_dim_sz[i] - im_sz[i] - before_id[i] for i in range(dim)]\n padding_loc = tuple([(before_id[i],after_id[i]) for i in range(dim)])\n new_img = np.lib.pad(im, padding_loc, 'edge')\n return new_img",
"def pil_pad_image(img, v_pad_before, v_pad_after, h_pad_before, h_pad_after, cval=None):\n # type: (PImage.Image, int, int, int, int, tuple) -> PImage.Image\n\n width = img.width + h_pad_before + h_pad_after\n height = img.height + v_pad_before + v_pad_after\n mode = img.mode\n\n if width == img.width and height == img.height:\n return img\n\n # Make sure the cval is in the correct format if None default to black\n if cval is not None:\n if isinstance(cval, float):\n cval = int(round(cval))\n elif isinstance(cval, int):\n cval = cval\n else:\n cval = np.round(cval).astype(dtype=np.int32)\n cval = tuple(cval)\n else:\n cval = 0\n\n try:\n padded_img = PImage.new(mode=mode, size=(width, height), color=cval)\n padded_img.paste(img, box=(h_pad_before, v_pad_before))\n except TypeError as e:\n print 'ERROR: Could not create new PIL image PImage.new(mode={}, size={}, color={}), error: {}'.format(mode, (width, height), cval, e.message)\n raise e\n\n return padded_img",
"def wrap(img, padding):\n if not transforms.functional._is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n if not isinstance(padding, (int, tuple)):\n raise TypeError('Got inappropriate padding arg')\n \n\n if isinstance(padding, tuple) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, tuple) and len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n if isinstance(padding, tuple) and len(padding) == 4:\n pad_left = padding[0]\n pad_top = padding[1]\n pad_right = padding[2]\n pad_bottom = padding[3]\n\n if img.mode == 'P':\n palette = img.getpalette()\n img = np.asarray(img)\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), 'wrap')\n img = Image.fromarray(img)\n img.putpalette(palette)\n return img\n\n img = np.asarray(img)\n # RGB image\n if len(img.shape) == 3:\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), 'wrap')\n # Grayscale image\n if len(img.shape) == 2:\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), 'wrap')\n\n return Image.fromarray(img)",
"def pad_image(img, target_size):\r\n rows_missing = target_size[0] - img.shape[2]\r\n cols_missing = target_size[1] - img.shape[3]\r\n padded_img = np.pad(img, ((0, 0), (0, 0), (0, rows_missing), (0, cols_missing)), 'constant')\r\n return padded_img",
"def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2",
"def padding_width(self):\n ...",
"def add_context_margin(image, margin_size, **pad_kwargs):\n return np.pad(image,\n ((margin_size, margin_size),\n (margin_size, margin_size),\n (0, 0)), **pad_kwargs)",
"def image_pad(image, pad_width=None, axis=0, mode='symmetric'):\n hei, wid = image.shape[0], image.shape[1]\n\n if pad_width is None:\n th = hei // 10\n tw = wid // 10\n pad_width = ((th, th), (tw, tw), (0, 0))\n if axis == 0:\n if type(pad_width[0]) == tuple:\n pad_width = (pad_width[0], (0, 0), (0, 0))\n else:\n pad_width = (pad_width, (0, 0), (0, 0))\n if axis == 1:\n if type(pad_width[0]) == tuple:\n pad_width = ((0, 0), pad_width[1], (0, 0))\n else:\n pad_width = ((0, 0), pad_width, (0, 0))\n if len(image.shape) == 3:\n newimage = np.pad(image, pad_width, mode)\n elif len(image.shape) == 2:\n newimage = np.squeeze(np.pad(image[:, :, np.newaxis], pad_width, mode))\n\n return cv2.resize(newimage, (wid, hei), interpolation=cv2.INTER_NEAREST)",
"def remove_padding(im, pad):\n\n return im[pad:-pad, pad:-pad]",
"def pad(img, padding, fill=0, mode='constant'):\n check_type(img)\n\n if not isinstance(padding, (numbers.Number, tuple)):\n raise TypeError('Got inappropriate padding arg')\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError('Got inappropriate fill arg')\n\n if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n assert mode in ['constant', ], 'Padding mode should be constant'\n\n return ImageOps.expand(img, border=padding, fill=fill)",
"def pad(img, padding, fill=0, padding_mode='constant'):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if not isinstance(padding, (numbers.Number, tuple)):\n raise TypeError('Got inappropriate padding arg')\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError('Got inappropriate fill arg')\n if not isinstance(padding_mode, str):\n raise TypeError('Got inappropriate padding_mode arg')\n\n if isinstance(padding, Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \\\n 'Padding mode should be either constant, edge, reflect or symmetric'\n\n # if padding_mode == 'constant':\n # aug = iaa.Pad(px=padding, pad_mode=padding_mode, pad_cval=fill, keep_size=False)\n # return aug.augment_image(img)\n # else:\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, Sequence) and len(padding) == 2:\n pad_top = pad_bottom = padding[0]\n pad_left = pad_right = padding[1]\n if isinstance(padding, Sequence) and len(padding) == 4:\n pad_top = padding[0]\n pad_left = padding[1]\n pad_bottom = padding[2]\n pad_right = padding[3]\n\n aug = iaa.CropAndPad(px=(pad_top, pad_right, pad_bottom, pad_left), pad_mode=padding_mode, pad_cval=fill,\n keep_size=False)\n # aug = iaa.CropAndPad(px=(pad_top, pad_right, pad_bottom, pad_left), pad_mode=padding_mode, keep_size=False)\n return aug.augment_image(img)\n\n # # RGB image\n # if len(img.shape) == 3:\n # aug = iaa.Pad(px=((pad_top, pad_bottom), (pad_left, pad_right)),\n # pad_mode=padding_mode, keep_size=False)\n # return aug.augment_image(img)\n # # Grayscale image\n # if len(img.shape) == 2:\n # aug = iaa.Pad(px=((pad_top, pad_bottom), (pad_left, pad_right)),\n # pad_mode=padding_mode, keep_size=False)\n # return aug.augment_image(img)\n\n # return img",
"def __padding(self, image, boxes, height, width):\n temp = boxes[:, :4].astype(np.int)\n y1 = np.where(temp[:, 0] < 0)[0]\n if len(y1) > 0:\n temp[y1, 0] = 0\n x1 = np.where(temp[:, 1] < 0)[0]\n if len(x1) > 0:\n temp[x1, 0] = 0\n y2 = np.where(temp[:, 2] > image.shape[0] - 1)[0]\n if len(y2) > 0:\n temp[y2, 0] = image.shape[0] - 1\n x2 = np.where(temp[:, 3] > image.shape[1] - 1)[0]\n if len(x2) > 0:\n temp[x2, 0] = image.shape[1] - 1\n pad_top = np.abs(temp[:, 0] - boxes[:, 0]).astype(np.int)\n pad_left = np.abs(temp[:, 1] - boxes[:, 1]).astype(np.int)\n pad_bottom = np.abs(temp[:, 2] - boxes[:, 2]).astype(np.int)\n pad_right = np.abs(temp[:, 3] - boxes[:, 3]).astype(np.int)\n input_data = np.empty([boxes.shape[0], 3, height, width], dtype=np.float32)\n for i in range(boxes.shape[0]):\n crop_img = image[temp[i, 0]:temp[i, 2] + 1, temp[i, 1]:temp[i, 3] + 1, :]\n crop_img = cv2.copyMakeBorder(crop_img, pad_top[i], pad_bottom[i], \\\n pad_left[i], pad_right[i], cv2.BORDER_CONSTANT, value=0)\n if crop_img is None:\n continue\n crop_img = cv2.resize(crop_img, (width, height)).astype(np.float32)\n crop_img[:, :, 0] -= self.mean[0]\n crop_img[:, :, 1] -= self.mean[1]\n crop_img[:, :, 2] -= self.mean[2]\n crop_img *= self.scale_factor\n crop_img = np.transpose(crop_img, (2, 0, 1))\n input_data[i] = crop_img.copy()\n return input_data",
"def pad_image(input_img, window_size, padding_mode='symmetric'):\n assert np.isscalar(window_size)\n assert window_size % 2 == 1\n\n # Padding width must be window_size-1 and divided by 2. So that we can check every pixels\n pad_width = int((window_size-1)/2)\n # For each padding_mode, pad differently\n\n # But in result, I chose symmetric cause it seems to have smallest aepe\n if padding_mode == 'symmetric':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n elif padding_mode == 'reflect':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n elif padding_mode == 'constant':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n\n return padded_img",
"def _pad_img(self, results):\n pad_val = self.pad_val.get('img', 0)\n for key in results.get('img_fields', ['img']):\n if self.pad_to_square:\n max_size = max(results[key].shape[:2])\n self.size = (max_size, max_size)\n if self.size is not None:\n padded_img = general_ocr.impad(\n results[key], shape=self.size, pad_val=pad_val)\n elif self.size_divisor is not None:\n padded_img = general_ocr.impad_to_multiple(\n results[key], self.size_divisor, pad_val=pad_val)\n results[key] = padded_img\n results['pad_shape'] = padded_img.shape\n results['pad_fixed_size'] = self.size\n results['pad_size_divisor'] = self.size_divisor"
]
| [
"0.7838249",
"0.7809977",
"0.7655503",
"0.7654041",
"0.7589162",
"0.75757426",
"0.7524358",
"0.73401976",
"0.73400784",
"0.7287376",
"0.72720695",
"0.7152099",
"0.70969987",
"0.69374233",
"0.68974817",
"0.68948",
"0.68466234",
"0.68302625",
"0.67844594",
"0.67728436",
"0.676059",
"0.6756223",
"0.67198646",
"0.6698514",
"0.6695081",
"0.6683348",
"0.66548806",
"0.6645458",
"0.65898395",
"0.6578761"
]
| 0.7822523 | 1 |
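As a quick illustration of the `add_padding` entry above, the sketch below (assuming a NumPy `H x W x C` array; the toy shapes are made up for the example) shows how the symmetric padding grows each spatial dimension by `2 * pad` while leaving the channel axis untouched.

```python
import numpy as np

def add_padding(im, pad):
    # mirror `pad` pixels on each side of the height and width axes only
    return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric')

im = np.zeros((32, 48, 3), dtype=np.uint8)  # toy H x W x C image
padded = add_padding(im, pad=4)
print(padded.shape)  # (40, 56, 3) -- 4 mirrored pixels added on every side
```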
Function for removing padding from an image. | def remove_padding(im, pad):
return im[pad:-pad, pad:-pad] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unpadding(img, n):\n img = img[n:img.shape[0]-n, n:img.shape[1]-n]\n\n return img",
"def trim_image(image):\n bbox = image.getbbox()\n return image.crop(bbox)",
"def trim(im):\n \n bg = Image.new(im.mode, im.size, im.getpixel((0,0)))\n diff = ImageChops.difference(im, bg)\n diff = ImageChops.add(diff, diff, 2.0, -100)\n bbox = diff.getbbox()\n if bbox:\n return im.crop(bbox)",
"def unpad_im(im, factor):\n height = im.shape[0]\n width = im.shape[1]\n\n pad_height = int(np.ceil(height / float(factor)) * factor - height)\n pad_width = int(np.ceil(width / float(factor)) * factor - width)\n\n if len(im.shape) == 3:\n return im[0:height - pad_height, 0:width - pad_width, :]\n elif len(im.shape) == 2:\n return im[0:height - pad_height, 0:width - pad_width]",
"def remove_padding(self, data):\n pad_len = ord(data[-1])\n return data[:-pad_len]",
"def remove_padding_from_bb(boxes, x_padding):\n boxes[boxes[:, 0] < x_padding] = x_padding\n boxes[:, 0] -= x_padding\n boxes[:, 2] -= x_padding\n return boxes",
"def crop_image(img: PilImage, margin: int = 20) -> PilImage:\n # First find the boundaries of the white area\n x0_lim = img.width\n y0_lim = img.height\n x1_lim = 0\n y1_lim = 0\n for x in range(0, img.width):\n for y in range(0, img.height):\n if img.getpixel((x, y)) != (255, 255, 255):\n if x < x0_lim:\n x0_lim = x\n if x > x1_lim:\n x1_lim = x\n if y < y0_lim:\n y0_lim = y\n if y > y1_lim:\n y1_lim = y\n x0_lim = max(x0_lim, 0)\n y0_lim = max(y0_lim, 0)\n x1_lim = min(x1_lim + 1, img.width)\n y1_lim = min(y1_lim + 1, img.height)\n # Then crop to this area\n cropped = img.crop((x0_lim, y0_lim, x1_lim, y1_lim))\n # Then create a new image with the desired padding\n out = Image.new(\n img.mode,\n (cropped.width + 2 * margin, cropped.height + 2 * margin),\n color=\"white\",\n )\n out.paste(cropped, (margin + 1, margin + 1))\n return out",
"def _trim_margins(self, img):\n oldsize = (0, 0)\n while oldsize != img.shape: # while the size is changing\n oldsize = img.shape\n for i in range(4): # 4 times\n img = num.rot90(img) # rotate 90\n if num.std(img[0, :]) < self.trim_std: # if low std\n img = img[1:, :] # trim edge\n\n return img",
"def add_white_padding(img, width=WIDTH, height=HEIGHT):\n top = max(0, height)\n right = max(0, width)\n \n result = np.full((top, right), 255)\n\n result[result.shape[0]-img.shape[0]:result.shape[0],:img.shape[1]] = img\n \n return result",
"def padding(img, n):\n img = np.pad(img, [(n, n), (n, n)], mode='constant', constant_values=0)\n\n return img",
"def RemovePadding(value):\n pad_length = struct.unpack('B', value[-1])[0]\n return value[:-pad_length]",
"def strip(self):\n result = library.MagickStripImage(self.wand)\n if not result:\n self.raise_exception()",
"def remove_pad(x, pad_remover, mode):\n # Concatenate all tokens (without padding)\n x = flatten_all_but_last(x)\n\n # Remove padding for training and eval\n if mode != ModeKeys.PREDICT:\n # This is a hack to allows inference when the <go> token\n # is detected as padding and removed. This works for now because there is\n # no padding at inference.\n x = pad_remover.remove(x)\n\n x = tf.expand_dims(x, axis=0) # Now batch_size=1\n return x",
"def _UnPad(self, padded):\n pad = bytearray(padded)[-1]\n return padded[:-pad]",
"def crop_image(img: PilImage, margin: int = 20) -> PilImage:\n # pylint: disable=invalid-name\n # First find the boundaries of the white area\n x0_lim = img.width\n y0_lim = img.height\n x1_lim = 0\n y1_lim = 0\n for x in range(0, img.width):\n for y in range(0, img.height):\n if img.getpixel((x, y)) != (255, 255, 255):\n if x < x0_lim:\n x0_lim = x\n if x > x1_lim:\n x1_lim = x\n if y < y0_lim:\n y0_lim = y\n if y > y1_lim:\n y1_lim = y\n x0_lim = max(x0_lim, 0)\n y0_lim = max(y0_lim, 0)\n x1_lim = min(x1_lim + 1, img.width)\n y1_lim = min(y1_lim + 1, img.height)\n # Then crop to this area\n cropped = img.crop((x0_lim, y0_lim, x1_lim, y1_lim))\n # Then create a new image with the desired padding\n out = Image.new(\n img.mode,\n (cropped.width + 2 * margin, cropped.height + 2 * margin),\n color=\"white\",\n )\n out.paste(cropped, (margin + 1, margin + 1))\n return out",
"def _trim_border(img):\n for i in range(img.shape[0]):\n if np.any(img[i, :, :] != 255):\n img = img[i:, :, :]\n break\n\n for i in range(img.shape[0] - 1, 0, -1):\n if np.any(img[i, :, :] != 255):\n img = img[: i + 1, :, :]\n break\n\n for i in range(img.shape[1]):\n if np.any(img[:, i, :] != 255):\n img = img[:, i:, :]\n break\n\n for i in range(img.shape[1] - 1, 0, -1):\n if np.any(img[:, i, :] != 255):\n img = img[:, : i + 1, :]\n break\n\n return img",
"def padding(image, padded_size):\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n\n padded_image = np.zeros((image_row + padded_size*2, image_col + padded_size*2)) #matriz de imagen con padding en zeros\n print(\"Padded image zeros:\")\n print(padded_image)\n\n padded_image[padded_size:padded_size + image_row, padded_size:padded_size + image_col] = image #matriz de imagen con padding\n print(\"Padded image:\")\n print(padded_image)\n\n \n return padded_image",
"def insert_padding(img, pad_h, pad_w):\n global frame_height, frame_width\n padding_3_dims = ((pad_h, pad_h), (pad_w, pad_w), (0, 0))\n # apply padding in the above dimensions with values 0\n padded_img = numpy.pad(img, padding_3_dims, 'constant', constant_values=0)\n return padded_img",
"def remove_padding(paddedMsg, block_size): \n try:\n if not valid_padding(paddedMsg, block_size):\n raise ValueError\n except ValueError:\n print(f\"{ paddedMsg } has invalid PKCS#7 padding.\")\n return\n \n last_byte = paddedMsg[-1]\n unpadded = paddedMsg[:-last_byte]\n print(f\"Padding removed successfully...\")\n print(f\"Before padding removal: { paddedMsg }\")\n print(f\"After padding removal: { unpadded }\")",
"def __padding(self, image, boxes, height, width):\n temp = boxes[:, :4].astype(np.int)\n y1 = np.where(temp[:, 0] < 0)[0]\n if len(y1) > 0:\n temp[y1, 0] = 0\n x1 = np.where(temp[:, 1] < 0)[0]\n if len(x1) > 0:\n temp[x1, 0] = 0\n y2 = np.where(temp[:, 2] > image.shape[0] - 1)[0]\n if len(y2) > 0:\n temp[y2, 0] = image.shape[0] - 1\n x2 = np.where(temp[:, 3] > image.shape[1] - 1)[0]\n if len(x2) > 0:\n temp[x2, 0] = image.shape[1] - 1\n pad_top = np.abs(temp[:, 0] - boxes[:, 0]).astype(np.int)\n pad_left = np.abs(temp[:, 1] - boxes[:, 1]).astype(np.int)\n pad_bottom = np.abs(temp[:, 2] - boxes[:, 2]).astype(np.int)\n pad_right = np.abs(temp[:, 3] - boxes[:, 3]).astype(np.int)\n input_data = np.empty([boxes.shape[0], 3, height, width], dtype=np.float32)\n for i in range(boxes.shape[0]):\n crop_img = image[temp[i, 0]:temp[i, 2] + 1, temp[i, 1]:temp[i, 3] + 1, :]\n crop_img = cv2.copyMakeBorder(crop_img, pad_top[i], pad_bottom[i], \\\n pad_left[i], pad_right[i], cv2.BORDER_CONSTANT, value=0)\n if crop_img is None:\n continue\n crop_img = cv2.resize(crop_img, (width, height)).astype(np.float32)\n crop_img[:, :, 0] -= self.mean[0]\n crop_img[:, :, 1] -= self.mean[1]\n crop_img[:, :, 2] -= self.mean[2]\n crop_img *= self.scale_factor\n crop_img = np.transpose(crop_img, (2, 0, 1))\n input_data[i] = crop_img.copy()\n return input_data",
"def run_padding(self):\n\n image_padded, mask, self.pad_to_right, self.pad_to_bottom = gen_padded_image_and_mask (os.path.join('utils_dfn/temp', self.file_name_with_ext),\n self.new_height, self.new_width)\n cv2.imwrite(os.path.join('utils_dfn/img', self.file_name + '_padded_resized.png'), image_padded)\n cv2.imwrite(os.path.join('utils_dfn/mask', self.file_name + '_mask.png'), mask)",
"def remove_transparency(image):\n new_image = Image.new(\"RGBA\", image.size, \"WHITE\")\n new_image.paste(image, (0, 0), image)\n new_image.convert('RGB')\n return new_image",
"def zero_pad_and_crop_img(img, amount=4):\n padded_img = np.zeros((img.shape[0] + amount * 2, img.shape[1] + amount * 2,\n img.shape[2]))\n padded_img[amount:img.shape[0] + amount, amount: img.shape[1] + amount, :] = img\n top = np.random.randint(low=0, high=2 * amount)\n left = np.random.randint(low=0, high=2 * amount)\n new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1], :]\n return new_img",
"def remove_background(img):\n\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # Remove noise - anything higher than the threshold. Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img",
"def remove_background(img):\n \n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n \n # Remove noise - anything higher than the threshold. Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img",
"def trim(self):\n result = library.MagickTrimImage(self.wand)\n if not result:\n self.raise_exception()",
"def _pad_image(self, img: ndarray, pad_width: int = 10) -> ndarray:\n self.padded_img = np.zeros(\n (img.shape[0] + pad_width*2, img.shape[1]+pad_width*2))\n self.padded_img[pad_width:-pad_width, pad_width:-pad_width] = img\n return self.padded_img",
"def image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode=\"cv2.BORDER_CONSTANT\"):\r\n #Convert position of cell from \"um\" to \"pixel index\"\r\n pos_x,pos_y = pos_x/pix,pos_y/pix \r\n\r\n for i in range(len(images)):\r\n image = images[i]\r\n \r\n #Compute the edge-coordinates that define the cropped image\r\n y1 = np.around(pos_y[i]-final_h/2.0) \r\n x1 = np.around(pos_x[i]-final_w/2.0) \r\n y2 = y1+final_h \r\n x2 = x1+final_w\r\n\r\n #Are these coordinates within the oringinal image?\r\n #If not, the image needs padding\r\n pad_top,pad_bottom,pad_left,pad_right = 0,0,0,0\r\n\r\n if y1<0:#Padding is required on top of image\r\n pad_top = int(abs(y1))\r\n y1 = 0 #set y1 to zero and pad pixels after cropping\r\n \r\n if y2>image.shape[0]:#Padding is required on bottom of image\r\n pad_bottom = int(y2-image.shape[0])\r\n y2 = image.shape[0]\r\n \r\n if x1<0:#Padding is required on left of image\r\n pad_left = int(abs(x1))\r\n x1 = 0\r\n \r\n if x2>image.shape[1]:#Padding is required on right of image\r\n pad_right = int(x2-image.shape[1])\r\n x2 = image.shape[1]\r\n \r\n #Crop the image\r\n temp = image[int(y1):int(y2),int(x1):int(x2)]\r\n\r\n if pad_top+pad_bottom+pad_left+pad_right>0:\r\n if padding_mode==\"Delete\":\r\n temp = np.zeros_like(temp)\r\n else:\r\n #Perform all padding operations in one go\r\n temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_mode))\r\n \r\n images[i] = temp\r\n \r\n return images",
"def remove_background(img):\n mask = np.zeros(img.shape[:2], np.uint8)\n bgdModel = np.zeros((1, 65), np.float64)\n fgdModel = np.zeros((1, 65), np.float64)\n rect = (50, 50, 450, 290)\n cv.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv.GC_INIT_WITH_RECT)\n mask2 = np.where((mask == 2)|(mask == 0), 0, 1).astype('uint8')\n img = img*mask2[:, :, np.newaxis]\n return img",
"def unset_padding(self):\n if self.metadata.Signal.has_item('pad_tuple'):\n Npy, Npx = self.metadata.Signal.pad_tuple\n else:\n # If no padding was done, return the same signal\n return self\n Nx, Ny = self.axes_manager.signal_shape\n s=self.deepcopy()\n del s.metadata.Signal.pad_tuple\n if self.axes_manager.navigation_dimension == 0:\n s.data = s.data[Npy[0]:(Ny-Npy[1]), Npx[0]:(Nx-Npx[1])]\n s.get_dimensions_from_data()\n elif self.axes_manager.navigation_dimension > 0:\n s.data = s.data[..., Npy[0]:(Ny-Npy[1]), Npx[0]:(Nx-Npx[1])]\n s.get_dimensions_from_data()\n # copy in case of non-linear defoci\n s.axes_manager.navigation_axes[0].axis = self.axes_manager.navigation_axes[0].axis.copy()\n return s"
]
| [
"0.71974975",
"0.6799333",
"0.65907735",
"0.6541983",
"0.64343464",
"0.64166254",
"0.63674694",
"0.6343772",
"0.63230455",
"0.63128173",
"0.62604237",
"0.6255182",
"0.6252271",
"0.6218365",
"0.6182188",
"0.6165211",
"0.61537254",
"0.6135829",
"0.6098082",
"0.606794",
"0.6055587",
"0.6054746",
"0.6050417",
"0.6049742",
"0.60434806",
"0.59876126",
"0.5950694",
"0.5931977",
"0.5914518",
"0.5877829"
]
| 0.8617263 | 0 |
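For the `remove_padding` entry above, a minimal round-trip sketch (toy shapes assumed) shows that slicing the first two axes undoes the symmetric padding added by the previous entry:

```python
import numpy as np

def add_padding(im, pad):
    return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric')

def remove_padding(im, pad):
    # drop `pad` rows/columns from every edge; the channel axis is left alone
    return im[pad:-pad, pad:-pad]

im = np.random.rand(32, 48, 3)
restored = remove_padding(add_padding(im, 4), 4)
print(np.array_equal(im, restored))  # True -- padding then unpadding is lossless
```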
Sets up the FIREBIRD env var for securty2.fdb lookup | def setup_environmentvars(self, path, mydbfilepath):
if not FDB_AVAILABLE:
print(BColors.WARNING + "Warning: fdb_embedded couldn't be imported! " \
+ "\nMake sure you've installed fdb_embedded correctly." + BColors.ENDC)
return False
os.environ['FIREBIRD'] = path
self.db_filepath = mydbfilepath
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initializeEnvironment(self, count, pid):\n result = os.environ.copy()\n result['LISTEN_FDS'] = str(count)\n result['LISTEN_PID'] = str(pid)\n return result",
"def setDB(dbname):\n global DBNAME\n DBNAME = dbname",
"def setup_firebase():\n config = {\n \"apiKey\": environment_vars.FIREBASE_API_KEY,\n \"authDomain\": environment_vars.FIREBASE_PROJECT_ID,\n \"databaseURL\": environment_vars.FIREBASE_DB_URL,\n \"storageBucket\": environment_vars.FIREBASE_PROJECT_ID,\n \"serviceAccount\": \"./bandex_services_account.json\"\n }\n\n try:\n service_account = environment_vars.FIREBASE_SERVICE_ACCOUNT\n f = open('./bandex_services_account.json', 'w')\n f.write(service_account)\n f.close()\n\n except Exception as e:\n print(\"Erro ao escrever no arquivo de service account: \", e)\n else:\n print(\"Service account configurado com sucesso.\")\n\n firebase = pyrebase.initialize_app(config)\n\n db = firebase.database()\n return db",
"def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()",
"def set_db(db):\n global db_run # Imports the DB from the simulator\n db_run=db",
"def test_missingFDSVariable(self):\n fakeEnvironment = self.initializeEnvironment(3, os.getpid())\n del fakeEnvironment['LISTEN_FDS']\n sddaemon = ListenFDs.fromEnvironment(environ=fakeEnvironment)\n self.assertEqual([], sddaemon.inheritedDescriptors())",
"def setupDbEnv(baseDirPath=None):\n global gDbEnv, gDbDirPath\n\n if not baseDirPath:\n baseDirPath = DATABASE_DIR_PATH\n\n baseDirPath = os.path.abspath(os.path.expanduser(baseDirPath))\n if not os.path.exists(baseDirPath):\n try:\n os.makedirs(baseDirPath)\n except OSError as ex:\n baseDirPath = ALT_DATABASE_DIR_PATH\n baseDirPath = os.path.abspath(os.path.expanduser(baseDirPath))\n if not os.path.exists(baseDirPath):\n os.makedirs(baseDirPath)\n else:\n if not os.access(baseDirPath, os.R_OK | os.W_OK):\n baseDirPath = ALT_DATABASE_DIR_PATH\n baseDirPath = os.path.abspath(os.path.expanduser(baseDirPath))\n if not os.path.exists(baseDirPath):\n os.makedirs(baseDirPath)\n\n gDbDirPath = baseDirPath # set global\n\n gDbEnv = lmdb.open(gDbDirPath, max_dbs=MAX_DB_COUNT)\n # creates files data.mdb and lock.mdb in dbBaseDirPath\n\n # create named dbs (core and tables)\n gDbEnv.open_db(b'core')\n gDbEnv.open_db(b'hid2did') # table of dids keyed by hids\n gDbEnv.open_db(b'did2offer', dupsort=True) # table of offer expirations keyed by offer relative dids\n gDbEnv.open_db(b'anon', dupsort=True) # anonymous messages\n gDbEnv.open_db(b'expire2uid', dupsort=True) # expiration to uid anon\n\n return gDbEnv",
"def clear_db_env():\n global _FACADE\n _FACADE = None",
"def sdss_env(request):\n m = request.getfixturevalue(\"monkeypatch\")\n for p in ('PHOTO_CALIB', 'PHOTO_DATA', 'BOSS_PHOTOOBJ', 'PHOTO_REDUX',\n 'PHOTO_RESOLVE', 'PHOTO_SKY', 'PHOTO_SWEEP'):\n m.setenv(p, '/' + p)\n return m",
"def set_test_environment():\n import flask_monitoringdashboard\n\n flask_monitoringdashboard.config.database_name = 'sqlite:///test-database.db'",
"def gtfsdb_main(ctx, database):\n ctx.obj = dict()\n if not database and os.path.exists(DEFAULT_CONFIG_FILE):\n conf = json.load(open(DEFAULT_CONFIG_FILE, 'r'))\n database = conf['database']\n ctx.obj.update(dict(conf=conf))\n else:\n click.echo(\"No database selected!!\")\n sys.exit(1)\n ctx.obj.update(dict(database=Database(url=database), db_url=database))",
"def test_defaultEnviron(self):\n self.patch(os, 'environ', {\n 'LISTEN_PID': str(os.getpid()),\n 'LISTEN_FDS': '5'})\n sddaemon = ListenFDs.fromEnvironment()\n self.assertEqual(list(range(3, 3 + 5)),\n sddaemon.inheritedDescriptors())",
"def test_blastall_fp(self):\r\n\r\n blastall = self.config[\"blastall_fp\"]\r\n if not self.config[\"blastall_fp\"].startswith(\"/\"):\r\n # path is relative, figure out absolute path\r\n blast_all = which(blastall)\r\n if not blast_all:\r\n raise ApplicationNotFoundError(\r\n \"blastall_fp set to %s, but is not in your PATH. Either use an absolute path to or put it in your PATH.\" %\r\n blastall)\r\n self.config[\"blastall_fp\"] = blast_all\r\n\r\n test_qiime_config_variable(\"blastall_fp\", self.config, self, X_OK)",
"def setup(db_file):\n global session\n\n db_conn = \"sqlite:///%s\" % db_file\n logger.info(\"DB Connection: %s\" % db_conn)\n engine = create_engine(db_conn, connect_args={'check_same_thread':False})\n engine.Echo = True\n Base.metadata.create_all(engine)\n\n Session = scoped_session(sessionmaker(bind=engine))\n session = Session()\n print \"DB Connection: %s\" % db_conn",
"def os_start_db( self, ):\r\n pass",
"def init_env():\r\n os.environ.setdefault('SPIDERPATH',SPIDERPATH)\r\n sys.path.append(SPIDERPATH)\r\n print os.environ.get(\"SPIDERPATH\")\r\n os.environ.setdefault('EMAIL',EMAIL_VAR)\r\n #print os.environ.get(\"EMAIL\")\r",
"def openDB(self, dbpath, FskHz):\n\t\tself.openDBFile( last_file_in_directory(dbpath, \"*sqlite\"), FskHz)",
"def test_db(self, env: yaenv.Env):\n _db = {\n 'ENGINE': yaenv.db.SCHEMES['sqlite'],\n 'NAME': 'db.sqlite3',\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n assert env.db('DB_URL') == _db\n _db = {\n 'ENGINE': yaenv.db.SCHEMES['sqlite'],\n 'NAME': ':memory:',\n }\n assert env.db('DB_URL_DEFAULT', 'sqlite://:memory:') == _db\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.db('INVALID_URL', 'invalid')\n assert 'Invalid database' in str(err.value)\n assert env.db('MISSING') is None",
"def env_init(self, environ) -> None:\n environ.update(self._env)",
"def before_request():\n g.db = connect_db()",
"def before_request():\n g.db = connect_db()",
"def before_request():\n g.db = connect_db()",
"def before_request():\n g.db = connect_db()",
"def bind_env(self):\r\n\r\n self.level = self.get_env(\"LEVEL\", self.level)\r\n self.diag = self.get_env(\"DIAG\", self.diag, cast = bool)\r\n self.middleware = self.get_env(\"MIDDLEWARE\", self.middleware, cast = list)\r\n self.children = self.get_env(\"CHILD\", self.children, cast = int)\r\n self.children = self.get_env(\"CHILDREN\", self.children, cast = int)\r\n self.logging = self.get_env(\"LOGGING\", self.logging)\r\n self.poll_name = self.get_env(\"POLL\", self.poll_name)",
"def setdb():\n\n if not database_exists(DB_URL):\n print('Creating database.')\n create_database(DB_URL)\n\n print('Creating tables.')\n db.create_all()\n print('Shiny!')",
"def initial_db_setup() -> None:\n db_filename = \"twdft.db\"\n db_path = os.path.join(TWDFT_DATA_DIR, db_filename)\n csv_filename = \"sites.csv\"\n csv_path = os.path.join(TWDFT_DATA_DIR, csv_filename)\n db_is_new = not os.path.exists(db_path)\n sites_csv = os.path.join(TWDFT_DATA_DIR, csv_filename)\n\n if db_is_new:\n with sqlite3.connect(db_path) as conn:\n c = conn.cursor()\n\n # first we create a site object\n c.execute(\n \"\"\"\n CREATE TABLE site(\n id INTEGER PRIMARY KEY,\n name TEXT,\n site_type TEXT,\n sub_category TEXT,\n address_1 TEXT,\n address_2 TEXT,\n town TEXT,\n county TEXT,\n country TEXT,\n postcode TEXT,\n site_category TEXT,\n freq_target TEXT,\n created TEXT,\n notes TEXT,\n last_inspection TEXT,\n next_inspection TEXT,\n pfsp_approval TEXT,\n pfsp_expiry TEXT,\n unlocode TEXT,\n pfso TEXT,\n pso TEXT,\n pfsa_approval TEXT,\n pfsa_expiry TEXT,\n team TEXT,\n created_by TEXT,\n last_updated TEXT,\n updated_by TEXT,\n afp_loc TEXT,\n rdf TEXT,\n classification TEXT,\n article24 TEXT,\n psa_approval TEXT,\n inspection_due TEXT\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspection table\n\n c.execute(\n \"\"\"\n CREATE TABLE inspection(\n id INTEGER PRIMARY KEY,\n site INTEGER,\n date TEXT,\n status TEXT,\n time TEXT,\n FOREIGN KEY(site) REFERENCES site(id)\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspector table\n c.execute(\n \"\"\"\n create table inspector(\n id integer primary key,\n first_name text,\n last_name text\n )\n \"\"\"\n )\n conn.commit()\n\n for i in INSPECTORS:\n first = i.split(\" \")[0]\n last = i.split(\" \")[1]\n c.execute(\n \"INSERT INTO inspector(first_name, last_name) VALUES (?,?)\",\n (first, last),\n )\n\n # a table that links inspectors with inspections\n c.execute(\n \"\"\"\n CREATE TABLE inspector_inspections(\n inspector INTEGER,\n inspection INTEGER,\n FOREIGN KEY (inspector) REFERENCES inspector(id),\n FOREIGN KEY (inspection) REFERENCES inspection(id)\n )\n \"\"\"\n )\n conn.commit()\n\n for site in map(Site._make, csv.reader(open(csv_path, \"r\"))):\n try:\n c.execute(\n f\"\"\"\n INSERT INTO site VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\",\n (\n int(site.id.replace(\",\", \"\")),\n site.name,\n site.site_type,\n site.sub_category,\n site.address_1,\n site.address_2,\n site.town,\n site.county,\n site.country,\n site.postcode,\n site.site_category,\n site.freq_target,\n site.created,\n site.notes,\n site.last_inspection,\n site.next_inspection,\n site.pfsp_approval,\n site.pfsp_expiry,\n site.unlocode,\n site.pfso,\n site.pso,\n site.pfsa_approval,\n site.pfsa_expiry,\n site.team,\n site.created_by,\n site.last_updated,\n site.updated_by,\n site.afp_loc,\n site.rdf,\n site.classification,\n site.article24,\n site.psa_approval,\n site.inspection_due,\n ),\n )\n except sqlite3.IntegrityError as e:\n print(\"That hasnae worked\", site.inspection_due)",
"def __set_database_info(self):\n if self.service == \"sigs\":\n self.database_name = \"sigs_local\"\n # dict of dump files and the tables associated\n self.dump_files = {\n \"01_auth.sql\": [\"auth_group\", \"auth_group_permissions\", \"auth_permission\", \"auth_user\",\n \"auth_user_groups\", \"auth_user_user_permissions\"],\n \"02_django.sql\": [\"django_content_type\", \"django_migrations\", \"django_admin_log\", \"django_session\"],\n \"03_daily_irradience.sql\": [\"solar_models_dailyglobalirradiance\", \"solar_models_hourlyglobalirradiance\"]\n }",
"def test_database_open_twice(tmpdir):\n path = str(tmpdir / \"file.db\")\n with forest.drivers.eida50.Database(path):\n pass\n with forest.drivers.eida50.Database(path):\n pass",
"def setup_database(self):\n self.db.setup_database()",
"def _initialize_db(self, db_location):\n channel = SOCKET_ARGS['channel']\n self.db_path = os.path.join(db_location, '{}.db'.format(channel))\n engine = sqlalchemy.create_engine('sqlite:///{}'.format(self.db_path), connect_args={'check_same_thread':False})\n # noinspection PyPep8Naming\n session_factory = sessionmaker(bind=engine)\n db.Base.metadata.create_all(engine)\n db_session = session_factory()\n misc_values = db_session.query(db.MiscValue).all()\n if len(misc_values) == 0:\n db_session.add_all([\n db.MiscValue(mv_key='guess-total-enabled', mv_value='False'),\n db.MiscValue(mv_key='current-deaths', mv_value='0'),\n db.MiscValue(mv_key='total-deaths', mv_value='0'),\n db.MiscValue(mv_key='guessing-enabled', mv_value='False')])\n db_session.commit()\n db_session.close()\n return session_factory"
]
| [
"0.545697",
"0.5289924",
"0.51876014",
"0.51839024",
"0.51618207",
"0.51511234",
"0.51040024",
"0.50811124",
"0.50119406",
"0.4972091",
"0.48819095",
"0.48758987",
"0.48557723",
"0.48404726",
"0.47968003",
"0.47911075",
"0.4788468",
"0.4778985",
"0.4770109",
"0.47643057",
"0.47643057",
"0.47643057",
"0.47643057",
"0.47611699",
"0.47531137",
"0.47480428",
"0.47357294",
"0.4732237",
"0.47317734",
"0.46850812"
]
| 0.7345562 | 0 |
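The `setup_environmentvars` entry above is a method on a larger class, so the sketch below restates it as a standalone function purely to show the essential effect: exporting `FIREBIRD` so the embedded Firebird driver can locate its security database, and remembering the database path for a later open. The paths and the `FDB_AVAILABLE` flag are placeholders, not part of the original record.

```python
import os

FDB_AVAILABLE = True  # placeholder for the guarded `import fdb_embedded` check

def setup_environmentvars(path, db_filepath):
    if not FDB_AVAILABLE:
        print("Warning: fdb_embedded couldn't be imported!")
        return None
    os.environ['FIREBIRD'] = path  # directory searched for the security database
    return db_filepath             # kept for the later connect() call

db_file = setup_environmentvars('/opt/firebird', '/data/archive.fdb')
print(os.environ['FIREBIRD'])  # /opt/firebird
```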
Convert a list of colors to an array of fiber IDs | def colorsToFibers(colors):
return np.array(sorted(set(sum([FIBER_COLORS[col] for col in colors], [])))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_colours(colours_list, colours_required):\n colour_range = spectra.range(colours_list, colours_required)\n return [colour.hexcode for colour in colour_range]",
"def get_colors(color_list):\n rgba_colors = []\n a = [0.5,0.5,0.6,0.4,0.3,0.2]\n i = 0\n for c in color_list:\n rgba_colors.append(list(colors.to_rgba(c)))\n rgba_colors[i][3] = a[i]\n i+=1\n\n return rgba_colors",
"def get_colors(num_colors):\n import colorsys\n colors = []\n for i in np.arange(0., 360., 360. / num_colors):\n hue = i/360.\n lightness = (50 + np.random.rand() * 10)/100.\n saturation = (90 + np.random.rand() * 10)/100.\n colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))\n return colors",
"def getColors():\n return ['#8c99fc', '#cacefd', '#fff1d7', '#feda98', '#fda85a', '#fc6647']",
"def _get_colors(num_colors):\n cmap = plt.get_cmap()\n return [cmap(1. * i / num_colors) for i in range(num_colors)]",
"def get_color_map_in_hex(rgb_colors):\n list_of_hex_colors = []\n # Iterating through the list of colors given\n for i in range(len(rgb_colors)):\n rgb = []\n # Iterating through each rgb to get them into a range of 0-255\n for j in range(3):\n num = int(rgb_colors[i][j] * 255)\n rgb.append(num)\n # Converting the rgb to hex and appending them to a new list\n list_of_hex_colors.append(rgb_to_hex(rgb))\n return list_of_hex_colors",
"def extract_colors(self, palette, colors):\n return [palette[i:i + 3] for i in range(0, colors * 3, 3)]",
"def _color_brew(n):\n color_list = []\n\n # Initialize saturation & value; calculate chroma & value shift\n s, v = 0.75, 0.9\n c = s * v\n m = v - c\n\n for h in np.arange(25, 385, 360. / n).astype(int):\n # Calculate some intermediate values\n h_bar = h / 60.\n x = c * (1 - abs((h_bar % 2) - 1))\n # Initialize RGB with same hue & chroma as our color\n rgb = [(c, x, 0),\n (x, c, 0),\n (0, c, x),\n (0, x, c),\n (x, 0, c),\n (c, 0, x),\n (c, x, 0)]\n r, g, b = rgb[int(h_bar)]\n # Shift the initial RGB values to match value and store\n rgb = [(int(255 * (r + m))),\n (int(255 * (g + m))),\n (int(255 * (b + m)))]\n color_list.append(rgb)\n\n return color_list",
"def create_colors_list(color_dict):\r\n ret = []\r\n for i in range(len(color_dict)):\r\n ret.append('#' + color_dict[i]['@rgb'])\r\n return ret",
"def getColors():\n colors = ['#d53e4f',\n '#fc8d59',\n '#fee08b',\n '#ffffbf',\n '#e6f598',\n '#99d594',\n '#3288bd',\n ]\n return colors",
"def _generate_colors(\n self, x: NDArrayFloat\n ) -> Sequence[RGBHexColor | None]:\n x = np.asarray(x)\n idx = np.round((x * 255) + ROUNDING_JITTER).astype(int)\n arr = np.column_stack(\n [self._r_lookup[idx], self._g_lookup[idx], self._b_lookup[idx]]\n )\n return [rgb_to_hex(c) for c in arr]",
"def _random_color() -> List[float]:\n return [np.random.uniform(), np.random.uniform(), np.random.uniform()]",
"def some_colors(number = 5):\n import colorsys\n N = number\n HSV_tuples = [(x*1.0/N, 1.0, 1.0) for x in range(N)]\n RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)\n\n # if only one color is required don't put in in the list\n if number == 1:\n RGB_tuples = RGB_tuples\n return RGB_tuples",
"def get_color_list(cluster_count):\n color_list = []\n for i in xrange(cluster_count):\n color_list.append(random_color_gen())\n return color_list",
"def _string_to_colors(self):\n string = self.str_colors\n colors_three = [string[c:c+3] for c in range(0, len(string), 3)]\n colors_three = [list(color) for color in colors_three]\n pixels = [[ord(rgb) for rgb in color] for color in colors_three]\n return pixels",
"def unique_colors_rgb(n):\r\n hues = []\r\n # i is in the range 0, 1, ..., n - 1\r\n for i in range(1, n + 1):\r\n hues.append(360.0 / i)\r\n\r\n hs = []\r\n for hue in hues:\r\n h = math.floor(hue / 60) % 6\r\n hs.append(h)\r\n\r\n fs = []\r\n for hue in hues:\r\n f = hue / 60 - math.floor(hue / 60)\r\n fs.append(f)\r\n\r\n rgbcolors = []\r\n for h, f in zip(hs, fs):\r\n v = 1\r\n p = 0\r\n q = 1 - f\r\n t = f\r\n if h == 0:\r\n color = v, t, p\r\n elif h == 1:\r\n color = q, v, p\r\n elif h == 2:\r\n color = p, v, t\r\n elif h == 3:\r\n color = p, q, v\r\n elif h == 4:\r\n color = t, p, v\r\n elif h == 5:\r\n color = v, p, q\r\n rgbcolors.append(color)\r\n\r\n return rgbcolors",
"def get_colors(n, cmap=\"viridis\", start=0.0, stop=1.0, alpha=1.0, return_hex=False):\n colors = [cm.get_cmap(cmap)(x) for x in np.linspace(start, stop, n)]\n colors = [(r, g, b, alpha) for r, g, b, _ in colors]\n if return_hex:\n colors = _rgb_color_list_to_hex(colors)\n return colors",
"def get_colors(num_colors, alpha):\n cs = sns.color_palette(palette=\"colorblind\", n_colors=num_colors)\n cs = [list(c) for c in cs]\n for i in range(len(cs)):\n cs[i].append(alpha)\n return cs",
"def hex2rgb(colors):\n if 'str' in str(type(colors)):\n colors = np.array([colors])\n\n rgbcolors = list(map(lambda x: matplotlib.colors.to_rgb(x), colors))\n return np.array(rgbcolors)",
"def create_cycler_colors(color_scheme):\n cmap = cm.get_cmap(color_scheme) # PiYG\n cycler_colors = []\n\n for i in range(cmap.N):\n rgba = cmap(i)\n # rgb2hex accepts rgb or rgba\n cycler_colors.append(matplotlib.colors.rgb2hex(rgba)) \n \n return cycler_colors",
"def create_funky_cmap(n_colors):\n\n colors = []\n for i in np.arange(0., 360., 360. / n_colors):\n h = i / 360.\n l = (50 + np.random.rand() * 10) / 100.\n s = (90 + np.random.rand() * 10) / 100.\n colors.append(hls_to_rgb(h, l, s))\n\n return colors",
"def get_color_list(self):\n lst = []\n\n _lib.caca_get_dither_color_list.argtypes = [_Dither]\n _lib.caca_get_dither_color_list.restype = ctypes.POINTER(ctypes.c_char_p)\n\n for item in _lib.caca_get_dither_color_list(self):\n if item is not None and item != \"\":\n lst.append(item)\n else:\n #memory occurs otherwise\n break\n\n return lst",
"def getpalette(data):\n\tpalette = []\n\tstring = StringIO(data)\n\twhile True:\n\t\ttry:\n\t\t\tpalette.append(unpack(\"<4B\", string.read(4)))\n\t\texcept StructError:\n\t\t\tbreak\n\treturn palette",
"def get_rgb_light():\n return list(light.rgb())",
"def convert_color(self, color):\n return [color[0]*16, color[1]*16, color[2]*16]",
"def get_color(self):\n colors = []\n color_specs = [self._red_spec, self._green_spec,\n self._blue_spec, self._white_spec]\n for spec in color_specs:\n driver = DRIVERS[spec.addr]\n colors.append(driver.get_duty_cycle(spec.pin))\n \n return colors",
"def get_colour(value):\n \n np.random.seed(value)\n colour = [np.random.uniform() for i in range(3)]\n return (tuple(colour))",
"def colorscale_to_colors(colorscale):\n color_list = []\n for color in colorscale:\n color_list.append(color[1])\n return color_list",
"def generate_random_colours_list(rng: random.Random, size: int) -> List[TupleInt3]:\n return [random_colour(rng) for _ in range(size)]",
"def colors(k): \n ret = []\n for i in range(k):\n ret.append((random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))\n return ret"
]
| [
"0.6882833",
"0.6552208",
"0.64129776",
"0.6288388",
"0.6265368",
"0.62227315",
"0.62189955",
"0.61927533",
"0.6179841",
"0.6153262",
"0.61470544",
"0.6112069",
"0.61112076",
"0.6054472",
"0.60415244",
"0.6002511",
"0.5993129",
"0.5938546",
"0.59206957",
"0.5844335",
"0.5809732",
"0.57845396",
"0.5731965",
"0.57268316",
"0.5717363",
"0.5710241",
"0.57071346",
"0.57033485",
"0.56937087",
"0.56731933"
]
| 0.7132494 | 0 |
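The `colorsToFibers` entry above relies on a module-level `FIBER_COLORS` lookup that the record does not include; the sketch below supplies a hypothetical mapping purely to demonstrate the union-and-sort behaviour:

```python
import numpy as np

# Hypothetical stand-in for the real FIBER_COLORS table:
# each colour name maps to the list of fiber IDs lit in that colour.
FIBER_COLORS = {"red": [1, 4], "green": [2, 4], "blue": [3]}

def colorsToFibers(colors):
    return np.array(sorted(set(sum([FIBER_COLORS[col] for col in colors], []))))

print(colorsToFibers(["red", "green"]))  # [1 2 4] -- union of both lists, deduplicated and sorted
```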
Convert a list of colors to a hash for the pfiDesignId | def hashColors(colors):
return sum(HASH_COLORS[col] for col in set(colors)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compile_palette(palette_list):\n global _COMPILED_PALETTE\n _COMPILED_PALETTE = {}\n\n for color in palette_list:\n r_sum = math.fabs(color[0] ** _MAGNITUDE)\n g_sum = math.fabs(color[1] ** _MAGNITUDE)\n b_sum = math.fabs(color[2] ** _MAGNITUDE)\n\n _COMPILED_PALETTE[color] = [r_sum, g_sum, b_sum]",
"def hash_colors(vertex):\n\n def calculate_colors(v):\n \"\"\"\n Calculate the color for name of the given vertex v.\n :param v: Name of vertex to be hashed.\n :return: Tuple of (hue, saturation, lightness) values.\n \"\"\"\n\n # Define constant color values\n lightness = [0.35, 0.5, 0.65]\n saturation = [0.35, 0.5, 0.65]\n\n # Calculate the CRC-32 checksum of colors encoded as a UTF-8 string\n hash = crc32(str(v).encode('utf-8')) & 0xffffffff\n\n # Calculate the HSL (hue, saturation, lightness) values for the vertices\n hue = ((hash % 359) / 1000) * 360\n hash //= 360\n sat = saturation[hash % len(saturation)]\n hash //= len(saturation)\n lig = lightness[hash % len(lightness)]\n\n return (hue, sat, lig)\n\n def hsl_to_rgb(hsl):\n \"\"\"\n Convert HSL color value into RGB.\n :param hsl: HSL values for given vertex.\n :return: Tuple of (R, G, B) colors for vertex.\n \"\"\"\n try:\n h, s, l = hsl\n except TypeError:\n raise ValueError(hsl)\n try:\n h /= 360\n q = l * (1 + s) if l < 0.5 else l + s - l * s\n p = 2 * l - q\n except TypeError:\n raise ValueError(hsl)\n\n rgb = []\n for c in (h + 1 / 3, h, h - 1 / 3):\n if c < 0:\n c += 1\n elif c > 1:\n c -= 1\n\n if c < 1 / 6:\n c = p + (q - p) * 6 * c\n elif c < 0.5:\n c = q\n elif c < 2 / 3:\n c = p + (q - p) * 6 * (2 / 3 - c)\n else:\n c = p\n rgb.append(round(c * 255))\n\n return tuple(rgb)\n\n raw_hsl = calculate_colors(vertex)\n return hsl_to_rgb(raw_hsl)",
"def create_colors_list(color_dict):\r\n ret = []\r\n for i in range(len(color_dict)):\r\n ret.append('#' + color_dict[i]['@rgb'])\r\n return ret",
"def hashColor(key, selected=False):\n\n def tw(t): return t ^ (t << (t % 5)) ^ (t << (6 + (t % 7))) ^ (t << (13 + (t % 11)))\n theHash = tw(hash(key) % 5003)\n ifsel = 0x00 if selected else 0x80\n (r, g, b) = (ifsel | (theHash & 0x7f),\n ifsel | ((theHash >> 8) & 0x7F),\n ifsel | ((theHash >> 16) & 0x7F))\n return \"#{0:02x}{1:02x}{2:02x}\".format(r, g, b)",
"def _rgb_color_list_to_hex(color_list):\n color_list_rgb = [[int(x * 255) for x in c[0:3]] for c in color_list]\n color_list_hex = [f\"#{rgb[0]:02X}{rgb[1]:02X}{rgb[2]:02X}\" for rgb in color_list_rgb]\n return color_list_hex",
"def _hash_color(obj):\n name_hash = hash(obj.name[:2])\n color = (\n (name_hash >> 16) % 256,\n (name_hash >> 8) % 256,\n name_hash % 256\n )\n mat_name = \"#%02X%02X%02X\" % color\n mat = (\n bpy.data.materials[mat_name] if mat_name in bpy.data.materials\n else bpy.data.materials.new(mat_name)\n )\n mat.diffuse_color = tuple([i / 256 for i in color])\n obj.data.materials.append(mat)",
"def colors(palette):\n all_colors = {\n 'cmyk': ['cian', 'magenta', 'yellow', 'black'],\n 'rgb': ['red', 'green', 'blue']\n }\n if palette == 'all':\n result = all_colors\n else:\n result = {palette: all_colors.get(palette)}\n return jsonify(result)",
"def hash_list(columns):\n md5 = hashlib.md5()\n for column in columns:\n md5.update(hxl.datatypes.normalise_space(column.header).encode('utf-8'))\n for column in columns:\n md5.update(hxl.datatypes.normalise_space(column.display_tag).encode('utf-8'))\n return md5.hexdigest()",
"def unique_colors_rgb(n):\r\n hues = []\r\n # i is in the range 0, 1, ..., n - 1\r\n for i in range(1, n + 1):\r\n hues.append(360.0 / i)\r\n\r\n hs = []\r\n for hue in hues:\r\n h = math.floor(hue / 60) % 6\r\n hs.append(h)\r\n\r\n fs = []\r\n for hue in hues:\r\n f = hue / 60 - math.floor(hue / 60)\r\n fs.append(f)\r\n\r\n rgbcolors = []\r\n for h, f in zip(hs, fs):\r\n v = 1\r\n p = 0\r\n q = 1 - f\r\n t = f\r\n if h == 0:\r\n color = v, t, p\r\n elif h == 1:\r\n color = q, v, p\r\n elif h == 2:\r\n color = p, v, t\r\n elif h == 3:\r\n color = p, q, v\r\n elif h == 4:\r\n color = t, p, v\r\n elif h == 5:\r\n color = v, p, q\r\n rgbcolors.append(color)\r\n\r\n return rgbcolors",
"def hashThisList( theList ):\n\tthisString = \"\"\n\tfor i in theList:\n\t\tthisString += str( i )\n\n\tthisSha256Hash = hashlib.sha256(thisString).hexdigest()\n\n\treturn thisSha256Hash",
"def getColorDict():\n scribus.statusMessage(\"Reading existing colors...\")\n colornames = scribus.getColorNames()\n scribus.progressTotal(len(colornames))\n i=0\n colordict={}\n for name in colornames:\n colordict[name]=None\n i=i+1\n scribus.progressSet(i)\n return colordict #we can ask this dict if the color already exists",
"def get_color_map_in_hex(rgb_colors):\n list_of_hex_colors = []\n # Iterating through the list of colors given\n for i in range(len(rgb_colors)):\n rgb = []\n # Iterating through each rgb to get them into a range of 0-255\n for j in range(3):\n num = int(rgb_colors[i][j] * 255)\n rgb.append(num)\n # Converting the rgb to hex and appending them to a new list\n list_of_hex_colors.append(rgb_to_hex(rgb))\n return list_of_hex_colors",
"def get_colors(self):\n colors = [\"#244486\", \"#A6A6A6\", \"#B12122\"]\n cmap = LinearSegmentedColormap.from_list(\"mycmap\", colors)\n\n color_palette=[cmap(i) for i in np.linspace(0, 1, len(set(self.nodes_list)))]\n return dict(zip(list(set(self.nodes_list)), color_palette))",
"def unique_colors(img):\n colors = {i[1] for i in img.getcolors(maxcolors=img.size[0]*img.size[1])}\n return colors",
"def assigning_colors():\n rgb_colors = {}\n for name, hex in matplotlib.colors.cnames.items():\n color = []\n # So the values are from 0-255 and not 0-1\n for i in matplotlib.colors.to_rgb(hex):\n color.append(int(i * 255))\n\n color = tuple(color)\n rgb_colors[name] = color\n\n return rgb_colors",
"def colors(self):\n unique, counts = np.unique(self.arr, return_counts=True)\n return {k: v for (k, v) in zip(unique, counts)}",
"def uniquecolors(n):\n hues = [360.0 / n * i for i in range(n)]\n hs = [math.floor(hue / 60) % 6 for hue in hues]\n fs = [hue / 60 - math.floor(hue / 60) for hue in hues]\n return [('rgb({}%, {}%, {}%)'.format(*tuple(a * 100 for a in rgbcolor(h, f)))) for h, f in zip(hs, fs)]",
"def _to_plotly_palette(scl, transparence=None):\n if transparence:\n return ['rgb({0},{1},{2},{3})'.format(r*255, g*255, b*255, transparence) for r, g, b in scl]\n else:\n return ['rgb({0},{1},{2})'.format(r*255, g*255, b*255) for r, g, b in scl]",
"def ordered_colors():\n\n return [(\"yellow\",0.263) ,(\"orange\", 0.047), (\"red\",0.0),(\"green\", 0.444), (\"purple\", 0.972)]",
"def get_colors():\n colors = {}\n for h in wn.synset('chromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n for h in wn.synset('achromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n return colors",
"def rgb2hex(color: List[int, int, int]) -> str:\n _validate_color(color)\n return \"#\" + \"\".join(\"{:02X}\".format(component) for component in color)",
"def set_colors(dictionary):\n colors = []\n for i in list(chain(*dictionary.values())):\n color = 'rgb' + str(tuple(np.random.choice(range(256), size=3)))\n colors.append((i, color))\n colors = dict(colors)\n\n return colors",
"def __init__(self, ordered_list):\n\n self.colors = dict((i, color) for (i, color) in enumerate(ordered_list))",
"def get_segment_colour_map(self, features):\n\n hashList = {'1' : 'Grey',\n '2':'Red',\n '3':'Green',\n '4':'greenyellow',\n '5':'Pink',\n '6':'Orange',\n '7':'goldenrod',\n '8':'indianred',\n '9':'peachpuff',\n '10':'deepskyblue',\n '11':'firebrick',\n '12':'orchid',\n '13': 'moccasin',\n '14':'slateblue',\n '15':'turquoise',\n '16':'tomato',\n '17':'darkmagenta',\n '18':'olivedrab'}\n return hashList",
"def _build_color_table() -> list[tuple[int, int, int, int, int]]:\n FG = FOREGROUND_COLOR\n BG = BACKGROUND_COLOR\n\n return [\n (0x00, 0x00, 0x00, FG.BLACK, BG.BLACK),\n (0x00, 0x00, 0xAA, FG.BLUE, BG.BLUE),\n (0x00, 0xAA, 0x00, FG.GREEN, BG.GREEN),\n (0x00, 0xAA, 0xAA, FG.CYAN, BG.CYAN),\n (0xAA, 0x00, 0x00, FG.RED, BG.RED),\n (0xAA, 0x00, 0xAA, FG.MAGENTA, BG.MAGENTA),\n (0xAA, 0xAA, 0x00, FG.YELLOW, BG.YELLOW),\n (0x88, 0x88, 0x88, FG.GRAY, BG.GRAY),\n (0x44, 0x44, 0xFF, FG.BLUE | FG.INTENSITY, BG.BLUE | BG.INTENSITY),\n (0x44, 0xFF, 0x44, FG.GREEN | FG.INTENSITY, BG.GREEN | BG.INTENSITY),\n (0x44, 0xFF, 0xFF, FG.CYAN | FG.INTENSITY, BG.CYAN | BG.INTENSITY),\n (0xFF, 0x44, 0x44, FG.RED | FG.INTENSITY, BG.RED | BG.INTENSITY),\n (0xFF, 0x44, 0xFF, FG.MAGENTA | FG.INTENSITY, BG.MAGENTA | BG.INTENSITY),\n (0xFF, 0xFF, 0x44, FG.YELLOW | FG.INTENSITY, BG.YELLOW | BG.INTENSITY),\n (0x44, 0x44, 0x44, FG.BLACK | FG.INTENSITY, BG.BLACK | BG.INTENSITY),\n (0xFF, 0xFF, 0xFF, FG.GRAY | FG.INTENSITY, BG.GRAY | BG.INTENSITY),\n ]",
"def input_to_hash(self, keys):\n basic_keys = []\n for i, key in enumerate(keys):\n s = ''\n #print(max(key), min(key))\n for val in key:\n s += \"{:04x}\".format(val)\n basic_keys.append(s)\n return basic_keys",
"def format_hex(self, list_converted):\n dict_hex = {10: 'A', 11: 'B', 12: 'C', 13: 'D', 14: 'E', 15: 'F'}\n list_converted = [dict_hex[n] if n in dict_hex.keys() else str(n) for n in list_converted]\n return list_converted",
"def hashdict(self):\n return {\n 'pix': super(cfft, self).hashdict(),\n 'fft': hashlib.sha1(self.fft.view(np.uint8)).hexdigest()\n }",
"def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]",
"def _create_parameter_set_hashes(self):\n self._parameter_set_hashes = []\n for row in self._samples:\n set_values_catenation = ''.join(repr(element) for element in row)\n set_hash = hashlib.md5(set_values_catenation.encode('utf-8')).hexdigest()\n self._parameter_set_hashes.append(set_hash)"
]
| [
"0.6457131",
"0.6365841",
"0.6311491",
"0.624755",
"0.6181713",
"0.61611116",
"0.61419606",
"0.6136244",
"0.6124362",
"0.6113369",
"0.608489",
"0.60780436",
"0.6064027",
"0.5912958",
"0.59029126",
"0.5857813",
"0.58283913",
"0.5825066",
"0.5819124",
"0.5813369",
"0.5805134",
"0.5793683",
"0.5792158",
"0.5791229",
"0.5786068",
"0.57837427",
"0.5778414",
"0.575857",
"0.5747947",
"0.5732988"
]
| 0.70530593 | 0 |
Catches a SIGINT and cleans up | def sigint_handler(sig, frame):
print("[i] Caught SIGINT, cleaning up...")
server.close()
exit(0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle_sigint(signum, frame):\n print(\"\\nInterrupted by SIGINT\\n\")\n sys.exit()",
"def SIGINT_handler(signal, frame):\n exit(2)",
"def keyboard_interrupt_handler(sig: int, _: object) -> None:\n logger.warning(f'KeyboardInterrupt (id: {sig}) has been caught...')\n logger.info('Terminating the session gracefully...')\n ray.shutdown()\n minio_leftovers = glob('*.part.minio')\n for leftover in minio_leftovers:\n Path(leftover).unlink()\n sys.exit(1)",
"def interrupt_handler(signum, frame): #pylint: disable=W0613\n cleanup()\n sys.exit(-2) # Terminate process here as catching the signal\n # removes the close process behaviour of Ctrl-C",
"def SIGINT(self, signum, frame):\n for t in [t for t in threading.enumerate() if t.name != 'MainThread']:\n if hasattr(t, 'stop') and callable(t.stop):\n t.stop()\n\n for t in [t for t in threading.enumerate() if t.name != 'MainThread']:\n t.join()\n\n self.exit(1)",
"def sigterm_handler(signal, frame):\n GPIO.cleanup()\n print('WARN : %s Received Kill' % PROG_NAME)\n print('INFO : Performed GPIO.cleanup. Bye ...')\n sys.exit(0)",
"def __sigint_handler(signal, frame):\n logging.debug(\"SIGINT or SIGTERM catched\")\n logging.debug(\"Raise t_stop_event\")\n t_stop_event.set() # Set stop flag to true for all launched threads\n logging.info(\"Stopping daemons...\")\n sleep(1)",
"def _init_worker():\n signal.signal(signal.SIGINT, signal.SIG_IGN)",
"def init_worker():\n signal.signal(signal.SIGINT, signal.SIG_IGN)",
"def hash_pool_initializer():\n signal.signal(signal.SIGINT, signal.SIG_IGN)",
"def cli():\n signal.signal(signal.SIGINT, signal_handler)\n pass",
"def signal_handler(self, sig, frame):\n GPIO.cleanup()\n sys.exit(0)",
"def signal_handler(signum, frame):\n sys.exit(0)",
"def sigint_handler(*dummy):\n print \"Received SIGINT. Stopping everything.\"\n executor.Stop()\n server.Stop()",
"def _sigint(self, signal, frame):\n self.disconnect = True\n if self.cardinal:\n self.cardinal.quit('Received SIGINT.')",
"def signal_handler(signum, frame):\n self.log.error(\"Received SIGTERM. Terminating subprocesses\")\n self.task_runner.terminate()\n self.handle_task_exit(128 + signum)",
"def handle_signals(signal, frame):\n\n GPIO.cleanup()\n os._exit(0)",
"def signal_handler(sig, frame):\n sys.exit(0)",
"def graceful_exit():\n print(\"KeyboardInterrupt, exit(1)\")\n exit(1)",
"def worker_initializer():\n signal.signal(signal.SIGINT, signal.SIG_IGN)",
"def signal_handler(signal, frame):\n sys.exit(0)",
"def signal_handler(signal, frame):\n sys.exit(0)",
"def signal_handler(signal, frame):\n sys.exit(0)",
"def finish(self):\n super(InterruptibleMixin, self).finish()\n signal(SIGINT, self.original_handler)",
"def _signal_handler(signum, frame):\n res_mgr()\n sys.exit(0)",
"def interrupt_handler(signum, frame):\n if DOCKER_MONITOR:\n util.log.warning(\"Signal %d received - Tearing down monitoring\"\n % (signum))\n DOCKER_MONITOR.tear_down_all()\n sys.exit(0)",
"def signal_handler(signal_number, stack_frame):\n if signal_number in [signal.SIGTERM, signal.SIGINT]:\n terminate_surveillance()",
"def sigterm_handler(signum: int, stack_frame) -> None:\n\n del signum\n del stack_frame\n\n logger.info(\"Terminating promqtt. Bye!\")\n sys.exit(0)",
"def sigint_handler(signal, frame):\n rclpy.shutdown()\n if prev_sigint_handler is not None:\n prev_sigint_handler(signal)",
"def register_exit_signals(self):\n signal.signal(signal.SIGINT, self._exit_gracefully)\n signal.signal(signal.SIGTERM, self._exit_gracefully)\n # So that we ignore the debug dump signal, making it easier to send\n signal.signal(signal.SIGUSR2, signal.SIG_IGN)"
]
| [
"0.73255545",
"0.71945477",
"0.71799266",
"0.70329976",
"0.68681586",
"0.68055373",
"0.67651457",
"0.6753512",
"0.6715791",
"0.6654692",
"0.65958405",
"0.65705025",
"0.6526282",
"0.6510839",
"0.6495898",
"0.6482181",
"0.64782625",
"0.6475241",
"0.641838",
"0.640692",
"0.6401365",
"0.6401365",
"0.6401365",
"0.6386979",
"0.6355198",
"0.63420856",
"0.63348323",
"0.6330865",
"0.63014364",
"0.627779"
]
| 0.7246517 | 1 |
Set the width of all the columns at once, taking the percentages from the passed list. | def set_column_widths(self, clist):
self.columns = len(clist)
for i in range(self.columns):
self.colwid[i] = clist[i] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __set_column_width(self):\n for i in range(0, len(self.header_width)):\n self.view.setColumnWidth(i, self.header_width[i])",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def set_column_width(self, index, width):\n self.colwid[index] = width",
"def adjust_columns(self):\r\n for col in range(3):\r\n self.resizeColumnToContents(col)",
"def col_width_percent(self,column_no): \n return float(self.col_width(column_no)*100)/self.total_width()",
"def __store_column_width(self):\n self.header_width = []\n for i in range(0, self.view.header().count()):\n self.header_width.append(self.view.columnWidth(i))",
"def UpdateColumns(self):\r\n data = self.data\r\n columns = data.getParam('columns',data.tankColumns[:])\r\n col_name = data.getParam('colNames',{})\r\n col_width = data.getParam('colWidths',{})\r\n col_align = data.getParam('colAligns',{})\r\n for index,column in enumerate(columns):\r\n name = col_name.get(column,_(column))\r\n width = col_width.get(column,30)\r\n align = wxListAligns[col_align.get(column,'LEFT')]\r\n self.gList.InsertColumn(index,name,align)\r\n self.gList.SetColumnWidth(index, width)",
"def _calculate_column_widths(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> List[int]:\n max_width = options.max_width\n columns = self.columns\n width_ranges = [\n self._measure_column(console, options, column) for column in columns\n ]\n widths = [_range.maximum or 1 for _range in width_ranges]\n get_padding_width = self._get_padding_width\n extra_width = self._extra_width\n if self.expand:\n ratios = [col.ratio or 0 for col in columns if col.flexible]\n if any(ratios):\n fixed_widths = [\n 0 if column.flexible else _range.maximum\n for _range, column in zip(width_ranges, columns)\n ]\n flex_minimum = [\n (column.width or 1) + get_padding_width(column._index)\n for column in columns\n if column.flexible\n ]\n flexible_width = max_width - sum(fixed_widths)\n flex_widths = ratio_distribute(flexible_width, ratios, flex_minimum)\n iter_flex_widths = iter(flex_widths)\n for index, column in enumerate(columns):\n if column.flexible:\n widths[index] = fixed_widths[index] + next(iter_flex_widths)\n table_width = sum(widths)\n\n if table_width > max_width:\n widths = self._collapse_widths(\n widths,\n [(column.width is None and not column.no_wrap) for column in columns],\n max_width,\n )\n table_width = sum(widths)\n # last resort, reduce columns evenly\n if table_width > max_width:\n excess_width = table_width - max_width\n widths = ratio_reduce(excess_width, [1] * len(widths), widths, widths)\n table_width = sum(widths)\n\n width_ranges = [\n self._measure_column(console, options.update_width(width), column)\n for width, column in zip(widths, columns)\n ]\n widths = [_range.maximum or 0 for _range in width_ranges]\n\n if (table_width < max_width and self.expand) or (\n self.min_width is not None and table_width < (self.min_width - extra_width)\n ):\n _max_width = (\n max_width\n if self.min_width is None\n else min(self.min_width - extra_width, max_width)\n )\n pad_widths = ratio_distribute(_max_width - table_width, widths)\n widths = [_width + pad for _width, pad in zip(widths, pad_widths)]\n\n return widths",
"def _update_width(self, is_commit_in_existing_columns):\n max_cols = self.num_columns + self.num_parents\n\n # Even if the current commit has no parents to be printed, it still\n # takes up a column for itself.\n if self.num_parents < 1:\n max_cols += 1\n\n # We added a column for the current commit as part of self.num_parents.\n # If the current commit was already in self.columns, then we have double\n # counted it.\n if is_commit_in_existing_columns:\n max_cols -= 1\n\n # Each column takes up 2 spaces\n self.width = max_cols * 2",
"def anchuras(hoja):\r\n return [hoja.computed_column_width(i) for i in range(hoja.ncols)]",
"def set_width(self, *args):\n return _ida_hexrays.lvar_t_set_width(self, *args)",
"def width(self, width):\n self.col += width",
"def auto_width(sheet):\r\n column_widths = []\r\n for row in sheet.iter_rows():\r\n for i, cell in enumerate(row):\r\n try:\r\n # if cell alignment is vertical, use 4, else len(str(cell.value)\r\n if cell.alignment.textRotation == 90:\r\n cell_w = 4\r\n else:\r\n cell_w = len(str(cell.value))\r\n column_widths[i] = max(column_widths[i], cell_w)\r\n except IndexError:\r\n column_widths.append(len(str(cell.value)))\r\n\r\n for i, column_width in enumerate(column_widths):\r\n # 1.4 is a fudge factor for variable width fonts\r\n sheet.column_dimensions[get_column_letter(i + 1)].width = column_width * 1.4",
"def _refresh_width(self):\n self._width = curses.tigetnum('cols')\n self._writer = formatter.DumbWriter(self._output, maxcol=self._width)",
"def percent_space(self):\n self.custom_space(*[0,0,100,100])",
"def SetColumnWidth(self, column, width):\r\n\r\n if width == wx.LIST_AUTOSIZE_USEHEADER:\r\n \r\n font = self._header_win.GetFont()\r\n dc = wx.ClientDC(self._header_win)\r\n width, dummy, dummy = dc.GetMultiLineTextExtent(self._header_win.GetColumnText(column))\r\n # Search TreeListHeaderWindow.OnPaint to understand this:\r\n width += 2*_EXTRA_WIDTH + _MARGIN\r\n \r\n elif width == wx.LIST_AUTOSIZE:\r\n \r\n width = self._main_win.GetBestColumnWidth(column)\r\n \r\n self._header_win.SetColumnWidth(column, width)\r\n self._header_win.Refresh()",
"def create_w_tbl(index: int, entries: int) -> List[float]:\n min_w = StackupTestHelper.index_to_min_width_fn(index)\n return list(map(lambda x: min_w*x, range(1, 4 * entries + 1, 4)))",
"def setWidth(self, width):\n self._reconfig(\"width\", width)",
"def setPointWidth(self, width):\n for point in self.points:\n point.width = width",
"def _SetWidth(self, column_index, content_length):\n # Updates the width at position column_index to be the max of the existing\n # value and the new content's length, or this instance's max_column_width if\n # the value would be greater than max_column_width.\n if column_index == len(self._widths):\n self._widths.append(0)\n\n new_width = max(self._widths[column_index], content_length)\n if self._max_column_width is not None:\n new_width = min(self._max_column_width, new_width)\n self._widths[column_index] = new_width"
]
| [
"0.6759513",
"0.6526067",
"0.6526067",
"0.6526067",
"0.6526067",
"0.6526067",
"0.6526067",
"0.6526067",
"0.6526067",
"0.6526067",
"0.6526067",
"0.6526067",
"0.6467374",
"0.6360108",
"0.61599517",
"0.5957372",
"0.5752069",
"0.56696403",
"0.5639448",
"0.5638654",
"0.5615438",
"0.5574988",
"0.55535036",
"0.55260116",
"0.5486367",
"0.5437358",
"0.543276",
"0.53821284",
"0.5334457",
"0.53204095"
]
| 0.7743246 | 0 |
Set the width of a specified column to the specified width. | def set_column_width(self, index, width):
self.colwid[index] = width | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SetColumnWidth(self, column, width):\r\n \r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n\r\n self._total_col_width -= self._columns[column].GetWidth()\r\n self._columns[column].SetWidth(width)\r\n self._total_col_width += width\r\n self._owner.AdjustMyScrollbars()\r\n self._owner._dirty = True",
"def setColumnWidth(self, column, newWidth = None):\n\t\t\t\tdef yieldWidths():\n\t\t\t\t\tfor i, row in enumerate(self.thing.iter_rows(), start = 1):\n\t\t\t\t\t\twidth = self.getCellWidth(i, column)\n\t\t\t\t\t\tif (width is not None):\n\t\t\t\t\t\t\tyield width\n\n\t\t\t\tif (newWidth is None):\n\t\t\t\t\t#Find the longest cell in the column\n\t\t\t\t\tpossibleWidths = tuple(yieldWidths())\n\t\t\t\t\tif (possibleWidths):\n\t\t\t\t\t\tnewWidth = max(possibleWidths)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewWidth = -1 #Compensate for blank columns\n\n\t\t\t\t#Apply the new width\n\t\t\t\tnewWidth += 2\n\t\t\t\tself.thing.column_dimensions[openpyxl.utils.get_column_letter(column)].width = newWidth",
"def SetColumnWidth(self, column, width):\r\n\r\n if width == wx.LIST_AUTOSIZE_USEHEADER:\r\n \r\n font = self._header_win.GetFont()\r\n dc = wx.ClientDC(self._header_win)\r\n width, dummy, dummy = dc.GetMultiLineTextExtent(self._header_win.GetColumnText(column))\r\n # Search TreeListHeaderWindow.OnPaint to understand this:\r\n width += 2*_EXTRA_WIDTH + _MARGIN\r\n \r\n elif width == wx.LIST_AUTOSIZE:\r\n \r\n width = self._main_win.GetBestColumnWidth(column)\r\n \r\n self._header_win.SetColumnWidth(column, width)\r\n self._header_win.Refresh()",
"def width(self, width):\n self.col += width",
"def _SetWidth(self, column_index, content_length):\n # Updates the width at position column_index to be the max of the existing\n # value and the new content's length, or this instance's max_column_width if\n # the value would be greater than max_column_width.\n if column_index == len(self._widths):\n self._widths.append(0)\n\n new_width = max(self._widths[column_index], content_length)\n if self._max_column_width is not None:\n new_width = min(self._max_column_width, new_width)\n self._widths[column_index] = new_width",
"def set_column_autowidth(worksheet: Worksheet, column: int):\n maxwidth = get_column_width(worksheet=worksheet, column=column)\n if maxwidth is None:\n return\n worksheet.set_column(first_col=column, last_col=column, width=maxwidth)",
"def __set_column_width(self):\n for i in range(0, len(self.header_width)):\n self.view.setColumnWidth(i, self.header_width[i])",
"def setWidth(self, width):\n self._reconfig(\"width\", width)",
"def set_width(self, width):\n self.width = width",
"def OnColumnResize(self,event):\r\n iColumn = event.GetColumn()\r\n column = self.data.getParam('columns')[iColumn]\r\n self.data.updateParam('colWidths')[column] = self.gList.GetColumnWidth(iColumn)",
"def set_width(self, width):\n self.__width = width",
"def SetColumn(self, column, info):\r\n \r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n w = self._columns[column].GetWidth()\r\n self._columns[column] = info\r\n \r\n if w != info.GetWidth():\r\n self._total_col_width += info.GetWidth() - w\r\n self._owner.AdjustMyScrollbars()\r\n \r\n self._owner._dirty = True",
"def setWidth(self, width):\n if not self._width:\n self._width = int(width)",
"def col_width(self,column_no): \n if(column_no == 0 and self.serialize):\n return self.col_width_dict['_serial_'] \n column = self.columns[column_no - (1 if self.serialize else 0)]\n return self.col_width_dict[column]",
"def addColumn(self, *column):\n self.insertColumn(self._width, *column)",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def setWidth(*args):",
"def width(self, width):\n\n self._width = width",
"def width(self, width):\n\n self._width = width",
"def width(self, width):\n\n self._width = width",
"def width(self, value):\n self._el._parent.execute_script(\"arguments[0].width = arguments[1]\", self._el, value)\n self.changed = True"
]
| [
"0.8275198",
"0.8051967",
"0.7529646",
"0.7405566",
"0.7389673",
"0.7142246",
"0.7070048",
"0.68435705",
"0.6804257",
"0.6693521",
"0.66881835",
"0.65535086",
"0.65136516",
"0.6489625",
"0.6469331",
"0.64580953",
"0.64580953",
"0.64580953",
"0.64580953",
"0.64580953",
"0.64580953",
"0.64580953",
"0.64580953",
"0.64580953",
"0.64580953",
"0.64580953",
"0.6439933",
"0.6439933",
"0.6439933",
"0.6360022"
]
| 0.8418978 | 0 |
Defines if a right border is used | def set_right_border(self, val):
self.rborder = val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rightframeborderoff(cls):\n cls.right_frame['highlightthickness'] = 0",
"def border(self):\n ...",
"def rightbox(self):\r\n pass",
"def getBorderFlags():\n\treturn border_flag",
"def border(self):\r\n\t\treturn self._border",
"def set_left_border(self, val):\n self.lborder = val",
"def has_right(self):\n return self.r is not None",
"def has_right_sidebar(self):\n return True",
"def set_borders(self, val):\n self.rborder = val\n self.lborder = val\n self.tborder = val\n self.bborder = val",
"def border(self):\n return self._border",
"def border(self):\n return self._border",
"def is_border(coord, sides):\n return coord[0] <= 0 or coord[0] >= sides[\"bottom\"]\\\n or coord[1] <= 0 or coord[1] >= sides[\"right\"]",
"def has_border(self):\n return self._border_actor is not None",
"def HasBorder(self):\r\n \r\n return self.HasFlag(self.optionPaneBorder)",
"def make_empty_right(self, e=0):\n self.make_empty_side(u'right')",
"def has_right(self):\n return self.right != None",
"def get_border(self):\n return self._border",
"def isRight(self):\n return self.right",
"def _has_right(self, j):\n return (2 * j + 2) < len(self)",
"def _add_border(self):\n top = TopWallCell(self)\n left = SideWallCell(self, False)\n right = SideWallCell(self, True)\n for col in range(self._columns):\n self.cell_at(col, self._rows - 1, top)\n for row in range(self._rows):\n self.cell_at(0, row, left)\n self.cell_at(self._columns - 1, row, right)",
"def right(self):\n if self.head.heading() != LEFT and self.last_direction != LEFT:\n self.head.setheading(RIGHT)",
"def has_right(self):\n return self.__right != None",
"def move_shape_right(self):\n if self.falling_shape:\n self.falling_shape.shift_shape_right_by_one_column()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.falling_shape.shift_shape_left_by_one_column()\n return False\n return True",
"def has_right(self, position):\n return self.right_child(position) is not None",
"def IsRightDockable(self):\r\n \r\n return self.HasFlag(self.optionRightDockable)",
"def bottom_right_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_bottom_Height()\n PosX = get_right_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)",
"def is_right(self):\n if self.pupils_located:\n return self.horizontal_ratio() <= 0.35",
"def border_type(self):\n return self.container['border_type']",
"def getBorder(self):\n return self.__border",
"def wall_on_right(): #py:wall_on_right\n return RUR._wall_on_right_()"
]
| [
"0.68664896",
"0.6642183",
"0.642791",
"0.6375472",
"0.63121057",
"0.6302553",
"0.6234603",
"0.61621845",
"0.6157814",
"0.6145464",
"0.6145464",
"0.6138981",
"0.60701907",
"0.59811795",
"0.5954375",
"0.59495926",
"0.59238166",
"0.5905673",
"0.58858347",
"0.5884089",
"0.588203",
"0.58609366",
"0.58466905",
"0.5845336",
"0.5817068",
"0.5803644",
"0.5794446",
"0.57694066",
"0.57679445",
"0.5765079"
]
| 0.80175745 | 0 |
Defines if a left border is used | def set_left_border(self, val):
self.lborder = val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def leftframeborderoff(cls):\n cls.left_frame['highlightthickness'] = 0",
"def _is_left_edge(self, ndx):\n if len(self._dims)== 1:\n return ndx == 0\n return ndx < self._dims[1]",
"def border(self):\n ...",
"def getBorderFlags():\n\treturn border_flag",
"def IsLeftDockable(self):\r\n \r\n return self.HasFlag(self.optionLeftDockable)",
"def is_left(self, line):\n return line.angle() < 0",
"def left_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_middle_Height()\n PosX = get_left_PosX(active,Width)\n PosY=get_middle_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)",
"def draw_left(self):\n return group()",
"def make_empty_left(self, e=0):\n self.make_empty_side(u'left')",
"def border(self):\n sel=self.ch_border.isChecked()\n for i in [ self.sb_border_width, self.cb_style, self.b_color ]:\n i.setEnabled(sel)",
"def border(self):\r\n\t\treturn self._border",
"def left(self):\n if self.head.heading() != RIGHT and self.last_direction != RIGHT:\n self.head.setheading(LEFT)",
"def HasBorder(self):\r\n \r\n return self.HasFlag(self.optionPaneBorder)",
"def move_shape_left(self):\n if self.falling_shape:\n self.falling_shape.shift_shape_left_by_one_column()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.falling_shape.shift_shape_right_by_one_column()\n return False\n return True",
"def get_left_side(grid):\n right = int(grid.width / 2)\n left_side = Grid(\n grid=grid, crop=Crop(left=0, right=right, top=0, bottom=0))\n left_side.find_grid_lines()\n left_side.vert_insert_line(0, distance=-80)\n left_side.get_cells()\n left_side.get_row_labels()\n return left_side",
"def top_left_dot(self) -> bool:\n return bool(self._getindicator(1))",
"def Left_Left_fix(self, GP):\r\n P = GP.left\r\n U = GP.right\r\n ### D\r\n if U.color == \"red\":\r\n P.color = \"black\"\r\n U.color = \"black\"\r\n GP.color = \"red\"\r\n return GP\r\n else:\r\n GP.left = P.right\r\n P.right = GP\r\n # Fix the colors\r\n P.color = \"black\"\r\n GP.color = \"red\"\r\n return P",
"def HasCaptionLeft(self):\r\n \r\n return self.HasFlag(self.optionCaptionLeft)",
"def border(self):\n return self._border",
"def border(self):\n return self._border",
"def border_box_x(self):\n return self.position_x + self.margin_left",
"def GetDefaultBorder(self):\r\n\r\n return wx.BORDER_NONE",
"def GetDefaultBorder(self):\r\n\r\n return wx.BORDER_NONE",
"def is_left(self):\n if self.pupils_located:\n return self.horizontal_ratio() >= 0.65",
"def isLeft(self):\n return self.left",
"def left_padding_width(self):\n ...",
"def bottom_left_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_bottom_Height()\n PosX = get_left_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)",
"def increase_left_boundary(self):\n self.L = self.L - 1.0\n self.Ne = self.Ne + 1",
"def _rect_left(self):\n\treturn min(self.x, self.x + self.w)",
"def set_right_border(self, val):\n self.rborder = val"
]
| [
"0.70413435",
"0.66685605",
"0.6345007",
"0.6264619",
"0.62617964",
"0.62232846",
"0.62126046",
"0.61926544",
"0.6165064",
"0.6120788",
"0.6105952",
"0.6072327",
"0.60005605",
"0.5992448",
"0.5982933",
"0.5981157",
"0.5977957",
"0.59509814",
"0.5895324",
"0.5895324",
"0.5891144",
"0.58845276",
"0.58845276",
"0.58702844",
"0.5868388",
"0.5852012",
"0.5809755",
"0.58061033",
"0.5799297",
"0.5789324"
]
| 0.8141564 | 0 |
Defines if a top border is used | def set_top_border(self, val):
self.tborder = val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def border(self):\n ...",
"def topBorderFor( player ):\n return centerTextAt( \"\", default_display_vars.borderChar_Top, getUserScreenWidth( player ) )",
"def HasGripperTop(self):\r\n\r\n return self.HasFlag(self.optionGripperTop)",
"def GripperTop(self, attop=True):\r\n \r\n return self.SetFlag(self.optionGripperTop, attop)",
"def getBorderFlags():\n\treturn border_flag",
"def border(self):\r\n\t\treturn self._border",
"def always_top(self) -> bool:\n return bool(self.tk_ref.wm_attributes('-topmost'))",
"def GetDefaultBorder(self):\r\n\r\n return wx.BORDER_NONE",
"def GetDefaultBorder(self):\r\n\r\n return wx.BORDER_NONE",
"def set_bottom_border(self, val):\n self.bborder = val",
"def HasBorder(self):\r\n \r\n return self.HasFlag(self.optionPaneBorder)",
"def has_border(self):\n return self._border_actor is not None",
"def isTop(self):\n return self.top",
"def setBorder():\n dislin.pagera()",
"def border(self):\n return self._border",
"def border(self):\n return self._border",
"def always_top(self, value: bool):\n self.tk_ref.wm_attributes('-topmost', int(value))",
"def check_top(self):\n\t\tif self.rect.top <=0:\n\t\t\tself.target_direction = 1",
"def generate_topline(self) -> tuple:\n line_top = \"┌\" + \"┬\".join([\n \"─\" + \"─\" + \"─\" for _ in range(self.width)]) + \"┐\\n\"\n return (self.style, line_top)",
"def leftframeborderoff(cls):\n cls.left_frame['highlightthickness'] = 0",
"def keep_top_or_bottom(self):\n return self._keep_top_or_bottom",
"def top_option():\n active = get_active_window()\n Width=get_middle_Width(active)\n Height=get_top_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_top_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)",
"def set_borders(self, val):\n self.rborder = val\n self.lborder = val\n self.tborder = val\n self.bborder = val",
"def testPsychOnTop(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"on_top\")\n\n self.util.intPropertyTest(self, attr, \"on_top\")",
"def rightframeborderoff(cls):\n cls.right_frame['highlightthickness'] = 0",
"def get_border(self):\n return self._border",
"def getExceedingBoxTop(self):\n return self.exceedingBoxTop",
"def draw_top(self):\n return group()",
"def bb_top(self, bb_top: float):\n\n self._bb_top = bb_top",
"def check_borders(self):\n # Go Homer!\n # https://en.wikipedia.org/wiki/Torus#Flat_torus\n if self._posn.x < 0:\n self._posn.x += self._win_w\n elif self._posn.x > self._win_w:\n self._posn.x -= self._win_w\n if self._posn.y < 0:\n self._posn.y += self._win_h\n elif self._posn.y > self._win_h:\n self._posn.y -= self._win_h"
]
| [
"0.66174763",
"0.6470724",
"0.64533097",
"0.627558",
"0.6166172",
"0.6139952",
"0.6020186",
"0.60169846",
"0.60169846",
"0.6011903",
"0.60111123",
"0.60063803",
"0.5995756",
"0.5969622",
"0.58888805",
"0.58888805",
"0.58380824",
"0.5770998",
"0.57503366",
"0.5717747",
"0.57101965",
"0.5685563",
"0.5681053",
"0.56298393",
"0.5627993",
"0.5619785",
"0.5612977",
"0.5596082",
"0.55887073",
"0.55444396"
]
| 0.76243424 | 0 |
Defines if a bottom border is used | def set_bottom_border(self, val):
self.bborder = val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def border(self):\n ...",
"def bottomBorderFor( player ):\n return centerTextFor( player, \"\", default_display_vars.borderChar_Bottom )",
"def isBottom(self):\n return self.bottom",
"def HasBorder(self):\r\n \r\n return self.HasFlag(self.optionPaneBorder)",
"def borders(self):\n border_left = pm.Segment(self.space.static_body, (-5, 0), (-5, self.screen_height), 10)\n border_right = pm.Segment(self.space.static_body, (self.screen_width + 5, 0),\n (self.screen_width + 5, self.screen_height), 10)\n border_top = pm.Segment(self.space.static_body, (0, self.screen_height + 5),\n (self.screen_width, self.screen_height + 5), 10)\n border_bottom = pm.Segment(self.space.static_body, (0, 0), (self.screen_width, 0),\n self.screen_height * 0.1)\n border_bottom.friction = TERRAIN_FRICTION # Set the bottom border friction\n border_bottom.color = DARK_GREY # Set the bottom border color\n\n # Set the collision types so that the collision handlers check for them\n border_top.collision_type = 4\n border_left.collision_type = 4\n border_right.collision_type = 4\n border_bottom.collision_type = 4\n self.space.add(border_left, border_right, border_top, border_bottom) # Add the borders to the Pymunk space",
"def _is_bottom_edge(self, ndx):\n if len(self._dims) == 1:\n return True\n return (ndx % self._dims[1]) == self._dims[1]-1",
"def has_border(self):\n return self._border_actor is not None",
"def is_border(coord, sides):\n return coord[0] <= 0 or coord[0] >= sides[\"bottom\"]\\\n or coord[1] <= 0 or coord[1] >= sides[\"right\"]",
"def bottom_left_dot(self) -> bool:\n return bool(self._getindicator(2))",
"def getBorderFlags():\n\treturn border_flag",
"def is_bottom_edge_above(self, item_or_group):\n return not self.is_bottom_edge_below(item_or_group)",
"def border(self):\r\n\t\treturn self._border",
"def bb_bottom(self) -> float:\n return self._bb_bottom",
"def IsBottomSnappable(self):\r\n \r\n return self.HasFlag(self.optionBottomSnapped)",
"def BottomSnappable(self, b=True):\r\n \r\n return self.SetFlag(self.optionBottomSnapped, b)",
"def border(self):\n return self._border",
"def border(self):\n return self._border",
"def is_border(self, threshold):\n\n def dist_to_line(p1, p2, p3):\n return np.linalg.norm(np.cross(p2 - p1, p1 - p3)) / np.linalg.norm(p2 - p1)\n\n total_dist = 0\n for p in self.shape:\n total_dist += dist_to_line(self.shape[0], self.shape[-1], p)\n return total_dist < threshold",
"def bb_bottom(self, bb_bottom: float):\n\n self._bb_bottom = bb_bottom",
"def setBorder():\n dislin.pagera()",
"def setBorder(self, b):\n self.border = fn.mkPen(b)\n self.update()",
"def check_for_bottom(self) -> bool:\n\t\tboolean_expression_has_bottom = False\n\t\texpression_has_bottom = False\n\t\tif self.boolean_expression:\n\t\t\tboolean_expression_has_bottom = self.boolean_expression.check_for_bottom()\n\t\tif self.expression:\n\t\t\texpression_has_bottom = self.expression.check_for_bottom()\n\t\treturn boolean_expression_has_bottom or expression_has_bottom",
"def set_borders(self, val):\n self.rborder = val\n self.lborder = val\n self.tborder = val\n self.bborder = val",
"def drawBorder(self):\n\t\t# horizontal lines\n\t\tself.wts(0, 0, '╭' + '─' * (self.width - 2) + '╮', self._borderColor)\t\t\t\t\t\t# Top\n\t\tself.wts(self.height - 2, 0, '└' + '─' * (self.width - 2) + '╯', self._borderColor)\t\t\t# Bottom\n\t\t# vertical lines\n\t\tfor yPos in range(1, self.height - 2):\n\t\t\tself.wts(yPos, 0, '│', self._borderColor)\n\t\t\tself.wts(yPos, self.width - 1, '│', self._borderColor)",
"def bottom_option():\n active = get_active_window()\n Width= get_middle_Width(active)\n Height=get_bottom_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)",
"def get_border(self):\n return self._border",
"def border(self):\n sel=self.ch_border.isChecked()\n for i in [ self.sb_border_width, self.cb_style, self.b_color ]:\n i.setEnabled(sel)",
"def draw_boundary() -> None:\n # Upper edge\n print(rpipes.terminal.move_xy(0, 0), WBorder.HORIZONTAL * (rpipes.terminal.width - 1))\n\n # Left and Right edges\n for row in range(rpipes.terminal.height - 2):\n print(\n WBorder.VERTICAL,\n rpipes.terminal.move_right(rpipes.terminal.width - 4),\n WBorder.VERTICAL,\n )\n\n # Bottom edge\n print(\n rpipes.terminal.move_xy(0, rpipes.terminal.height - 2),\n WBorder.HORIZONTAL * (rpipes.terminal.width - 1),\n )\n\n # Top left corner\n print(rpipes.terminal.move_xy(0, 0) + WBorder.DOWN_AND_RIGHT)\n\n # Top right corner\n print(rpipes.terminal.move_xy(rpipes.terminal.width - 1, 0) + WBorder.DOWN_AND_LEFT)\n\n # Bottom left corner\n print(rpipes.terminal.move_xy(0, rpipes.terminal.height - 2) + WBorder.UP_AND_RIGHT)\n\n # Bottom right corner\n print(\n rpipes.terminal.move_xy(rpipes.terminal.width - 1, rpipes.terminal.height - 2)\n + WBorder.UP_AND_LEFT\n )",
"def IsBottomDockable(self):\r\n \r\n return self.HasFlag(self.optionBottomDockable)",
"def getExceedingBoxBottom(self):\n return self.exceedingBoxBottom"
]
| [
"0.6883019",
"0.6749164",
"0.67282933",
"0.6515105",
"0.6510314",
"0.6445598",
"0.63622177",
"0.6343225",
"0.6306105",
"0.62943786",
"0.62870574",
"0.62784487",
"0.6187134",
"0.61684763",
"0.616482",
"0.61413944",
"0.61413944",
"0.61305714",
"0.61050344",
"0.60874295",
"0.6064503",
"0.60385734",
"0.60340726",
"0.5976981",
"0.59765214",
"0.5953923",
"0.59393245",
"0.5904063",
"0.5898037",
"0.58935577"
]
| 0.7958736 | 0 |
Initialize and return a tabix reader object for subsequent tabix_get() calls. | def tabix_init():
tabix = load_shared_library('tabix')
if (tabix == None): return None
tabix.ti_read.restype = c_char_p
# on Mac OS X 10.6, the following declarations are required.
tabix.ti_open.restype = c_void_p
tabix.ti_querys.argtypes = [c_void_p, c_char_p]
tabix.ti_querys.restype = c_void_p
tabix.ti_query.argtypes = [c_void_p, c_char_p, c_int, c_int]
tabix.ti_query.restype = c_void_p
tabix.ti_read.argtypes = [c_void_p, c_void_p, c_void_p]
tabix.ti_iter_destroy.argtypes = [c_void_p]
tabix.ti_close.argtypes = [c_void_p]
# FIXME: explicit declarations for APIs not used in this script
return tabix | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_reader(self, **kw):\n return self.table.open_reader(str(self), **kw)",
"def __init__(self, fp, thrift_base):\r\n ThriftRecordIO.assert_has_thrift()\r\n if not thrift_base:\r\n raise ThriftRecordIO.ThriftUnsuppliedException(\r\n 'Must construct ThriftRecordReader with valid thrift_base!')\r\n RecordIO.Reader.__init__(self, fp, ThriftRecordIO.ThriftCodec(thrift_base))",
"def getReader(self):\n return Tes3Reader(self.inName,cStringIO.StringIO(self.data))",
"def __init__(self):\n\n self.reader = reader.Reader()",
"def open_reader(\n self,\n partition=None,\n reopen=False,\n endpoint=None,\n download_id=None,\n timeout=None,\n arrow=False,\n columns=None,\n quota_name=None,\n async_mode=True,\n **kw\n ):\n\n from ..tunnel.tabletunnel import TableDownloadSession\n\n if partition and not isinstance(partition, odps_types.PartitionSpec):\n partition = odps_types.PartitionSpec(partition)\n tunnel = self._create_table_tunnel(endpoint=endpoint, quota_name=quota_name)\n download_ids = dict()\n if download_id is None:\n download_ids = self._download_ids\n download_id = download_ids.get(partition) if not reopen else None\n download_session = utils.call_with_retry(\n tunnel.create_download_session,\n table=self, partition_spec=partition, download_id=download_id,\n timeout=timeout, async_mode=async_mode, **kw\n )\n\n if download_id and download_session.status != TableDownloadSession.Status.Normal:\n download_session = utils.call_with_retry(\n tunnel.create_download_session,\n table=self, partition_spec=partition, timeout=timeout,\n async_mode=async_mode, **kw\n )\n download_ids[partition] = download_session.id\n\n if arrow:\n return TableArrowReader(self, download_session, columns=columns)\n else:\n return TableRecordReader(self, download_session, partition, columns=columns)",
"def reader(self):\n df = pd.read_csv(self.path)\n return df",
"def my_reader(filename, sheetname='Sheet1', separ=','):\r\n global df_read\r\n filename_list = filename.split('.')\r\n extension = filename_list[-1]\r\n if extension == 'csv':\r\n df_read = pd.read_csv(filename, sep=separ)\r\n if extension == 'data':\r\n df_read = pd.read_csv(filename, sep=separ, header=None)\r\n if extension == 'txt':\r\n df_read = pd.read_csv(filename, sep=separ)\r\n if extension == 'json':\r\n df_read = pd.read_json(filename)\r\n if extension == 'html':\r\n df_read = pd.read_html(filename)\r\n if extension == 'xls':\r\n df_read = pd.read_excel(pd.ExcelFile(filename), sheetname)\r\n if extension == 'xlsx':\r\n df_read = pd.read_excel(pd.ExcelFile(filename), sheetname)\r\n if extension == 'feather':\r\n df_read = pd.read_feather(filename)\r\n if extension == 'parquet':\r\n df_read = pd.read_parquet(filename)\r\n if extension == 'msg':\r\n df_read = pd.read_msgpack(filename)\r\n if extension == 'dta':\r\n df_read = pd.read_stata(filename)\r\n if extension == 'sas7bdat':\r\n df_read = pd.read_sas(filename)\r\n if extension == 'pkl':\r\n df_read = pd.read_pickle(filename)\r\n return df_read",
"def get_reader(self) -> ArchiveFileReader:\n return ArchiveFileReader(\n filename=self.datafile,\n serializer=self.serializer,\n compression=self.compression,\n decoder=self.decoder\n )",
"def _init_reader_schema(self, field_names=None):\n if field_names:\n return from_column_list(field_names)\n\n assert os.path.exists(self.db_path), \\\n 'db_path [{db_path}] does not exist'.format(db_path=self.db_path)\n with core.NameScope(self.name):\n # blob_prefix is for avoiding name conflict in workspace\n blob_prefix = scope.CurrentNameScope()\n workspace.RunOperatorOnce(\n core.CreateOperator(\n 'Load',\n [],\n [],\n absolute_path=True,\n db=self.db_path,\n db_type=self.db_type,\n load_all=True,\n add_prefix=blob_prefix,\n )\n )\n col_names = [\n blob_name[len(blob_prefix):] for blob_name in workspace.Blobs()\n if blob_name.startswith(blob_prefix)\n ]\n schema = from_column_list(col_names)\n return schema",
"def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)",
"def _read_tab(pth):\n if not os.path.exists(pth):\n raise SampleTableFileException(\n \"File does not exist: {}\".format(pth))\n read_csv_kwargs = {\"engine\": \"python\", \"dtype\": str,\n \"index_col\": False, \"keep_default_na\": False,\n \"na_values\": [\"\"]}\n return pd.read_csv(pth, sep=infer_delimiter(pth), **read_csv_kwargs)",
"def __init__(self, path, format, fields, skip_header=False,\n csv_reader_params={}, **kwargs):\n\n cache_path = os.path.join('tmp', (os.path.basename(path) + '.td'))\n try:\n with open(cache_path, 'rb') as f:\n examples = pickle.load(f)\n except:\n format = format.lower()\n make_example = {\n 'json': Example.fromJSON, 'dict': Example.fromdict,\n 'tsv': Example.fromCSV, 'csv': Example.fromCSV}[format]\n\n with io.open(os.path.expanduser(path), encoding=\"utf8\") as f:\n if format == 'csv':\n reader = unicode_csv_reader(f, **csv_reader_params)\n elif format == 'tsv':\n reader = unicode_csv_reader(f, delimiter='\\t', **csv_reader_params)\n else:\n reader = f\n\n if format in ['csv', 'tsv'] and isinstance(fields, dict):\n if skip_header:\n raise ValueError('When using a dict to specify fields with a {} file,'\n 'skip_header must be False and'\n 'the file must have a header.'.format(format))\n header = next(reader)\n field_to_index = {f: header.index(f) for f in fields.keys()}\n make_example = partial(make_example, field_to_index=field_to_index)\n\n if skip_header:\n next(reader)\n\n examples = [make_example(line, fields) for line in reader]\n with open(cache_path, 'wb') as f:\n pickle.dump(examples, f)\n\n if isinstance(fields, dict):\n fields, field_dict = [], fields\n for field in field_dict.values():\n if isinstance(field, list):\n fields.extend(field)\n else:\n fields.append(field)\n\n super(TabularDataset, self).__init__(examples, fields, **kwargs)",
"def read_table(self, table):\n return READ_TABLE(table, db=self.db)",
"def from_pandas(self, obj, index=True):\n return Reader(_from_pandas(obj, index=index))",
"def test_reader_instantiates(self):\n cr = CaseReader(self.filename)\n self.assertTrue(isinstance(cr, HDF5CaseReader), msg='CaseReader not'\n ' returning the correct subclass.')",
"def get_reader_fn(input_fp=None):\n if input_fp is None:\n return OdpsTableReader if \"PAI\" in tf.__version__ else CSVReader\n\n if \"odps://\" in input_fp:\n return OdpsTableReader\n else:\n return CSVReader",
"def __init__(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n NexusReaderBase.__init__(self, -1)\n self.taxa = None\n self._data_matrices = None",
"def __init__(self, iReader):\n self.__index_reader = iReader",
"def read(cls, filename):\n table = Table.read(filename)\n return cls.from_table(table)",
"def CSVReader(self, input_file):\n f = open(input_file, 'r')\n reader = csv.reader(f)\n headers = reader.next()\n reader = csv.DictReader(f, headers)\n return reader",
"def get_gtf_reader(gtf_path):\n return HTSeq.GFF_Reader(gtf_path)",
"def __init__(self, resource, sheet=None, encoding=None, skip_rows=None, read_header=True):\r\n self.resource = resource\r\n self.sheet_reference = sheet\r\n self.read_header = read_header\r\n self.header_row = 0\r\n self.skip_rows = skip_rows\r\n self._fields = None\r\n self.close_file = True\r\n self.encoding = encoding\r\n self.fields = None",
"def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath)\n else:\n raise ValueError(f\"File type {file_extension} is not supported.\")",
"def start_reader(self):\n # if already started, return immediately\n if self.running:\n return\n\n # construct a new reader & start it\n self.reader = threading.Thread(target = self.read_data)\n self.reader.start()",
"def read_table(cls, filepath_or_buffer, *args, **vargs):\n if filepath_or_buffer.endswith('.csv') and 'sep' not in vargs:\n vargs['sep'] = ','\n df = pandas.read_table(filepath_or_buffer, *args, **vargs)\n labels = df.columns\n return Table([df[label].values for label in labels], labels)",
"def get_file_reader(path):\n return get_by_scheme(path, SCHEMAS_TO_FILEREADERS, LocalFileReader)",
"def _get_table_obj(self, mode):\n return self.client[f\"bigquery_{mode}\"].get_table(self.table_full_name[mode])",
"def __init__(self):\n _snap.TTableRow_swiginit(self, _snap.new_TTableRow())",
"def fastqa_reader(resources_or_conf: Union[dict, SharedResources] = None):\n from jack.readers.extractive_qa.tensorflow.fastqa import FastQAModule\n from jack.readers.extractive_qa.shared import XQAInputModule, XQAOutputModule\n shared_resources = create_shared_resources(resources_or_conf)\n\n input_module = XQAInputModule(shared_resources)\n model_module = FastQAModule(shared_resources)\n output_module = XQAOutputModule()\n return TFReader(shared_resources, input_module, model_module, output_module)",
"def get_reader(fname):\n\n if fname == \"-\":\n fh = sys.stdin\n else:\n fh = open(fname, \"r\")\n \n rdr = csv.reader(fh, dialect=\"psv\")\n return (rdr, fh)"
]
| [
"0.6326279",
"0.63021135",
"0.6254",
"0.607567",
"0.59633064",
"0.564539",
"0.5636553",
"0.5579402",
"0.55439866",
"0.5534084",
"0.5520355",
"0.5492369",
"0.5491401",
"0.54427826",
"0.54417884",
"0.54338014",
"0.54261786",
"0.5419037",
"0.5365143",
"0.5346173",
"0.5339926",
"0.52727836",
"0.52485406",
"0.52472675",
"0.52283746",
"0.5222425",
"0.52023035",
"0.5194956",
"0.5168113",
"0.51420295"
]
| 0.6618322 | 0 |
Perform a query that reads data ("select" statement) | def read(self, query):
t1 = time.time()
if self.database in ['redshift', 'postgres']:
ret = postgres_helper.fetchall(config=self.conf, sql=query)
else:
raise Exception("database not supported yet: '{}'"
.format(self.database))
t2 = time.time()
t = t2 - t1
print('Finished in {:.2f} seconds.'.format(t))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_data(query):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data",
"def get_data(query):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data",
"def runQueryRead(d, query):\n with d.session() as s:\n results = s.read_transaction(runQuery, query, True)\n return results",
"def execute_read_query(connection, query):\n cursor = connection.cursor()\n result = None\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n return result\n except psycopg2.OperationalError as e:\n print(f\"The error '{e}' occurred\")",
"def selectData(self, sql: str) -> List:\n try:\n connection = self.connect()\n cursor = connection.cursor() \n data = cursor.execute(sql)\n result = data.fetchall() \n return result\n except Exception as e:\n logging.error(f'{self.cn} Exception: {e}', exc_info=1)\n logging.error(f'{self.cn} SQL: {sql}')",
"def read(self,s,v):\n self.cur.execute(s,v)\n data = self.cur.fetchall()\n return data",
"def quickSqlRead(self,s,v,withHeaders=False):\n self.conn()\n \n \n self.cur.execute(s,v)\n headers = [desc[0] for desc in self.cur.description]\n data = self.cur.fetchall()\n self.close()\n \n if withHeaders:\n return data, headers\n return data",
"def dbselect(cxn, query, payload):\n\tcursor = cxn.cursor()\n\tif not payload:\n\t\trows = cursor.execute(query)\n\telse:\n\t\trows = cursor.execute(query,payload)\n\tresults = []\n\tfor row in rows:\n\t\tresults.append(row)\n\tcursor.close()\n\treturn results",
"def select_all_data(conn, select_sql):\n cur = conn.cursor()\n cur.execute(select_sql)\n\n rows = cur.fetchall()\n\n for row in rows:\n print(row)",
"def _read_query(self):\n try:\n # Open Google Drive and read the sql file\n self.query = GDrive().read_drive_file(self.input_source_id)\n except Exception as e:\n raise e",
"def fetch_data_from_db(query):\n cursor.execute(query)\n result = cursor.fetchall()\n return result",
"def get_data(db, columns, table, condition=\"\"):\n cur = db.cursor()\n cur.execute(SELECT.format(columns, table) + \" \" + condition)\n return cur.fetchall()",
"def read_sql(self):\n pass",
"def query3() :",
"def fetchSqlRecords(self, sql):\r\n cursor = self.c.cursor()\r\n cursor.execute(sql)\r\n r = cursor.fetchall()\r\n cursor.close()\r\n return r",
"def _execute_query(self, sql):\n url = self.db_url + \"?\" + urlencode({'action': 'doQuery', 'SQL': sql})\n install_opener(build_opener(self.auth_handler, self.cookie_handler))\n response = urlopen(url)\n cookie_jar.save(ignore_discard=True)\n\n # Check for OK response\n line = response.readline()\n if bytes(line) != b\"#OK\\n\":\n raise Exception(response.readlines())\n\n # Skip rows until we reach QUERYTIMEOUT\n while True:\n line = bytes(response.readline())\n if line == b\"\":\n raise Exception(\"Unexpected end of file while reading result\"\n \"header\")\n elif line.startswith(b\"#QUERYTIMEOUT\"):\n break\n\n # Skip QUERYTIME\n if not(bytes(response.readline()).startswith(b\"#QUERYTIME\")):\n raise Exception(\"Don't understand result header!\")\n\n # Read column info\n # (also discards line with full list of column names)\n columns = []\n while True:\n line = bytes(response.readline())\n if not line.startswith(b\"#\"):\n break\n else:\n m = re.match(b\"^#COLUMN ([0-9]+) name=([\\w]+) \"\n b\"JDBC_TYPE=(-?[0-9]+) JDBC_TYPENAME=([\\w]+)\\n$\",\n line)\n if m is not None:\n columns.append(m.groups())\n else:\n raise Exception(\"Don't understand column info: \"+line)\n\n # Construct record type for the output\n types = [numpy_dtype[col[3]] for col in columns]\n try:\n # Python 2 compatible\n names = [col[1] for col in columns]\n dtype = np.dtype([(n, t) for n, t in zip(names, types)])\n except TypeError:\n # Python 3 compatible\n names = [col[1].decode() for col in columns]\n dtype = np.dtype([(n, t) for n, t in zip(names, types)])\n\n # Return the data as a record array\n return np.genfromtxt(response, dtype=dtype, delimiter=\",\")",
"def get_sql_data(query, *args):\n\n sql = query\n search_data = (args)\n\n try:\n assert sql.count('%s') == len(args)\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(sql, search_data)\n data = cursor.fetchall()\n cursor.close()\n conn.close()\n print(\"SUCCESS: get_sql_data\")\n return data\n\n except AssertionError:\n print(\"ERROR: get_sql_data(), wrong number of arguments\")",
"def query(sql):\n if (sql is None):\n raise Exception(\"SQL not specified\") \n try:\n database = App.instance().environment.database\n connection = psycopg2.connect(host=database.host, dbname=database.database, \n user=database.user, password=database.password)\n cursor = connection.cursor()\n cursor.execute(sql)\n fields = [ x[0] for x in cursor.description]\n return (fields, cursor.fetchall())\n except(Exception, psycopg2.DatabaseError) as error:\n print(\"Error connecting to database\", error)\n finally:\n if not connection is None:\n cursor.close()\n connection.close()",
"def select_all_lines(conn):\n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM ayasdi_table\")\n\n rows = cur.fetchall()\n\n for row in rows:\n print row",
"def query(self, sql):\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n return result",
"def selectOpt(self, sql): # select\n # apply connection rescource\n dbp_opt = dbPool()\n results = dbp_opt.opSelect(sql)\n # release connection rescource\n dbp_opt.dispose()\n return results",
"def get_data(self, sql):\n\n db = pyodbc.connect('DSN=' + self.dsn)\n cursor = db.cursor()\n try:\n records = cursor.execute(sql).fetchall()\n except pyodbc.DataError:\n err_msg = 'caused by the sql statement \"' + sql + '\"'\n raise pyodbc.DataError(err_msg)\n except pyodbc.ProgrammingError:\n err_msg = 'caused by the sql statement \"' + sql + '\"'\n raise pyodbc.ProgrammingError(err_msg)\n except AttributeError:\n err_msg = 'caused by the sql statement \"' + sql + '\"'\n raise AttributeError(err_msg)\n except pyodbc.DataError:\n err_msg = 'caused by the sql statement \"' + sql + '\"'\n raise pyodbc.DataError(err_msg)\n except Exception:\n err_msg = 'caused by the sql statement \"' + sql + '\"'\n raise Exception(err_msg)\n finally:\n db.close()\n\n # this section commented out since we discovered there are scenarios\n # where we want to return no records, and it is not an error\n #if len(records) < 1:\n # err_msg = 'The sql statement \"' + sql + '\" returned no records'\n # raise IndexError(err_msg)\n return records, cursor.description\n cursor.close()",
"def query(self, sql):\n try:\n res_cursor = self.connection.execute(text(sql))\n except Exception as e: \n raise e(\"SQL execution error!\")\n \n rows = (Row(res_cursor.keys(), record) for record in res_cursor)\n results = RowsCollection(rows)\n return results",
"def _query_all(self, sql, data=None):\n\n conn = psycopg2.connect(self.connect_args)\n cur = conn.cursor()\n cur.execute(sql, data)\n result = cur.fetchall()\n cur.close()\n conn.close()\n return result",
"def select_sql(command):\n logging.debug(\"Running Select sql \"+str(command))\n try:\n## host, userid, password, database instance\n con = mdb.connect(serverip, username, userpass, schema);\n cursor = con.cursor()\n \n sql = command\n cursor.execute(sql)\n return cursor.fetchall()\n \n con.close()\n\n except mdb.Error, e:\n logger.error(e)",
"def sql_select(self, sqlstr):\n conn = sqlite3.connect('./as400-sqlite-test.db')\n cur = conn.cursor()\n cur.execute(sqlstr)\n return cur.fetchall()",
"def fetch_data(universe='all', start=None, end=None, connection=None, tablename=None, where_clause = None):\n from datetime import datetime, timedelta\n if end is None:\n end = datetime.today().strftime('%Y-%m-%d')\n if start is None:\n start = (datetime.today() - timedelta(days=30)).strftime('%Y-%m-%d')\n q = []\n select = \"SELECT * from {tablename} where \".format(tablename=tablename)\n if universe != 'all':\n q.append(\"symbol in {universe}\")\n q.append(\"timestamp >= '{start}'\")\n q.append(\"timestamp <= '{end}'\")\n if where_clause:\n [q.append(x)for x in where_clause]\n order_by = ' ORDER BY timestamp'\n query = ' AND '.join(q).format(universe=tuple(universe), \n start=start, end=end)\n query = select + query + order_by\n # This should be any column\n data = pd.read_sql_query(query, connection, parse_dates=['timestamp'])\n # Delete index column if any\n if 'index' in data.columns:\n del data['index']\n return data",
"def select(conn, sql):\n cur = conn.cursor()\n cur.execute(sql)\n\n rows = cur.fetchall()\n\n for row in rows:\n print(row)",
"def select_query(self):\n query = db.select([self.tables])\n print(query)\n ResultProxy = self.connection.execute(query)\n ResultSet = ResultProxy.fetchall()\n return ResultSet",
"def get_data(self):\n\n return pd.read_sql_query(\"Select * from {table}\".format(table=self.table_name), con=self.con)"
]
| [
"0.709314",
"0.709314",
"0.6972477",
"0.6920157",
"0.6900757",
"0.6877643",
"0.68701386",
"0.6856005",
"0.67879015",
"0.6689552",
"0.6670152",
"0.6652809",
"0.6617898",
"0.66163635",
"0.6590509",
"0.65665734",
"0.65189326",
"0.6489373",
"0.6484665",
"0.64830947",
"0.6477366",
"0.64772713",
"0.6443064",
"0.6431067",
"0.64258796",
"0.64249086",
"0.6409052",
"0.6408804",
"0.6394251",
"0.6387525"
]
| 0.73528874 | 0 |
Freeze all parameters of `net` | def freeze(net):
for p in net.parameters():
p.requires_grad_(False)
return net | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unfreeze(net):\n for p in net.parameters():\n p.requires_grad_(True)\n return net",
"def freeze(self):\n # Freeze.\n self.frozen = True\n for param in self.parameters():\n param.requires_grad = False",
"def __freeze(self):\r\n features_layer = self._model._net\r\n for param in features_layer.parameters():\r\n param.requires_grad = False",
"def freeze_model(self):\n # BN layers need to be freezed explicitly since they cannot be freezed via '.requires_grad=False'\n for module in self.modules():\n if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):\n module.eval()\n \n # freeze all parameters\n for param in self.parameters():\n param.requires_grad = False",
"def _freeze_tgt_networks(self):\n q1 = zip(self.tgt_q1.parameters(), self.soft_q1.parameters())\n q2 = zip(self.tgt_q2.parameters(), self.soft_q2.parameters())\n\n # Copy parameters\n for target_param, param in q1:\n target_param.data.copy_(param.data)\n for target_param, param in q2:\n target_param.data.copy_(param.data)\n\n # Freeze gradients\n for param in self.tgt_q1.parameters():\n param.requires_grad = False\n for param in self.tgt_q2.parameters():\n param.requires_grad = False",
"def freeze_params(model: nn.Module):\n for par in model.parameters():\n par.requires_grad = False",
"def freeze_parameters(module: nn.Module):\n for p in module.parameters():\n p.requires_grad = False",
"def _freeze_stages(self) -> None:\n if self.frozen_stages >= 0:\n if self.deep_stem:\n self.stem.eval()\n for param in self.stem.parameters():\n param.requires_grad = False\n else:\n self.norm1.eval()\n for m in [self.conv1, self.norm1]:\n for param in m.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, f'layer{i}')\n m.eval()\n for param in m.parameters():\n param.requires_grad = False",
"def freeze_params(m):\r\n for p in m.parameters():\r\n p.requires_grad = False",
"def freeze_params(module: nn.Module):\n for _, p in module.named_parameters():\n p.requires_grad = False",
"def freeze_params(m):\n for p in m.parameters():\n p.requires_grad = False",
"def freeze(self):\n self.collect_params().setattr('grad_req', 'null')",
"def freeze_layers(model: torch.nn.Module) -> None:\n for param in model.parameters():\n param.requires_grad = False",
"def unfreeze_layers(model: torch.nn.Module) -> None:\n for param in model.parameters():\n param.requires_grad = True",
"def freeze_model(model):\n for param in model.parameters():\n param.requires_grad = False",
"def freeze_encoder(self):\n self.dfs_freeze(self.net.conv1)\n self.dfs_freeze(self.net.conv2)\n self.dfs_freeze(self.net.conv3)\n self.dfs_freeze(self.net.conv4)\n self.dfs_freeze(self.net.conv5)",
"def freeze(self):\n for p in self.parameters():\n p.requires_grad = False\n FrozenBatchNorm2d.convert_frozen_batchnorm(self)\n return self",
"def sync(net, net_tar):\n for var, var_tar in zip(net.trainable_weights,\n net_tar.trainable_weights):\n var_tar.assign(var)",
"def freeze_img_branch_params(self):\n if self.with_img_bbox_head:\n for param in self.img_bbox_head.parameters():\n param.requires_grad = False\n if self.with_img_backbone:\n for param in self.img_backbone.parameters():\n param.requires_grad = False\n if self.with_img_neck:\n for param in self.img_neck.parameters():\n param.requires_grad = False\n if self.with_img_rpn:\n for param in self.img_rpn_head.parameters():\n param.requires_grad = False\n if self.with_img_roi_head:\n for param in self.img_roi_head.parameters():\n param.requires_grad = False",
"def freeze(self) -> None:\n self._set_requires_grad(False)\n for param in self.model.fc.parameters():\n param.requires_grad = True",
"def reset_parameters(self) -> None:\n for gnn_block in self.gnn_blocks:\n gnn_block.reset_parameters()",
"def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Update the critic network\n for target_param, param in zip(self.critic_target_network.parameters(), self.critic_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)",
"def hard_update(source_net, target_net):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(param.data)",
"def freeze_neuron(self,optimizer):\n\n n_neurons = self.num_hiddens\n params = []\n for i in range(n_neurons):\n params.append(\n # input2hidden\n {'params': self.input2hidden_layers[str(i)].parameters(), 'lr': 0},\n )\n params.append(\n # hidden2output\n {'params': self.hidden2output_layers[str(i)].parameters(), 'lr': 0.001},\n )\n if n_neurons > 1:\n for i in range(int(n_neurons*(n_neurons-1)/2)):\n params.append(\n # hidden2hidden\n {'params': self.hidden2hidden_layers[str(i)].parameters(), 'lr': 0},\n )\n optimizer = torch.optim.SGD(params, momentum=0.9,lr=0.001)\n return optimizer",
"def _Freeze(self) -> None:\n self._SetNodes(_FROZEN_NODE_COUNT)",
"def reset(self):\n print('Network reset to its original copy')\n self.net = self.copy.copy()\n self.current_threshold = None\n self.method = None",
"def _set_freeze_layers(self):\n for layer in self.encoder.layers[:self.freeze_layers]:\n layer.trainable = False",
"def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False",
"def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False",
"def freeze_lowperf(self):\n \n self.freeze_model()\n\n # defreeze params of only being used by the high-performance model\n for i in range(1,5):\n layer = getattr(self, \"layer\"+str(i))\n if self.block_type == 'Bottleneck':\n layer[0].conv3.weight.requires_grad = True\n layer[0].bn3.train()\n elif self.block_type == 'BasicBlock':\n layer[0].conv2.weight.requires_grad = True\n layer[0].bn2.train()\n else:\n print(\"[Error] Unknown block type\")\n\n\n num_skip = len(layer)//2\n for j in range(1, num_skip+1):\n for param in layer[j].parameters():\n param.requires_grad = True\n layer[j].train()"
]
| [
"0.7966241",
"0.7187328",
"0.7122004",
"0.7078595",
"0.70181316",
"0.68783057",
"0.6774389",
"0.6707521",
"0.66708046",
"0.66631836",
"0.66451365",
"0.6607336",
"0.6528228",
"0.6503718",
"0.6395548",
"0.6271478",
"0.6230465",
"0.6174202",
"0.613142",
"0.6124019",
"0.6113008",
"0.60478836",
"0.6046227",
"0.59840167",
"0.5981752",
"0.59759843",
"0.5968068",
"0.589927",
"0.589927",
"0.58856106"
]
| 0.8028887 | 0 |
Unfreeze all parameters of `net` | def unfreeze(net):
for p in net.parameters():
p.requires_grad_(True)
return net | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def freeze(net):\n for p in net.parameters():\n p.requires_grad_(False)\n return net",
"def unfreeze_layers(model: torch.nn.Module) -> None:\n for param in model.parameters():\n param.requires_grad = True",
"def __freeze(self):\r\n features_layer = self._model._net\r\n for param in features_layer.parameters():\r\n param.requires_grad = False",
"def freeze(self):\n # Freeze.\n self.frozen = True\n for param in self.parameters():\n param.requires_grad = False",
"def unfreeze(self) -> None:\n self._set_requires_grad(True)",
"def freeze_model(self):\n # BN layers need to be freezed explicitly since they cannot be freezed via '.requires_grad=False'\n for module in self.modules():\n if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):\n module.eval()\n \n # freeze all parameters\n for param in self.parameters():\n param.requires_grad = False",
"def freeze_params(model: nn.Module):\n for par in model.parameters():\n par.requires_grad = False",
"def unfreeze(self, exclude_range=None):\n # make all layers trainable\n for i, layer in enumerate(self.model.layers):\n layer.trainable = True\n if exclude_range:\n for i, layer in enumerate(self.model.layers[:exclude_range]):\n layer.trainable = False\n self._recompile()\n return",
"def freeze_parameters(module: nn.Module):\n for p in module.parameters():\n p.requires_grad = False",
"def freeze_params(module: nn.Module):\n for _, p in module.named_parameters():\n p.requires_grad = False",
"def freeze_layers(model: torch.nn.Module) -> None:\n for param in model.parameters():\n param.requires_grad = False",
"def freeze(self):\n self.collect_params().setattr('grad_req', 'null')",
"def freeze_params(m):\r\n for p in m.parameters():\r\n p.requires_grad = False",
"def _freeze_tgt_networks(self):\n q1 = zip(self.tgt_q1.parameters(), self.soft_q1.parameters())\n q2 = zip(self.tgt_q2.parameters(), self.soft_q2.parameters())\n\n # Copy parameters\n for target_param, param in q1:\n target_param.data.copy_(param.data)\n for target_param, param in q2:\n target_param.data.copy_(param.data)\n\n # Freeze gradients\n for param in self.tgt_q1.parameters():\n param.requires_grad = False\n for param in self.tgt_q2.parameters():\n param.requires_grad = False",
"def freeze_params(m):\n for p in m.parameters():\n p.requires_grad = False",
"def unroll(self) -> None:\n\n for flat in self.params:\n if self.global_ref_rank != self.global_rank and self.gradients_based:\n # this rank is not the owner, release the grad\n flat.param.grad = None\n else:\n if self.gradients_based:\n # this rank is the owner, unroll the results\n assert flat.param.grad is not None\n\n flat.param.grad.data.copy_(\n self.buffer[flat.start : flat.stop].view_as(flat.param.data), non_blocking=True\n )\n else:\n flat.param.data.copy_(\n self.buffer[flat.start : flat.stop].view_as(flat.param.data), non_blocking=True\n )\n\n self.reset()",
"def unfreeeze_all_layers(self):\n # Unfreeeze\n logger.info('MODEL: Unfreeze all layers.')\n for i in range(len(self.model.layers)):\n self.model.layers[i].trainable = True\n \n # Compile model\n logger.info('MODEL: Compiling...')\n self.model.compile(optimizer = Adam(lr=1e-4),\n loss={'yolo_loss': lambda y_true, y_pred: y_pred})",
"def reset(self):\n print('Network reset to its original copy')\n self.net = self.copy.copy()\n self.current_threshold = None\n self.method = None",
"def unfreeze_named_layers(model, keys: Tuple = ()):\n for key in keys:\n for name, param in model.named_parameters():\n if name.startswith(key):\n param.requires_grad = True",
"def freeze_model(model):\n for param in model.parameters():\n param.requires_grad = False",
"def disown(self):\r\n for apply_node in self.apply_nodes:\r\n del apply_node.fgraph\r\n del apply_node.deps\r\n for variable in self.variables:\r\n del variable.fgraph\r\n del variable.clients\r\n self.apply_nodes = set()\r\n self.variables = set()\r\n self.inputs = None\r\n self.outputs = None",
"def _freeze_stages(self) -> None:\n if self.frozen_stages >= 0:\n if self.deep_stem:\n self.stem.eval()\n for param in self.stem.parameters():\n param.requires_grad = False\n else:\n self.norm1.eval()\n for m in [self.conv1, self.norm1]:\n for param in m.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, f'layer{i}')\n m.eval()\n for param in m.parameters():\n param.requires_grad = False",
"def reset_parameters(self) -> None:\n for gnn_block in self.gnn_blocks:\n gnn_block.reset_parameters()",
"def _reset(lp):\n if hasattr(lp, \"solverModel\"):\n delattr(lp, \"solverModel\")\n for v in lp.variables():\n if hasattr(v, \"_xprs\"):\n delattr(v, \"_xprs\")\n for c in lp.constraints.values():\n if hasattr(c, \"_xprs\"):\n delattr(c, \"_xprs\")",
"def reset_parameters(self):\n self.lin.reset_parameters()\n self.att.reset_parameters()\n self.gnn_score.reset_parameters()\n if self.gnn_intra_cluster is not None:\n self.gnn_intra_cluster.reset_parameters()\n self.select.reset_parameters()",
"def resetparams(self, parameters):\n self.weights = None\n try:\n self.params = parameters\n except AttributeError:\n # Variable self.params does not exist, so not updated\n # Create an empty set of params for future reference\n self.params = {}",
"def reset_parameters(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)",
"def reset_parameters(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)",
"def reset_parameters(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)",
"def reset_parameters(self):\n\n for layer in self.layers:\n layer.reset_parameters()"
]
| [
"0.72357833",
"0.7064379",
"0.6781601",
"0.6590634",
"0.6506778",
"0.6480661",
"0.63610387",
"0.6295241",
"0.6283971",
"0.62608975",
"0.624865",
"0.6229807",
"0.6151874",
"0.6148073",
"0.6140297",
"0.6124738",
"0.61239815",
"0.6103413",
"0.6091473",
"0.60638964",
"0.60376936",
"0.5913211",
"0.58436745",
"0.58394915",
"0.5821645",
"0.58196634",
"0.57969445",
"0.57969445",
"0.57969445",
"0.5768903"
]
| 0.8412342 | 0 |
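A minimal usage sketch for the `unfreeze` helper documented in the preceding row; the tiny `nn.Linear` module is only an illustrative stand-in for a real network, not part of the dataset.

import torch.nn as nn

def unfreeze(net):
    for p in net.parameters():
        p.requires_grad_(True)
    return net

# Freeze a small module, then thaw it and check every parameter is trainable again.
net = nn.Linear(4, 2)
for p in net.parameters():
    p.requires_grad_(False)
unfreeze(net)
assert all(p.requires_grad for p in net.parameters())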
Compute the entropy of the categorical distribution specified by the logits `out` along dimension `dim`. | def entropy(out, dim=1, reduce='mean'):
log_prob = F.log_softmax(out, dim=dim)
h = -torch.sum(log_prob.exp() * log_prob, dim=dim)
if reduce == 'none':
return h
if reduce == 'mean':
return h.mean()
if reduce == 'sum':
return h.sum() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def entropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n logits = args.attribution_model.output2logits(args.forward_output)\n out = torch.distributions.Categorical(logits=logits).entropy()\n if out.ndim > 1:\n out = out.squeeze(-1)\n return out",
"def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))",
"def entropy(image, selem, out=None, mask=None, shift_x=False, shift_y=False):\n\n return _apply(_crank8.entropy, _crank16.entropy, image, selem, out=out,\n mask=mask, shift_x=shift_x, shift_y=shift_y)",
"def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en",
"def entropy(target_col):\n elements,counts = np.unique(target_col,return_counts = True)\n entropy = np.sum([(-counts[i]/np.sum(counts))*np.log2(counts[i]/np.sum(counts)) for i in range(len(elements))])\n return entropy",
"def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))",
"def entropy(self):\r\n return 1/2 * (self.dim * (_LOG_2PI + 1) + self._log_det_cov)",
"def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)",
"def entropy(data):\n n, m = np.shape(data)\n data = np.tanh(data)\n data = data / np.sum(data, axis=0)\n a = data * 1.0\n a[np.where(data == 0)] = 0.000001\n\n e = (-1.0 / np.log(n)) * np.sum(data * np.log(a), axis=0)\n w = (1 - e) / np.sum(1 - e)\n return w",
"def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')",
"def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H",
"def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)",
"def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent",
"def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy",
"def calculate_cross_entropy(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])",
"def entropy(self, X):\n if isinstance(X, np.ndarray):\n X = pd.DataFrame(X, index=[str(i) for i in range(len(X))])\n K = self._posterior_covariance(X)\n L = np.linalg.cholesky(K)\n D = len(X)\n return np.sum(np.log(np.diag(L))) + 0.5 * D * np.log(2*np.pi*np.exp(1))",
"def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation",
"def entropy_function(c, n):\n return -(c*1.0/n)*math.log(c*1.0/n,2)",
"def _entropy(self, feature, node):\n entropy = 0\n categories = np.unique(feature)\n num_point = len(feature)\n for category in categories:\n # for each category in that feature\n num_category = len(feature[feature == category])\n for c in self.num_class:\n # count the number of each class\n num_category_class = len(feature[np.logical_and(feature == category, node.y == c)])\n if num_category_class == 0:\n continue\n # compute entropy/information gain or classification error\n entropy += num_category / num_point * (\n -num_category_class / num_category * log2(num_category_class / num_category))\n return entropy",
"def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])",
"def calculate_entropy(dataset) :\n\n num_entries = len(dataset)\n label_counts = {}\n for vector in dataset :\n # the label is at the last index of the data set\n current_label = vector[-1]\n if current_label not in label_counts :\n label_counts[current_label] = 0\n label_counts[current_label] += 1\n # Calculate the entropy\n entropy = 0.0\n for label in label_counts :\n # Calculate probability of each label within the dataset\n prob_of_label = label_counts[label]/num_entries\n # Since the entropy is the negative of the sum of all probability,\n # simply subtract it\n entropy -= prob_of_label * log(prob_of_label, 2)\n return entropy",
"def get_entropy(*labels):\n entropies = [] #list of entropy values from each subset\n total = 0 #total number of datapoints\n for subset in labels:\n n = len(subset)\n total += n\n counts = np.unique(subset, return_counts=True)[1] #frequency of unique values\n entropy = np.sum([-(i/n) * np.log2(i/n) for i in counts]) #subset entropy calcuation\n entropies.append((entropy, n))\n return np.sum([(n/total) * ent for n, ent in iter(entropies)])",
"def cross_entropy_cost(output_out, target_out):\r\n total = 0\r\n for target_node in range(len(target_out)): # For each target data set\r\n for output_node in range(len(output_out)): # For each output node\r\n total += target_out[target_node][output_node] - target_out[target_node][output_node] * np.log(output_out[output_node]) - \\\r\n (1 - target_out[target_node][output_node]) * np.log(1 - output_out[output_node])\r\n\r\n total = 1 / total\r\n return total",
"def entropy(x):\n x_max, x_min = x.max(), x.min()\n assert (x_min >= 0) and (x_max <= 1)\n if x_min == x_max == 0:\n return np.float32(0.)\n # Take only non-zero values as log(0) = 0 :\n nnz_x = x[np.nonzero(x)]\n entr = -np.sum(nnz_x * np.log2(nnz_x))\n\n return entr",
"def entropy(self, mean=None, cov=1):\r\n dim, mean, cov = _process_parameters(None, mean, cov)\r\n return 1/2 * np.log(np.linalg.det(2 * np.pi * np.e * cov))",
"def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))",
"def entropy(self, **kwargs) -> TensorType:",
"def entropy(self, **kwargs) -> TensorType:",
"def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()",
"def entropy(class_probabilities):\n return sum(-p * math.log(p, 2)\n for p in class_probabilities\n if p) #ignore 0's"
]
| [
"0.71384525",
"0.69670725",
"0.6888391",
"0.679668",
"0.67822194",
"0.6742934",
"0.65603775",
"0.65472746",
"0.6542067",
"0.65256166",
"0.65150386",
"0.64690787",
"0.6459301",
"0.6450607",
"0.6447196",
"0.6431473",
"0.6423969",
"0.6371483",
"0.63533247",
"0.6341583",
"0.63393337",
"0.6316785",
"0.6307708",
"0.63019335",
"0.6272748",
"0.62685776",
"0.626699",
"0.626699",
"0.6247537",
"0.6230878"
]
| 0.8238204 | 0 |
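A minimal usage sketch for the `entropy` helper documented in the preceding row; the all-zero logits are an illustrative input, chosen because a uniform categorical over C classes has entropy log(C).

import torch
import torch.nn.functional as F

def entropy(out, dim=1, reduce='mean'):
    log_prob = F.log_softmax(out, dim=dim)
    h = -torch.sum(log_prob.exp() * log_prob, dim=dim)
    if reduce == 'none':
        return h
    if reduce == 'mean':
        return h.mean()
    if reduce == 'sum':
        return h.sum()

# Uniform logits (all zeros) give the maximum entropy log(C); here log(5) ~ 1.6094.
logits = torch.zeros(3, 5)                   # batch of 3 samples, 5 classes
print(entropy(logits))                       # tensor(1.6094)
print(entropy(logits, reduce='none').shape)  # torch.Size([3]), one value per sample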
Counts the number of parameters of `net` | def nb_parameters(net):
return sum(p.numel() for p in net.parameters()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_parameters(net):\r\n return sum(p.numel() for p in net.parameters() if p.requires_grad)",
"def get_parameter_number(net):\n # print(type(net.parameters()))\n total_num = sum(p.numel() for p in net.parameters())\n trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)\n return {'Total': total_num, 'Trainable': trainable_num}",
"def count_params(layer):\n params = get_all_params(layer)\n shapes = [p.get_value().shape for p in params]\n counts = [np.prod(shape) for shape in shapes]\n return sum(counts)",
"def calc_block_num_params2(net):\n net_params = net.collect_params()\n weight_count = 0\n for param in net_params.values():\n if (param.shape is None) or (not param._differentiable):\n continue\n weight_count += np.prod(param.shape)\n return weight_count",
"def count_parameters(model):\n return sum(p.numel() for p in model.parameters())",
"def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)",
"def num_params(self):",
"def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()",
"def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()",
"def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()",
"def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count",
"def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count",
"def count_params(model):\n total = 0\n for x in model.trainable_variables:\n total += np.prod(x.shape)\n return total",
"def count_parameters():\n total_parameters = 0\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n name = variable.name\n shape = variable.get_shape()\n #print(shape)\n #print(len(shape))\n variable_parameters = 1\n for dim in shape:\n #print(dim)\n variable_parameters *= dim.value\n print(name, [dim for dim in shape], variable_parameters)\n total_parameters += variable_parameters\n print('Number of trainable parameters = {}'.format(total_parameters))",
"def print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print(\"Total number of parameters: {}\".format(num_params))",
"def num_params(architecture): #\n \n total_parameters = 0\n for layer in range(1,len(architecture)+1):\n weight_dims = np.shape(architecture['layer{}'.format(layer)][2])\n try:\n params = weight_dims[0]*weight_dims[1]*weight_dims[2]\n except:\n try:\n params = weight_dims[0]*weight_dims[1]\n except:\n try:\n params = weight_dims[0]\n except:\n params = 0\n total_parameters += params\n return total_parameters",
"def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count",
"def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param",
"def count_layer_params(layer):\n num_params = 0\n name, param_names, dims, _, _ = layer.get_layer_info()\n nparams = len(dims)\n for j in range(nparams):\n num_params += np.prod(dims[j])\n return num_params",
"def count_parameters(sess):\n\n variables_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variables_names)\n n_params = 0\n\n for k, v in zip(variables_names, values):\n print '-'.center(140, '-')\n print '{:60s}\\t\\tShape: {:20s}\\t{:20} parameters'.format(k, v.shape, v.size)\n\n n_params += v.size\n\n print '-'.center(140, '-')\n print 'Total # parameters:\\t\\t{}\\n\\n'.format(n_params)\n\n return n_params",
"def count_parameters(model: Tuple[tuple, tuple, tuple, tuple, str]) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)",
"def count_parameters(model, tunable_only: bool = True) -> int:\n if tunable_only:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in model.parameters())",
"def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num",
"def count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)",
"def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)",
"def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)",
"def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6",
"def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6",
"def num_parameters(model):\n return sum([param.nelement() for param in model.parameters()])",
"def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params"
]
| [
"0.80932635",
"0.78887635",
"0.7600187",
"0.7538338",
"0.7526785",
"0.7526244",
"0.74478954",
"0.74346536",
"0.74346536",
"0.74346536",
"0.73890316",
"0.7386938",
"0.7340822",
"0.7327687",
"0.7321227",
"0.7281321",
"0.7249664",
"0.7189953",
"0.7161822",
"0.7148914",
"0.71389216",
"0.71247584",
"0.7098886",
"0.70864433",
"0.70805454",
"0.70805454",
"0.7072524",
"0.7072524",
"0.7069525",
"0.70231825"
]
| 0.8331685 | 0 |
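A minimal usage sketch for `nb_parameters` from the preceding row; the `nn.Linear(10, 5)` layer is a hypothetical example module, not part of the dataset.

import torch.nn as nn

def nb_parameters(net):
    return sum(p.numel() for p in net.parameters())

# A Linear(10, 5) layer has 10*5 weights plus 5 biases.
net = nn.Linear(10, 5)
print(nb_parameters(net))  # 55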
Get a submodule at any depth of a net by its name | def layer_by_name(net, name):
for l in net.named_modules():
if l[0] == name:
return l[1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def returnRigNetworkNode(self):\n modules = []\n networkNodes = cmds.ls(type=\"network\")\n for node in networkNodes:\n attrs = cmds.listAttr(node)\n if \"moduleName\" in attrs:\n if cmds.getAttr(node + \".moduleName\") == self.name:\n characterNode = cmds.listConnections(node + \".parent\")[0]\n if cmds.objExists(characterNode + \".namespace\"):\n if cmds.getAttr(characterNode + \".namespace\") == self.namespace.partition(\":\")[0]:\n networkNode = node\n return networkNode\n else:\n return None",
"def get_submodule(self, submodule_basename):\n\n return self._submodule_basename_to_node[submodule_basename]",
"def imp_find_dotted_module(name):\n path = None\n for x in name.split('.'):\n result = imp.find_module(x, path)\n path = [result[1]]\n return result",
"def imp_find_dotted_module(name):\n path = None\n for x in name.split('.'):\n result = imp.find_module(x, path)\n path = [result[1]]\n return result",
"def _look_in_package(tree: dict, module_path: str, name: str, level: Optional[int] = None) -> Union[str, None]:\n parent_path = os.path.dirname(module_path)\n if level is not None:\n for _ in range(level - 1):\n parent_path = os.path.dirname(parent_path)\n parent = find_tree(tree, lambda x, p: x[\"path\"] in [p, os.path.join(p, \"__init__.py\")], args=(parent_path,))\n if parent:\n if parent[\"fullname\"] in [name, \"{}.__init__\".format(name)]:\n return parent[\"path\"]\n for child in parent[\"children\"].values():\n if child[\"name\"] == name:\n return child[\"path\"]\n target = find_tree(tree, lambda x, f: x[\"fullname\"] == f, args=(\"{}.{}\".format(parent[\"fullname\"], name),))\n if target:\n return target[\"path\"]\n return None",
"def findModule(name):",
"def _get_from_nest(nest, path):\n if not path or not nest:\n return nest\n return _get_from_nest(nest.get(path[0], None), path[1:])",
"def _fetch_submodule(opts, name, cache_dir, revision, site):\n git_dir = '--git-dir=' + cache_dir\n\n # check if we have the target revision cached; if so, submodule is ready\n if os.path.isdir(cache_dir) and not opts.ignore_cache:\n if not revision:\n return _sync_git_origin(cache_dir, site)\n\n if revision_exists(git_dir, revision) in REVISION_EXISTS:\n return _sync_git_origin(cache_dir, site)\n\n log('processing submodule (package: {}) {}...', opts.name, name)\n sys.stdout.flush()\n\n # validate any cache directory (if one exists)\n has_cache, bad_validation = _validate_cache(cache_dir)\n if bad_validation:\n return None\n\n # if we have no cache for this repository, build one\n if not has_cache:\n if not ensure_dir_exists(cache_dir):\n return False\n\n if not _create_bare_git_repo(cache_dir):\n return False\n\n # ensure configuration is properly synchronized\n if not _sync_git_origin(cache_dir, site):\n return False\n\n # fetch sources for this submodule\n desc = 'submodule ({}): {}'.format(opts.name, name)\n return _fetch_srcs(opts, cache_dir, revision, desc=desc)",
"def _load_tail(self, package, submodule_name):\n self.msgin(4, \"load_tail\", package, submodule_name)\n\n submodule = package\n while submodule_name:\n i = submodule_name.find('.')\n if i < 0:\n i = len(submodule_name)\n head, submodule_name = submodule_name[:i], submodule_name[i+1:]\n mname = \"%s.%s\" % (submodule.identifier, head)\n submodule = self._safe_import_module(head, mname, submodule)\n\n if submodule is None:\n # FIXME: Why do we no longer return a MissingModule instance?\n # result = self.createNode(MissingModule, mname)\n self.msgout(4, \"raise ImportError: No module named\", mname)\n raise ImportError(\"No module named \" + repr(mname))\n\n self.msgout(4, \"load_tail ->\", submodule)\n return submodule",
"def _get_info(self, fullmodname):\n parts = fullmodname.split('.')\n submodname = parts[-1]\n modpath = '/'.join(parts)\n for suffix, is_package in _SEARCH_ORDER:\n relpath = modpath + suffix\n try:\n self.datablocks[relpath]\n except KeyError:\n pass\n else:\n return submodname, is_package, relpath\n msg = ('Can\\'t find module %s in .blend %r' %\n (fullmodname, self.path_entry))\n ##logging.debug(msg)\n raise BlendImportError(msg)",
"def subNode(self, name):\n for nd in self.kids:\n if nd.name == name:\n return nd\n raise LookupError( 'name not found \"' + name + '\"' )",
"def _find_gitmodules(p4, stream_name):\n parent = p4gf_util.first_dict(p4.run('stream', '-ov', stream_name))\n for line in parent['View']:\n if '.gitmodules' in line:\n # return everything up to the ' .gitmodules' at the end of the line\n return line[:-12]\n return None",
"def get_submodule_or_none(self, submodule_basename):\n\n return self._submodule_basename_to_node.get(submodule_basename)",
"def find_module (self, name, path = None):\n return self if name in self.containments else None",
"def find_ResNet_layer(arch, target_layer_name):\n\n hierarchy = target_layer_name.rsplit(\"_\",1)\n \n\n if target_layer_name.rsplit(\"_\",1)[0] == \"layer1\":\n target_layer = arch.layer1\n elif target_layer_name.rsplit(\"_\",1)[0] == \"layer2\":\n target_layer = arch.layer2\n elif target_layer_name.rsplit(\"_\",1)[0] == \"layer3\":\n target_layer = arch.layer3\n elif target_layer_name.rsplit(\"_\",1)[0] == \"layer4\":\n target_layer = arch.layer4\n \n# print(target_layer)\n if len(hierarchy) == 2:\n target_layer = target_layer[int(hierarchy[1])]\n\n return target_layer",
"def get_public_parent_module(node_a: NodeStruct, node_b: NodeStruct):\n find = False\n b_onnx_name = node_b.onnx_name\n tmp = node_a\n while not find:\n parent_struct = tmp.parent_module_struct\n if b_onnx_name in parent_struct.onnx_names:\n find = True\n tmp = parent_struct\n return tmp",
"def find_local_module(root, name):\n\n return pm.find_local_module(root, name)",
"def _get_module(dotted_path):\n package, module = dotted_path.rsplit('.', 1)\n return getattr(import_module(package), module)",
"def _resolve_name(name, package, level):\n if not hasattr(package, 'rindex'):\n raise ValueError(\"'package' not set to a string\")\n dot = len(package)\n for x in xrange(level, 1, -1):\n try:\n dot = package.rindex('.', 0, dot)\n except ValueError:\n raise ValueError(\"attempted relative import beyond top-level package\")\n return \"%s.%s\" % (package[:dot], name)",
"def locate_own_layer(name, group):\n for child in group.children():\n if isinstance(child, QgsLayerTreeLayer):\n if child.name() == name:\n return child\n return None",
"def get_most_imported_submodule(self, n=None, use_count_less_than=None, use_count_more_than=None, _filter=None):\n\t\treturn self._get_most_imported_helper(\"SUBMODULE\", n, use_count_less_than, use_count_more_than, _filter)",
"def get_code(self, parent, modname, fqname):\n\n if self.verbose:\n print >> sys.stderr, '-'*78\n print >> sys.stderr, \"Importing %s from the network ...\" % fqname\n print >> sys.stderr, '-'*78\n\n\n out = None\n for baseurl in self.path:\n\n proto_url = '/'.join([baseurl] + fqname.split('.'))\n\n\n # Is this a package?\n # ==================\n # If so, we want to look for __init__.py.\n\n is_package = self.download(proto_url + '/')\n if is_package:\n proto_url += '/__init__'\n\n\n # Try to find some code.\n # ======================\n\n for suffix in imp.get_suffixes():\n url = proto_url + suffix[0]\n fp = self.download(url)\n if fp is not None:\n\n # Prepare elements for imputil.Importer.\n # ======================================\n\n mod = imp.load_module(modname, fp, fp.name, suffix)\n out = (is_package, mod, {})\n break\n\n if out is not None:\n break\n\n return out",
"def _resolve_name(name, package, level):\r\n if not hasattr(package, 'rindex'):\r\n raise ValueError(\"'package' not set to a string\")\r\n dot = len(package)\r\n for x in xrange(level, 1, -1):\r\n try:\r\n dot = package.rindex('.', 0, dot)\r\n except ValueError:\r\n raise ValueError(\"attempted relative import beyond top-level \"\r\n \"package\")\r\n return \"%s.%s\" % (package[:dot], name)",
"def _find_module(model, mod_name):\n for name, module in model.named_modules():\n if name == mod_name:\n return module\n return None",
"def returnNetworkNode(self):\n\n networkNodes = cmds.ls(type=\"network\")\n for node in networkNodes:\n attrs = cmds.listAttr(node)\n if \"moduleName\" in attrs:\n if cmds.getAttr(node + \".moduleName\") == self.name:\n networkNode = node\n\n return networkNode",
"def get_least_imported_submodule(self, n=None, use_count_less_than=None, use_count_more_than=None, _filter=None):\n\t\treturn self._get_most_imported_helper(\"SUBMODULE\", -n, use_count_less_than, use_count_more_than, _filter)",
"def _get_submodules():\n import sh\n git = sh.git.bake(_tty_out=False)\n submodules = git.submodule().strip().split(\"\\n\")\n return [\n line.strip()[1:].split()[1]\n for line in submodules\n ]",
"def get_network_with_name(self, name):\n for network in self.networks:\n if network.name == name:\n return network\n return None",
"def _get_network(name):\n\n if name not in _NAME_TO_NETS:\n raise ValueError('Network name [%s] not recognized.' % name)\n return _NAME_TO_NETS[name].model",
"def test_module_get_parent(self):\n m = Module('foo')\n assert m.parent_module is None\n\n m = Module('foo.bar')\n assert str(m.parent_module) == 'foo'\n\n m = Module('foo.bar.qux')\n assert str(m.parent_module) == 'foo.bar'\n assert str(m.module_name) == 'qux'"
]
| [
"0.597291",
"0.5945646",
"0.5883425",
"0.5883425",
"0.5880742",
"0.58022046",
"0.5745263",
"0.5617044",
"0.5609998",
"0.55480266",
"0.5542549",
"0.55382264",
"0.55318534",
"0.5530302",
"0.55277735",
"0.5472227",
"0.5464447",
"0.54170537",
"0.5414487",
"0.54097706",
"0.54001594",
"0.53798294",
"0.53730106",
"0.5359821",
"0.535259",
"0.53200406",
"0.53194654",
"0.52908486",
"0.5273338",
"0.5269077"
]
| 0.677068 | 0 |
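A minimal usage sketch for `layer_by_name` from the preceding row; the small `nn.Sequential` and the dotted name '1.1' are illustrative assumptions, not part of the dataset.

import torch.nn as nn

def layer_by_name(net, name):
    for l in net.named_modules():
        if l[0] == name:
            return l[1]

net = nn.Sequential(
    nn.Conv2d(3, 8, 3),
    nn.Sequential(nn.ReLU(), nn.Conv2d(8, 8, 3)),
)
print(layer_by_name(net, '1.1'))      # the Conv2d nested two levels deep
print(layer_by_name(net, 'missing'))  # None when no module has that name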
Cycle through `iterable` forever | def forever(iterable):
it = iter(iterable)
while True:
try:
yield next(it)
except Exception as e:
print(e)
it = iter(iterable) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n while True:\n yield from iterator",
"def cycle(obj):\r\n while True:\r\n for item in obj:\r\n yield item",
"def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item",
"def pick(iterable):\n for element in iterable:\n yield element\n while True:\n yield element",
"def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample",
"def color_cycle():\n while True:\n for color in colors:\n yield color",
"async def _aiter_sync(iterable: Iterable[T]) -> AsyncIterator[T]:\n for item in iterable:\n yield item",
"def generator_wrapper(iterable):\n\n num_items = len(iterable)\n for idx in range(num_items):\n yield iterable[idx]",
"def itime(iterable, seconds):\n items = iter(iterable)\n\n end = time.time() + seconds\n yield items.next()\n\n for item in itertools.takewhile(lambda _: time.time() < end, items):\n yield item",
"def forever():\n\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing)\n return animate",
"def loop(self):\n yield self\n e = self.next\n while e is not self:\n yield e\n e = e.next",
"def iterate(func, x):\n while True:\n x = func(x)\n yield x",
"def forever(shard):\n def repeat(*args, **kwargs):\n while True:\n for delay in shard(*args, **kwargs):\n yield delay\n return repeat",
"def cooperative_iter(citer):\n try:\n for chunk in citer:\n sleep(0)\n yield chunk\n except Exception as err:\n msg = (_(\"Error: cooperative_iter exception %(error)s\") %\n dict(error=err))\n LOG.error(msg)\n raise",
"def infinite_odd_generator():\n current = 1\n while True:\n yield current\n current = current + 2",
"def cycle(seq, n=None):\n if n is not None:\n return Iter(_ncycle(n, seq))\n return Iter(itertools.cycle(seq))",
"def __iter__(self):\n while True:\n if self.stop:\n return\n for item in self.get_next_batch():\n yield item",
"def one_shot_iterator(dataloader):\n while True:\n for data in dataloader:\n yield data",
"def _cycle_loop(self):\n cycle, idx = self.cycling, self.current_idx # Local copy to avoid race condition updates\n\n if cycle: # Iterate to next command\n idx = (idx+1) % len(self)\n self.current_idx = idx\n self.updated = True\n\n time.sleep(self.cycle_interval)",
"def __iter__(self):\n while True:\n for item in (self[i] for i in range(len(self))):\n yield item",
"def dewindowify(iterable):\n for _, current, _ in iterable:\n yield current",
"def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)",
"def iterate(f, x):\n while True:\n yield x\n x = f(x)",
"def dispatch_next(self):\r\n self._dispatch_amount += 1\r\n while self._dispatch_amount:\r\n try:\r\n # XXX: possible race condition shuffling the order of\r\n # dispatches in the next two lines.\r\n func, args, kwargs = next(self._original_iterable)\r\n self.dispatch(func, args, kwargs)\r\n self._dispatch_amount -= 1\r\n except ValueError:\r\n \"\"\" Race condition in accessing a generator, we skip,\r\n the dispatch will be done later.\r\n \"\"\"\r\n except StopIteration:\r\n self._iterating = False\r\n self._original_iterable = None\r\n return",
"def __iter__(self):\n i = self.head\n while True:\n if not i:\n break\n yield i\n i = i.next\n if not i:\n break",
"def infinite_increment():\n i = 0\n while 1:\n yield i\n i += 1",
"def iter_py():\n s = \"Hello, World!\"\n it = iter(s)\n while True:\n try:\n print(next(it))\n except:\n break\n\n ## Output\n # H\n # e\n # l\n # l\n # o\n # ,\n #\n # W\n # o\n # r\n # l\n # d\n # !",
"def foreach(func, iterable):\n\n\tfor x in iterable:\n\t\tfunc(x)",
"def swarm(iterable, download=None):\n\n return [e for e in each(iterable, download=download)]",
"def __iter__(self):\n cursor = self.first()\n while cursor is not None:\n yield cursor.element()\n cursor = self.after(cursor)"
]
| [
"0.8115654",
"0.7242247",
"0.70141625",
"0.6768458",
"0.6365863",
"0.63156235",
"0.6294322",
"0.6273144",
"0.6229883",
"0.6152843",
"0.61099154",
"0.60607177",
"0.6034094",
"0.6027254",
"0.5995027",
"0.5991488",
"0.59734607",
"0.59694564",
"0.5939427",
"0.5904821",
"0.59019804",
"0.58876383",
"0.58676744",
"0.5835279",
"0.58302796",
"0.5829687",
"0.5804123",
"0.5694936",
"0.5694416",
"0.56915736"
]
| 0.7716013 | 1 |
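A minimal usage sketch for the `forever` generator from the preceding row; note that the helper's `print(e)` emits a blank line each time the underlying iterator is exhausted and restarted.

def forever(iterable):
    it = iter(iterable)
    while True:
        try:
            yield next(it)
        except Exception as e:
            print(e)
            it = iter(iterable)

# Draw more items than the source holds; the generator restarts instead of stopping.
gen = forever([1, 2, 3])
print([next(gen) for _ in range(7)])  # [1, 2, 3, 1, 2, 3, 1]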
Return the Gram matrix of `m` | def gram(m):
m1 = m
m2 = m.t()
g = torch.mm(m1, m2) / m.shape[1]
return g | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_GS_matrix(self, matrix):\n GS_matrix = Matrix(QQ, matrix.transpose().gram_schmidt()[0]).transpose()\n return GS_matrix",
"def bgram(m):\n m = m.view(m.shape[0], m.shape[1], -1)\n m1 = m\n m2 = m.permute(0, 2, 1)\n g = torch.bmm(m1, m2) / (m.shape[1] * m.shape[2])\n return g",
"def gains_vector(m):\n\n n_ant, n_chan, n_dir, _ = m.shape\n row_shape = n_ant * n_chan * n_dir\n g = np.zeros((2*row_shape), dtype=np.complex128)\n\n for nu in range(n_chan):\n for s in range(n_dir):\n for a in range(n_ant):\n row = a + n_ant * s + n_ant * n_dir * nu \n g[row] = m[a, nu, s, 0]\n g[row + row_shape] = m[a, nu, s, 1]\n \n return g",
"def gram_matrix(x):\n # get input's dimension\n b, c, h, w = x.size()\n\n # calculate the gram martix\n features = x.view(b * c, h * w)\n G = torch.mm(features, features.t())\n\n return G.div(b * c * h * w)",
"def Gmat(self, phi):\n return self._Gmat_cache * rotmat(3, phi)",
"def gram_matrix(input_data):\n a, b, c, d = input_data.size()\n features = input_data.view(b, a * c * d) \n G = torch.mm(features, features.t())\n return G.div(a * b * c * d)",
"def gp(M, N):\n\n return M*N",
"def get_l(m):\n L = m.copy()\n for i in range(L.shape[0]):\n L[i, i] = 1\n L[i, i+1:] = 0\n return np.matrix(L)",
"def gram_matrix(input_tensor):\r\n\r\n temp = tf.squeeze(input_tensor)\r\n return tf.matmul(temp, tf.transpose(temp))",
"def true_gains_vector(m):\n\n n_ant, n_chan, n_dir = m.shape\n row_shape = n_ant * n_chan * n_dir\n g = np.zeros((2*row_shape), dtype=np.complex128)\n\n for nu in range(n_chan):\n for s in range(n_dir):\n for a in range(n_ant):\n row = a + n_ant * s + n_ant * n_dir * nu \n g[row] = m[a, nu, s]\n g[row + row_shape] = m[a, nu, s].conj()\n\n return g",
"def MS_to_galactic():\n return matrix_transpose(MS_MATRIX)",
"def _convert_matrix(m):\n\n return [m[0][0], m[0][1], m[0][2], m[0][3],\n m[2][0], m[2][1], m[2][2], m[2][3],\n -m[1][0], -m[1][1], -m[1][2], -m[1][3],\n m[3][0], m[3][1], m[3][2], m[3][3]]",
"def gram_matrix(features, normalize=True):\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****",
"def _AffineGrothendieck(self, w,m):\n return sum(self._G_to_km_on_basis_single_level(w,j) for j in range(w.length(),m+1))",
"def cell2Gmat(cell):\n g = fillgmat(cell)\n G = nl.inv(g)\n return G, g",
"def G_ind(m, williams=False):\r\n f_ln_f_elements = safe_sum_p_log_p(m)\r\n f_ln_f_rows = safe_sum_p_log_p(sum(m, 0))\r\n f_ln_f_cols = safe_sum_p_log_p(sum(m, 1))\r\n tot = sum(ravel(m))\r\n f_ln_f_table = tot * log(tot)\r\n\r\n df = (len(m) - 1) * (len(m[0]) - 1)\r\n G = 2 * (f_ln_f_elements - f_ln_f_rows - f_ln_f_cols + f_ln_f_table)\r\n if williams:\r\n q = 1 + ((tot * sum(1.0 / sum(m, 1)) - 1) * (tot * sum(1.0 / sum(m, 0)) - 1) /\r\n (6 * tot * df))\r\n G = G / q\r\n return G, chi_high(max(G, 0), df)",
"def calculate_gram_matrix(x):\n\n batch_size, depth, height, width = x.size()\n x = x.view(depth, height*width)\n gram_matrix = torch.mm(x, x.t())\n return gram_matrix",
"def getGMfromMI(self, mmi, imt, dists=None, mag=None): # noqa\r\n lfact = np.log10(np.e)\r\n c = self._getConsts(imt)\r\n mmi = mmi.copy()\r\n # Set nan values to 1\r\n ix_nan = np.isnan(mmi)\r\n mmi[ix_nan] = 1.0\r\n\r\n pgm = np.zeros_like(mmi)\r\n dpgm_dmmi = np.zeros_like(mmi)\r\n dummy_variable = np.ones(len(mmi))\r\n\r\n #\r\n # MMI to PGM\r\n #\r\n pgm = np.power(10, (mmi - c['C1']) / c['C2'])\r\n dpgm_dmmi = 1.0 / (c['C2'] * lfact) * dummy_variable\r\n\r\n if imt != self._pgv:\r\n units = 981.0\r\n else:\r\n units = 1.0\r\n\r\n # Return a ln(amp) value. Convert PGA to from cm/s^2 to g\r\n pgm /= units\r\n pgm = np.log(pgm)\r\n\r\n # Set nan values back from 1 to nan\r\n pgm[ix_nan] = np.nan\r\n dpgm_dmmi[ix_nan] = np.nan\r\n\r\n return pgm, dpgm_dmmi",
"def Gram_matrix(self, F, F1, lower, upper):\r\n assert self.input_dim == 1\r\n def L(x, i):\r\n return(1. / self.lengthscale * F[i](x) + F1[i](x))\r\n n = F.shape[0]\r\n G = np.zeros((n, n))\r\n for i in range(n):\r\n for j in range(i, n):\r\n G[i, j] = G[j, i] = integrate.quad(lambda x : L(x, i) * L(x, j), lower, upper)[0]\r\n Flower = np.array([f(lower) for f in F])[:, None]\r\n return(self.lengthscale / 2. / self.variance * G + 1. / self.variance * np.dot(Flower, Flower.T))",
"def M_g(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating g\", file=self.logfile)\n M_mu1 = np.lib.stride_tricks.as_strided(self.mu_pad,\n shape=[self.P+1, self.L_h],\n strides=[self.mu_pad.strides[-1], self.mu_pad.strides[-1]])\n\n M_mu1 = M_mu1[::-1,:]\n M_mu2 = np.transpose(M_mu1[1:,:])\n M_mu1 = M_mu1*self.e2\n\n M_mu = np.dot(M_mu1, M_mu2)\n v_mu = M_mu[0,:]\n M_mu = M_mu[1:,:]\n\n M_R = np.zeros((self.P,self.P+1))\n for p in range(1,self.P+1):\n for q in range(0,self.P+1):\n M_R[p-1,q] = np.sum(np.diag(self.R, q-p)[:self.L_h-max(p,q)]*self.e2[max(p,q):self.L_h])\n\n v_R = M_R[:,0]\n M_R = M_R[:,1:]\n\n self.alpha_g = np.dot(np.linalg.inv(M_mu + M_R), v_mu+v_R)\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self._propagate_A()",
"def _get_gram_matrix(self, input_tensor):\n num_channels = int(input_tensor.shape[-1])\n input_vectors = tf.reshape(input_tensor, [-1, num_channels])\n num_vectors = tf.shape(input_vectors)[0]\n gram = tf.matmul(input_vectors, input_vectors, transpose_a=True)\n return gram / tf.cast(num_vectors, tf.float32)",
"def gram_matrix(input_tensor):\n\n # calculate the gram matrix of the input tensor\n gram = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor) \n\n # get the height and width of the input tensor\n input_shape = tf.shape(input_tensor) \n height = input_shape[1] \n width = input_shape[2] \n\n # get the number of locations (height times width), and cast it as a tf.float32\n num_locations = tf.cast(height * width, tf.float32)\n\n # scale the gram matrix by dividing by the number of locations\n scaled_gram = gram / num_locations\n \n return scaled_gram",
"def hamming(M):\n if M < 1:\n return array([])\n if M == 1:\n return ones(1,float)\n n = arange(0,M)\n return 0.54-0.46*cos(2.0*pi*n/(M-1))",
"def amat(M):\n aa, _ = a_matrix(M)\n return aa",
"def Gram_matrix(self,F,F1,F2,F3,lower,upper):\r\n assert self.input_dim == 1\r\n def L(x,i):\r\n return(5*np.sqrt(5)/self.lengthscale**3*F[i](x) + 15./self.lengthscale**2*F1[i](x)+ 3*np.sqrt(5)/self.lengthscale*F2[i](x) + F3[i](x))\r\n n = F.shape[0]\r\n G = np.zeros((n,n))\r\n for i in range(n):\r\n for j in range(i,n):\r\n G[i,j] = G[j,i] = integrate.quad(lambda x : L(x,i)*L(x,j),lower,upper)[0]\r\n G_coef = 3.*self.lengthscale**5/(400*np.sqrt(5))\r\n Flower = np.array([f(lower) for f in F])[:,None]\r\n F1lower = np.array([f(lower) for f in F1])[:,None]\r\n F2lower = np.array([f(lower) for f in F2])[:,None]\r\n orig = 9./8*np.dot(Flower,Flower.T) + 9.*self.lengthscale**4/200*np.dot(F2lower,F2lower.T)\r\n orig2 = 3./5*self.lengthscale**2 * ( np.dot(F1lower,F1lower.T) + 1./8*np.dot(Flower,F2lower.T) + 1./8*np.dot(F2lower,Flower.T))\r\n return(1./self.variance* (G_coef*G + orig + orig2))",
"def designMatrix(self,x,m):\n\n phi = []\n\n for i in x:\n matric = []\n for j in range(0, m + 1):\n matric.append(np.power(i,j))\n phi.append(matric)\n return np.asarray(phi)",
"def gram_schmidt(mat_a):\n # NOTE: We will use the same variable names as the one in the\n # pseudo code for clarity\n rows_count = mat_a.shape[0]\n\n u = mat_a.copy()\n r = np.zeros_like(u)\n q = np.zeros_like(u)\n for i in range(rows_count):\n u_i = u[:, i]\n r[i, i] = np.linalg.norm(u_i)\n q[:, i] = u_i / r[i, i] if r[i, i] != 0 else 0\n q_i = q[:, i]\n\n r[i, i + 1:] = q_i.T.dot(u[:, i + 1:])\n # np.outer will multiply q_i by each number in r[i, i + 1:], and create\n # a matrix that each column is a result of that multiplication\n u[:, i + 1:] -= np.outer(q_i, r[i, i + 1:])\n\n return q, r",
"def convImg2gMatrix(self):\n self.file = self.file.convert(\"L\")\n matrix = numpy.asarray(self.file)\n # convert image to gray scale image\n # in the array, 0 represents black\n # 255 represents white\n # array[a,b] => a represents the line, b represents the columns\n # array[0,0] is the pixel in the top-left hand corner\n return matrix",
"def Matrix_G(r,xi,E,ops,j):\r\n #Array of multipliers for operators in V14\r\n #----------------------------------------\r\n raw_pot = av14.V14(r)\r\n #----------------------------------------\r\n \r\n #Operator Values \r\n #---------------------------------------- \r\n op00,op01,op10,op11 = ops\r\n \r\n #Matrix Values\r\n #----------------------------------------\r\n G00 = (j-1)*j/r**2 + xi*(np.sum(op00*raw_pot) - E)\r\n G01 = xi*(np.sum(op01*raw_pot))\r\n G10 = G01#xi*(np.sum(operators10*raw_pot))\r\n G11 = (j+1)*(j+2)/r**2 + xi*(np.sum(op11*raw_pot) - E)\r\n #Generate and return (2x2)\r\n #----------------------------------------\r\n return np.array([[G00,G01],[G10,G11]])",
"def blackman(M):\n if M < 1:\n return array([])\n if M == 1:\n return ones(1, float)\n n = arange(0,M)\n return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))"
]
| [
"0.6688958",
"0.6615292",
"0.64081323",
"0.63711166",
"0.6317854",
"0.62635905",
"0.61815953",
"0.6151277",
"0.61144227",
"0.609927",
"0.60449326",
"0.60215586",
"0.6005096",
"0.59399354",
"0.5856431",
"0.58205944",
"0.58048844",
"0.5713924",
"0.5711078",
"0.56965727",
"0.56924176",
"0.5686094",
"0.5677701",
"0.5672724",
"0.56682074",
"0.56544065",
"0.5641967",
"0.56209874",
"0.5605658",
"0.5602428"
]
| 0.71347463 | 0 |
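A minimal usage sketch for `gram` from the preceding row; the 4x100 feature matrix is an arbitrary illustrative shape (4 flattened feature maps of 100 values each), not part of the dataset.

import torch

def gram(m):
    m1 = m
    m2 = m.t()
    g = torch.mm(m1, m2) / m.shape[1]
    return g

feats = torch.randn(4, 100)  # 4 feature maps, each flattened to 100 values
g = gram(feats)
print(g.shape)               # torch.Size([4, 4]): pairwise feature correlations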
Return the batched Gram matrix of `m` | def bgram(m):
m = m.view(m.shape[0], m.shape[1], -1)
m1 = m
m2 = m.permute(0, 2, 1)
g = torch.bmm(m1, m2) / (m.shape[1] * m.shape[2])
return g | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gram_matrix(input_tensor):\r\n\r\n temp = tf.squeeze(input_tensor)\r\n return tf.matmul(temp, tf.transpose(temp))",
"def gram(m):\n m1 = m\n m2 = m.t()\n g = torch.mm(m1, m2) / m.shape[1]\n return g",
"def bartlett(M):\r\n return bartlett_(M)",
"def gram_matrix(input_data):\n a, b, c, d = input_data.size()\n features = input_data.view(b, a * c * d) \n G = torch.mm(features, features.t())\n return G.div(a * b * c * d)",
"def Gram(layer, batch_size):\n if type(layer).__module__ == 'numpy':\n bs, h, w, c = layer.shape\n else:\n bs, h, w, c = layer.get_shape().as_list()\n if bs == 1 and batch_size > 1:\n rep_layer = tf.pack([layer for k in range(batch_size)])\n flat = tf.reshape(rep_layer, [batch_size, -1, c])\n else:\n flat = tf.reshape(layer, [bs, -1, c])\n return tf.batch_matmul(tf.transpose(flat, [0,2,1]), flat)/(h*w*c)",
"def gains_vector(m):\n\n n_ant, n_chan, n_dir, _ = m.shape\n row_shape = n_ant * n_chan * n_dir\n g = np.zeros((2*row_shape), dtype=np.complex128)\n\n for nu in range(n_chan):\n for s in range(n_dir):\n for a in range(n_ant):\n row = a + n_ant * s + n_ant * n_dir * nu \n g[row] = m[a, nu, s, 0]\n g[row + row_shape] = m[a, nu, s, 1]\n \n return g",
"def gram_matrix(x):\n # get input's dimension\n b, c, h, w = x.size()\n\n # calculate the gram martix\n features = x.view(b * c, h * w)\n G = torch.mm(features, features.t())\n\n return G.div(b * c * h * w)",
"def calculate_gram_matrix(x):\n\n batch_size, depth, height, width = x.size()\n x = x.view(depth, height*width)\n gram_matrix = torch.mm(x, x.t())\n return gram_matrix",
"def ttm(t, m, k):\n\n dim_list = [] # initialize a list to save dimension index to transpose the tensor reshapped from 2D matrix\n shape_list = [] # initialize a list to save the dimensions to reshape 2D matrix back to tensor\n total_dim = len(t.shape)\n for i in range(total_dim):\n dim_list.append((k - i) % total_dim)\n shape_list.append(t.shape[(k - i) % total_dim])\n dim_order = tuple(dim_list)\n shape_list[0] = m.shape[0]\n\n t_unfold = unfold_axis(t, k)\n t_mul = np.matmul(m, t_unfold)\n t_mul = np.reshape(t_mul,tuple(shape_list))\n t_mul = np.transpose(t_mul, dim_order)\n\n return t_mul",
"def gram_matrix(input_tensor):\n\n # calculate the gram matrix of the input tensor\n gram = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor) \n\n # get the height and width of the input tensor\n input_shape = tf.shape(input_tensor) \n height = input_shape[1] \n width = input_shape[2] \n\n # get the number of locations (height times width), and cast it as a tf.float32\n num_locations = tf.cast(height * width, tf.float32)\n\n # scale the gram matrix by dividing by the number of locations\n scaled_gram = gram / num_locations\n \n return scaled_gram",
"def create_matrix_B(m,k,alpha=8,beta=.5):\n\n\tn = m*k\n\n\t#define draw probabilities for intercommunity and intracommunity edges\n\tp = alpha * math.log(m) / m\n\tq = beta * math.log(m) / m\n\n\t#create true label of communities\n\tg = []\n\tfor i in range(k):\n\t\ttemp = [i]*m\n\t\tg.extend(temp)\n\n\t#adjacency matrix\n\tA = np.zeros([n,n])\n\n\tfor r in range(n):\n\t\tfor c in range(r+1,n):\n\t\t\t#in the same community if they have the same value\n\t\t\tif g[r] == g[c]:\n\t\t\t\tA[r,c] = np.random.binomial(1,p)\n\t\t\t\tA[c,r] = A[r,c]\t\t\n\t\t\telse:\n\t\t\t\tA[r,c] = np.random.binomial(1,q)\n\t\t\t\tA[c,r] = A[r,c]\n\n\tB = 2*A - (np.ones([n,n]) - np.identity(n))\n\t\t\t\t\n\treturn B,g",
"def gram_matrix(features, normalize=True):\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****",
"def gram(tensor):\n B, C, H, W = tensor.shape\n x = tensor.view(B, C, H * W)\n x_t = x.transpose(1, 2)\n return torch.bmm(x, x_t) / (C * H * W)",
"def get_l(m):\n L = m.copy()\n for i in range(L.shape[0]):\n L[i, i] = 1\n L[i, i+1:] = 0\n return np.matrix(L)",
"def block_prior(m):\n n = 2 * m\n d = np.zeros((m, m, n, n))\n for i in range(m):\n for j in range(m):\n ii = 2 * i\n jj = 2 * j\n d[i, j, ii:ii + 2, jj:jj + 2] = 1\n return d.reshape(m * m, n * n)",
"def convert_bmm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Y\")[0])\n y = _op.transpose(y, [0, 2, 1])\n out = _op.nn.batch_matmul(x, y)\n g.add_node(op.output(\"Out\")[0], out)",
"def _compute_b_matrix(self) -> None:\n self.b_matrix = self._kronecker_product(tf.eye(self.n_points_int,\n dtype=tf.float64),\n self.g_matrix)\n self.b_matrix = tf.reshape(self.b_matrix,\n [self.n_points, self.dimensionality,\n self.n_points, self.dimensionality])\n self.b_matrix = tf.transpose(self.b_matrix, [1, 0, 3, 2])\n self.b_matrix = tf.reshape(self.b_matrix,\n [self.n_points * self.dimensionality,\n self.n_points * self.dimensionality])\n return",
"def _convert_matrix(m):\n\n return [m[0][0], m[0][1], m[0][2], m[0][3],\n m[2][0], m[2][1], m[2][2], m[2][3],\n -m[1][0], -m[1][1], -m[1][2], -m[1][3],\n m[3][0], m[3][1], m[3][2], m[3][3]]",
"def true_gains_vector(m):\n\n n_ant, n_chan, n_dir = m.shape\n row_shape = n_ant * n_chan * n_dir\n g = np.zeros((2*row_shape), dtype=np.complex128)\n\n for nu in range(n_chan):\n for s in range(n_dir):\n for a in range(n_ant):\n row = a + n_ant * s + n_ant * n_dir * nu \n g[row] = m[a, nu, s]\n g[row + row_shape] = m[a, nu, s].conj()\n\n return g",
"def generate_GS_matrix(self, matrix):\n GS_matrix = Matrix(QQ, matrix.transpose().gram_schmidt()[0]).transpose()\n return GS_matrix",
"def gen_matrix(e):\n\tif e < 1:\n\t\treturn None\n\tm_list = [[[1, 2], [3, 0]]]\n\t_b = m_list[0]\n\tfor n in xrange(1, e):\n\t\tm = m_list[n - 1]\n\t\tm_list.append(\n\t\t\t[\n\t\t\t\t[4 * i + _b[0][0] for i in m[0]] + [4 * i + _b[0][1] for i in m[0]],\n\t\t\t\t[4 * i + _b[0][0] for i in m[1]] + [4 * i + _b[0][1] for i in m[1]],\n\t\t\t\t[4 * i + _b[1][0] for i in m[0]] + [4 * i + _b[1][1] for i in m[0]],\n\t\t\t\t[4 * i + _b[1][0] for i in m[1]] + [4 * i + _b[1][1] for i in m[1]],\n\t\t\t]\n\t\t)\n\treturn m_list",
"def gp(M, N):\n\n return M*N",
"def _K(m):\n M = m*(m - 1)/2\n K = np.zeros((M, m**2), dtype=np.int64)\n row = 0\n for j in range(1, m):\n col = (j - 1)*m + j\n s = m - j\n K[row:(row+s), col:(col+s)] = np.eye(s)\n row += s\n return K",
"def make_branch_matrix(self):\n self.bm = []\n for pod in self.pods:\n for lb in pod.limbs:\n row = []\n for br in lb.branches:\n row.append(br)\n self.bm.append(row)\n #print \"appended %d-element row %d\" % (len(row),len(self.bm))",
"def _get_gram_matrix(self, input_tensor):\n num_channels = int(input_tensor.shape[-1])\n input_vectors = tf.reshape(input_tensor, [-1, num_channels])\n num_vectors = tf.shape(input_vectors)[0]\n gram = tf.matmul(input_vectors, input_vectors, transpose_a=True)\n return gram / tf.cast(num_vectors, tf.float32)",
"def get_batch(self, x, y, t):\n t = (torch.ones(x.shape[0]).int() * t)\n\n if len(self.M) > 0:\n MEM = self.M\n order = np.arange(len(MEM))\n np.random.shuffle(order)\n index = order[:min(x.shape[0], len(MEM))]\n\n x = x.cpu()\n y = y.cpu()\n\n for k, idx in enumerate(index):\n ox, oy, ot = MEM[idx]\n x = torch.cat((x, ox.unsqueeze(0)), 0)\n y = torch.cat((y, oy.unsqueeze(0)), 0)\n t = torch.cat((t, ot.unsqueeze(0)), 0)\n\n # handle gpus if specified\n if self.cuda:\n x = x.cuda()\n y = y.cuda()\n t = t.cuda()\n\n return x, y, t",
"def batch_process_array(\n file: str, zarr_file: str, m: int, n: int, batch_size: int, chunk_size: int\n) -> None:\n z = zarr.open(zarr_file, mode=\"a\")\n z[\"X\"] = zarr.empty((m, 0), chunks=(m, chunk_size), dtype=\"float32\")\n\n with h5py.File(file, \"r\") as f:\n for i in range(n // batch_size + 1):\n j = i * batch_size\n if j + batch_size > n:\n batch_size = n - j\n k = j + batch_size\n\n matrix = f[\"X\"][:, j:k]\n\n z[\"X\"].append(matrix, axis=1)\n return",
"def bartlett(M):\n if M < 1:\n return array([])\n if M == 1:\n return ones(1, float)\n n = arange(0,M)\n return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))",
"def batch_sample(n, m, seed= 0):\n stream = sample(n, seed)\n while True:\n yield np.fromiter(stream, np.int, m)",
"def copie_matrice(m):\n return [m[i][:] for i in range(len(m))]"
]
| [
"0.61024183",
"0.6085936",
"0.5908195",
"0.5840925",
"0.5817203",
"0.56533",
"0.562997",
"0.5583973",
"0.556033",
"0.55166227",
"0.54819024",
"0.5479146",
"0.5469392",
"0.5464788",
"0.54570687",
"0.54411006",
"0.5395656",
"0.534545",
"0.5327879",
"0.53123766",
"0.5294155",
"0.52935266",
"0.5244257",
"0.5231091",
"0.5228254",
"0.52251804",
"0.51950794",
"0.51867056",
"0.5185075",
"0.5155229"
]
| 0.66467696 | 0 |
Send all tensors contained in `x` to `device`, where `x` is an arbitrary nested data structure of dicts and lists containing tensors. | def send_to_device(x, device, non_blocking=False):
if isinstance(x, torch.Tensor):
return x.to(device, non_blocking=non_blocking)
elif isinstance(x, list):
return [
send_to_device(xx, device, non_blocking=non_blocking) for xx in x
]
elif isinstance(x, tuple):
return tuple(
send_to_device(xx, device, non_blocking=non_blocking) for xx in x)
elif isinstance(x, dict):
return {
k: send_to_device(v, device, non_blocking=non_blocking)
for k, v in x.items()
}
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_device(data, device):\n if isinstance(data, (list,tuple)): # allows to apply function to lists or tuples of tensors\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)",
"def to(self, device):\n for item in self.data:\n if torch.is_tensor(item):\n item.to(item)\n else:\n for subitem in item:\n subitem.to(device)\n return self",
"def place_on_device(x, device):\n if isinstance(x, list):\n return [place_on_device(xi, device) for xi in x]\n elif isinstance(x, dict):\n return {k: place_on_device(v, device) for k, v in x.items()}\n elif isinstance(x, set):\n return {place_on_device(xi, device) for xi in x}\n elif isinstance(x, tuple):\n return tuple(place_on_device(xi, device) for xi in x)\n elif isinstance(x, torch.Tensor):\n return x\n else:\n return x",
"def batch_to_device(batch):\n for key in batch:\n if isinstance(batch[key], torch.Tensor):\n batch[key] = batch[key].to(device)\n return batch",
"def _batch_to_device(batch, target_device):\n tensor = _getattr(\"torch\", \"Tensor\")\n for key in batch:\n if isinstance(batch[key], tensor):\n batch[key] = batch[key].to(target_device)\n return batch",
"def any2device(value, device):\n if isinstance(value, dict):\n return dict((k, any2device(v, device)) for k, v in value.items())\n elif isinstance(value, (tuple, list)):\n return list(any2device(v, device) for v in value)\n elif torch.is_tensor(value):\n return value.to(device, non_blocking=True)\n elif isinstance(value, (np.ndarray, np.void)) \\\n and value.dtype.fields is not None:\n return dict(\n (k, any2device(value[k], device))\n for k in value.dtype.fields.keys()\n )\n elif isinstance(value, np.ndarray):\n return torch.Tensor(value).to(device)\n return value",
"def batch_obs(\n observations: List[Dict], device: Optional[torch.device] = None\n) -> Dict[str, torch.Tensor]:\n batch = defaultdict(list)\n\n for obs in observations:\n for sensor in obs:\n batch[sensor].append(_to_tensor(obs[sensor]))\n\n for sensor in batch:\n batch[sensor] = (\n torch.stack(batch[sensor], dim=0)\n .to(device=device)\n .to(dtype=torch.float)\n )\n\n return batch",
"def to_device(m: torch.nn.Module, x:torch.Tensor):\n if isinstance(m, torch.nn.Module):\n device = next(m.parameters()).device\n elif isinstance(m, torch.Tensor):\n device = m.device\n else:\n raise TypeError(\n \"Expected torch.nn.Module or torch.tensor, \" f\"bot got: {type(m)}\"\n )\n return x.to(device)",
"def _send(x, dst=0):\n x = torch.tensor(x)\n x = to_device(x)\n dist.send(x, dst)\n del x \n torch.cuda.empty_cache()",
"def send_to_device(tensor, device, non_blocking=False, skip_keys=None):\n if isinstance(tensor, (tuple, list)):\n return honor_type(\n tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)\n )\n elif isinstance(tensor, Mapping):\n if isinstance(skip_keys, str):\n skip_keys = [skip_keys]\n elif skip_keys is None:\n skip_keys = []\n return type(tensor)(\n {\n k: t if k in skip_keys else send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys)\n for k, t in tensor.items()\n }\n )\n elif hasattr(tensor, \"to\"):\n try:\n return tensor.to(device, non_blocking=non_blocking)\n except TypeError: # .to() doesn't accept non_blocking as kwarg\n return tensor.to(device)\n else:\n return tensor",
"def to_device(input, device=\"cuda\", dtype=None):\n\n if 'image' in input:\n input['image'] = input['image'].to(dtype=dtype)\n\n def transfer(x):\n if torch.is_tensor(x):\n return x.to(device=device)\n elif isinstance(x, list):\n return [transfer(_) for _ in x]\n elif isinstance(x, Mapping):\n return type(x)({k: transfer(v) for k, v in x.items()})\n else:\n return x\n\n return {k: transfer(v) for k, v in input.items()}",
"def _recv(x, src=0):\n\n x = torch.tensor(x) if torch.is_tensor(x) == False else x\n x = to_device(x)\n dist.recv(tensor=x, src=src)\n x.to('cpu')\n \n try:\n return x.item() # single element\n except:\n return x.tolist() # list of tensors",
"def recursive_copy_to_device(value: Any, non_blocking: bool, device: torch.device) -> Any:\n\n if isinstance(value, torch.Tensor):\n return value.to(device, non_blocking=non_blocking)\n\n if isinstance(value, (list, tuple)):\n values = []\n for val in value:\n values.append(recursive_copy_to_device(val, non_blocking=non_blocking, device=device))\n\n return values if isinstance(value, list) else tuple(values)\n\n if isinstance(value, container_abcs.Mapping):\n device_val: Dict[str, Any] = {}\n for key, val in value.items():\n device_val[key] = recursive_copy_to_device(val, non_blocking=non_blocking, device=device)\n\n return device_val\n\n return value",
"def to_device(data, device):\n if isinstance(data, (list,tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)",
"def to_device(data, device):\n if isinstance(data, (list,tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)",
"def to_device(data, device):\n if isinstance(data, (list,tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)",
"def to_device(data, device):\n if isinstance(data, (list,tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)",
"def _recursive_to_cuda(self, tensors: Union[Tuple[torch.Tensor], torch.Tensor]) \\\n -> Union[List[torch.Tensor], torch.Tensor]:\n if self.device is None: # keep on cpu\n return tensors\n\n if type(tensors) != list and type(tensors) != tuple: # not only for torch.Tensor\n return tensors.to(device=self.device)\n\n cuda_tensors = list()\n for i in range(len(tensors)):\n cuda_tensors.append(self._recursive_to_cuda(tensors[i]))\n return cuda_tensors",
"def to_device(data, device):\r\n if isinstance(data, (list,tuple)):\r\n return [to_device(x, device) for x in data]\r\n return data.to(device, non_blocking=True)",
"def to_device(data, device):\n if isinstance(data, (list, tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)",
"def astensors(*xs, dtype=None, device=None,\r\n backend=None, escape=None):\r\n backend = gg.backend(backend)\r\n device = gf.device(device, backend)\r\n return _astensors_fn(*xs,\r\n dtype=dtype,\r\n device=device,\r\n backend=backend,\r\n escape=escape)",
"def to_device(batch: Dict[str, torch.Tensor], device: torch.device) -> Dict[str, torch.Tensor]:\n\n return {key: val.to(device) for key, val in batch.items()}",
"def to_var(x, requires_grad=False):\n if isinstance(x, list):\n for k in x:\n if torch.cuda.is_available():\n k = k.cuda()\n k = torch.tensor(k, requires_grad=requires_grad)\n else:\n if torch.cuda.is_available():\n x = x.cuda()\n x = torch.tensor(x, requires_grad=requires_grad)\n\n return x",
"def to_numpy(x: torch.Tensor) -> np.ndarray:\n if isinstance(x, dict):\n r = {}\n for k, v in x.items():\n if isinstance(v, torch.Tensor):\n if v.device.type == 'cuda':\n r.update({k: v.detach().cpu().numpy()})\n else:\n r.update({k: v.detach().numpy()})\n else:\n r.update({k: v})\n return r\n else:\n if x.device.type == 'cuda':\n return x.detach().cpu().numpy()\n else:\n return x.detach().numpy()",
"def voxelnet_concat(batch, device=None, padding=None):\n if len(batch) == 0:\n raise ValueError('batch is empty')\n\n first_elem = batch[0]\n\n if isinstance(first_elem, tuple):\n result = []\n if not isinstance(padding, tuple):\n padding = [padding] * len(first_elem)\n\n for i in six.moves.range(len(first_elem)):\n result.append(to_device(device, _concat_arrays(\n [example[i] for example in batch], padding[i])))\n del batch\n return tuple(result)\n\n elif isinstance(first_elem, dict):\n result = {}\n if not isinstance(padding, dict):\n padding = {key: padding for key in first_elem}\n\n for key in first_elem:\n result[key] = to_device(device, _concat_arrays(\n [example[key] for example in batch], padding[key]))\n del batch\n return result\n\n else:\n return to_device(device, _concat_arrays(batch, padding))",
"def move_to_device(input_tensors: List[torch.Tensor],\n target_device: Optional[torch.device],\n non_blocking: bool = False) -> Iterable[torch.Tensor]:\n return (tensor if tensor.device == target_device or target_device is None\n else tensor.to(target_device, non_blocking=non_blocking)\n for tensor in input_tensors)",
"def dequeue_and_enqueue(self, x: Tensor) -> None:\n # Gather representations from all GPUs into a [batch_size * world_size, num_features] tensor, in case of\n # distributed training.\n if torch.distributed.is_available() and torch.distributed.is_initialized():\n x = concatenate_all(x)\n\n queue_size = self.representations.shape[1]\n batch_size = x.shape[0]\n if queue_size % batch_size != 0:\n raise ValueError(f\"Queue size ({queue_size}) is not a multiple of the batch size ({batch_size}).\")\n\n end = self.pointer + batch_size\n self.representations[:, int(self.pointer) : int(end)] = x.T\n self.pointer = end % queue_size",
"def sample_batch(pid, args, batch_queue, port_dict, device, actor_id_to_ip_dataport, local_size, cache_array):\n def recv_data(k, data_stream, actor_set, real_data_tasks_i):\n for real_data in data_stream:\n tmp = []\n tmp.append(real_data.state)\n tmp.append(real_data.action)\n tmp.append(real_data.reward)\n tmp.append(real_data.next_state)\n tmp.append(real_data.done)\n tmp.append(actor_set[k]['w'][real_data.idx])\n tmp.append(actor_set[k]['i'][real_data.idx])\n tmp.append(actor_set[k]['t'][real_data.idx])\n tmp.append(real_data.timestamp)\n local_dict[actor_set[k]['i'][real_data.idx]] = tmp\n cache_array[actor_set[k]['i'][real_data.idx]] |= 2**pid\n decom_state = torch.FloatTensor(np.frombuffer(zlib.decompress(real_data.state), dtype=np.uint8).reshape((1, 4, 84, 84)))\n real_data_tasks_i['states'].append(decom_state) #.to(device))\n real_data_tasks_i['actions'].append(torch.LongTensor([real_data.action])) #.to(device))\n real_data_tasks_i['rewards'].append(torch.FloatTensor([real_data.reward])) #.to(device))\n decom_next_state = torch.FloatTensor(np.frombuffer(zlib.decompress(real_data.next_state), dtype=np.uint8).reshape((1, 4, 84, 84)))\n real_data_tasks_i['next_states'].append(decom_next_state) #.to(device))\n real_data_tasks_i['dones'].append(torch.FloatTensor([real_data.done])) #.to(device))\n real_data_tasks_i['batch_weights'].append(torch.FloatTensor([actor_set[k]['w'][real_data.idx]])) #.to(device))\n real_data_tasks_i['batch_idxes'].append(actor_set[k]['i'][real_data.idx])\n # is the data overwrited?\n real_data_tasks_i['batch_timestamp_store'].append(actor_set[k]['t'][real_data.idx])\n real_data_tasks_i['batch_timestamp_real'].append(real_data.timestamp)\n conn = grpc.insecure_channel(port_dict['replay_ip'] + ':' + port_dict['sampleDataPort'])\n client = apex_data_pb2_grpc.SampleDataStub(channel=conn)\n local_dict = {}\n while True:\n batch_timestamp_real = []\n batch_timestamp_store = []\n batch_weights = []\n batch_idxes = []\n\n states, actions, rewards, next_states, dones = [], [], [], [], []\n\n res_batch = client.Send(apex_data_pb2.SampleDataRequest(batch_size=args.batch_size, beta = args.beta))\n actor_ids, data_ids, timestamps, weights, idxes = res_batch.actor_ids, res_batch.data_ids, res_batch.timestamp, res_batch.weights, res_batch.idxes\n actor_set = {}\n cached_value = {'states':{},'actions':{},'rewards':{},'next_states':{},'dones':{},'batch_weights':{},'batch_idxes':{},'batch_timestamp_store':{},'batch_timestamp_real':{}}\n for i in range(len(actor_ids)):\n set_a = actor_set.get(actor_ids[i], False)\n if set_a == False:\n actor_set[actor_ids[i]] = {}\n set_a = actor_set[actor_ids[i]]\n set_a['d'] = []\n set_a['w'] = []\n set_a['i'] = []\n set_a['t'] = []\n cached_value['states'][actor_ids[i]] = []\n cached_value['actions'][actor_ids[i]] = []\n cached_value['rewards'][actor_ids[i]] = []\n cached_value['next_states'][actor_ids[i]] = []\n cached_value['dones'][actor_ids[i]] = []\n cached_value['batch_weights'][actor_ids[i]] = []\n cached_value['batch_idxes'][actor_ids[i]] = []\n cached_value['batch_timestamp_store'][actor_ids[i]] = []\n cached_value['batch_timestamp_real'][actor_ids[i]] = []\n cache_id = actor_ids[i]*local_size+data_ids[i]\n cache_trans = cache_array[cache_id]\n if cache_trans & 2**pid == 0:\n set_a['d'].append(data_ids[i])\n set_a['w'].append(weights[i])\n set_a['i'].append(idxes[i])\n set_a['t'].append(timestamps[i])\n if cache_trans == 0 and local_dict.get(cache_id, False) != False:\n del local_dict[cache_id]\n else:\n try:\n state_tmp = 
local_dict[cache_id][0]\n action_tmp = local_dict[cache_id][1]\n reward_tmp = local_dict[cache_id][2] \n next_state_tmp = local_dict[cache_id][3] \n done_tmp = local_dict[cache_id][4] \n batch_weight_tmp = local_dict[cache_id][5] \n batch_idx_tmp = local_dict[cache_id][6] \n batch_store_tmp = local_dict[cache_id][7] \n batch_real_tmp = local_dict[cache_id][8] \n decom_state = torch.FloatTensor(np.frombuffer(zlib.decompress(state_tmp), dtype=np.uint8).reshape((1, 4, 84, 84)))\n cached_value['states'][actor_ids[i]].append(decom_state)\n cached_value['actions'][actor_ids[i]].append(torch.LongTensor([action_tmp]))\n cached_value['rewards'][actor_ids[i]].append(torch.FloatTensor([reward_tmp]))\n decom_next_state = torch.FloatTensor(np.frombuffer(zlib.decompress(next_state_tmp), dtype=np.uint8).reshape((1, 4, 84, 84)))\n cached_value['next_states'][actor_ids[i]].append(decom_next_state)\n cached_value['dones'][actor_ids[i]].append(torch.FloatTensor([done_tmp]))\n cached_value['batch_weights'][actor_ids[i]].append(torch.FloatTensor([batch_weight_tmp]))\n cached_value['batch_idxes'][actor_ids[i]].append(batch_idx_tmp)\n cached_value['batch_timestamp_store'][actor_ids[i]].append(batch_store_tmp)\n cached_value['batch_timestamp_real'][actor_ids[i]].append(batch_real_tmp)\n except:\n set_a['d'].append(data_ids[i])\n set_a['w'].append(weights[i])\n set_a['i'].append(idxes[i])\n set_a['t'].append(timestamps[i])\n real_data_links = {}\n real_data_tasks = {}\n for k, v in actor_set.items():\n actor_ip, data_port = actor_id_to_ip_dataport[k]\n conn_actor = grpc.insecure_channel(actor_ip + ':' + data_port)\n client_actor = apex_data_pb2_grpc.SendRealDataStub(channel=conn_actor)\n real_data_links[k] = client_actor.Send(apex_data_pb2.RealBatchRequest(idxes=v['d']))\n real_data_tasks[k] = {}\n real_data_tasks[k]['states'] = cached_value['states'][k]\n real_data_tasks[k]['actions'] = cached_value['actions'][k]\n real_data_tasks[k]['rewards'] = cached_value['rewards'][k]\n real_data_tasks[k]['next_states'] = cached_value['next_states'][k]\n real_data_tasks[k]['dones'] = cached_value['dones'][k]\n real_data_tasks[k]['batch_weights'] = cached_value['batch_weights'][k]\n real_data_tasks[k]['batch_idxes'] = cached_value['batch_idxes'][k]\n real_data_tasks[k]['batch_timestamp_store'] = cached_value['batch_timestamp_store'][k]\n real_data_tasks[k]['batch_timestamp_real'] = cached_value['batch_timestamp_real'][k]\n threads = []\n for k, v in real_data_links.items():\n t = threading.Thread(target=recv_data, args=(k, v, actor_set, real_data_tasks[k],))\n threads.append(t)\n t.start()\n\n for t in threads:\n t.join()\n\n for k, v in real_data_tasks.items():\n states += v['states']\n actions += v['actions']\n rewards += v['rewards']\n next_states += v['next_states']\n dones += v['dones']\n batch_weights += v['batch_weights']\n batch_idxes += v['batch_idxes']\n batch_timestamp_real += v['batch_timestamp_real']\n batch_timestamp_store += v['batch_timestamp_store']\n\n states = torch.cat(states,0).to(device)\n actions = torch.cat(actions,0).to(device)\n rewards = torch.cat(rewards,0).to(device)\n next_states = torch.cat(next_states,0).to(device)\n dones = torch.cat(dones,0).to(device)\n batch_weights = torch.cat(batch_weights,0).to(device)\n\n batch = [states, actions, rewards, next_states, dones, batch_weights, batch_idxes]\n batch_queue.put(batch)\n data, batch = None, None",
"def tohost(x):\n\n def single_tohost(x):\n n_device, n_batch, *remaining_dims = x.shape\n return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))\n\n return jax.tree_map(single_tohost, x)",
"def _fn_scatter(input, devices, streams=None):\n if streams is None:\n streams = [None] * len(devices)\n\n if isinstance(input, list):\n chunk_size = (len(input) - 1) // len(devices) + 1\n outputs = [\n _fn_scatter(input[i], [devices[i // chunk_size]],\n [streams[i // chunk_size]]) for i in range(len(input))\n ]\n return outputs\n elif isinstance(input, torch.Tensor):\n output = input.contiguous()\n # TODO: copy to a pinned buffer first (if copying from CPU)\n stream = streams[0] if output.numel() > 0 else None\n with torch.cuda.device(devices[0]), torch.cuda.stream(stream):\n output = output.cuda(devices[0], non_blocking=True)\n return output\n else:\n raise Exception('Unknown type {}.'.format(type(input)))"
]
| [
"0.67284745",
"0.66008097",
"0.6446256",
"0.6315414",
"0.62373644",
"0.62007165",
"0.6134738",
"0.6002824",
"0.5938364",
"0.5871974",
"0.5869342",
"0.57675153",
"0.5750202",
"0.56997794",
"0.56997794",
"0.56997794",
"0.56997794",
"0.5687243",
"0.56803846",
"0.56477666",
"0.5639209",
"0.5631587",
"0.5609783",
"0.5606898",
"0.5497997",
"0.54887915",
"0.54004014",
"0.5362331",
"0.53212374",
"0.5282605"
]
| 0.68736905 | 0 |
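A minimal usage sketch for the send_to_device row above (it assumes only PyTorch; the helper is restated verbatim so the snippet runs standalone, and the final assert only checks the device type so it works on both CPU and GPU):

```python
import torch

def send_to_device(x, device, non_blocking=False):
    # Recurse through lists, tuples and dicts, moving every tensor leaf onto the target device.
    if isinstance(x, torch.Tensor):
        return x.to(device, non_blocking=non_blocking)
    if isinstance(x, list):
        return [send_to_device(xx, device, non_blocking=non_blocking) for xx in x]
    if isinstance(x, tuple):
        return tuple(send_to_device(xx, device, non_blocking=non_blocking) for xx in x)
    if isinstance(x, dict):
        return {k: send_to_device(v, device, non_blocking=non_blocking) for k, v in x.items()}
    return x  # non-tensor leaves (ints, strings, ...) pass through unchanged

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch = {"inputs": torch.zeros(2, 3), "extras": [torch.ones(2), {"mask": torch.ones(2, 3)}]}
batch = send_to_device(batch, device, non_blocking=True)
assert batch["extras"][1]["mask"].device.type == device.type
```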
Recursively call state_dict() on all elements contained in a list / tuple / dict so that the whole structure can be saved safely via torch.save(). | def recursive_state_dict(x):
if hasattr(x, 'state_dict'):
return x.state_dict()
if isinstance(x, tuple):
return tuple(recursive_state_dict(xx) for xx in x)
if isinstance(x, list):
return [recursive_state_dict(xx) for xx in x]
if isinstance(x, dict):
        return {k: recursive_state_dict(v) for k, v in x.items()}
    # Plain leaf values without a state_dict() are returned unchanged.
    return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _post_state_dict_hook(\n module: nn.Module,\n state_dict: Dict[str, Any],\n prefix: str,\n *args: Any,\n ) -> Dict[str, Any]:\n self = cast(FullyShardedDataParallel, module)\n processed_state_dict = self._post_state_dict_hook_fn[self._state_dict_type](state_dict, prefix)\n # Restore buffers, which currently are in their full precision type,\n # back to their mixed precision type. This is because buffers are cast\n # during lazy_init() and stay at their mixed precision type before/after\n # forward/backward. As a result state_dict() should maintain this.\n if (\n self._is_root\n and self._mixed_precision_enabled_for_buffers()\n ):\n self._cast_buffers(recurse=True)\n return processed_state_dict",
"def state_dict(self, *args, **kwargs):\n return self.module.state_dict(*args, **kwargs)",
"def load_recursive_state_dict(x, obj):\n if hasattr(obj, 'load_state_dict'):\n obj.load_state_dict(x)\n if isinstance(x, (tuple, list)):\n for xx, oo in zip(x, obj):\n load_recursive_state_dict(xx, oo)\n if isinstance(x, dict):\n for k in objs.keys():\n load_recursive_state_dict(xx[k], oo[k])",
"def load_state_dict(self, state_dict: Dict[str, torch.Tensor]):\n pass",
"def to_dict(self):\n state = self.__getstate__()\n\n for (trait_name, trait_value) in state.iteritems():\n if isinstance(trait_value, list):\n new_list = []\n for element in trait_value:\n if isinstance(element, Model):\n element_dict = element.to_dict()\n new_list.append(element_dict)\n else:\n new_list.append(element)\n state[trait_name] = new_list\n elif isinstance(trait_value, dict):\n new_dict = {}\n for (key, value) in trait_value.iteritems():\n if isinstance(value, Model):\n value_dict = value.to_dict()\n new_dict[key] = value_dict\n else:\n new_dict[key] = value\n state[trait_name] = new_dict\n elif isinstance(trait_value, Model):\n value_dict = trait_value.to_dict()\n state[trait_name] = value_dict\n\n return state",
"def state_(state):\n return tuple( [ tuple( row ) for row in state ] )",
"def _sharded_state_dict(self, *args: Any, **kwargs: Any) -> Any:\n with self.set_state_dict_type(StateDictType.SHARDED_STATE_DICT):\n return self.state_dict(self, *args, **kwargs)",
"def _flatten_incr_state(\n self, structured_incr_state: Dict[int, Dict[str, Dict[str, torch.Tensor]]]\n ) -> Dict[str, torch.Tensor]:\n flat_incr_state = {}\n for layer_idx, dict1 in structured_incr_state.items():\n for attn_type, dict2 in dict1.items():\n for state_type, state in dict2.items():\n key = f'{layer_idx:d}__{attn_type}__{state_type}'\n flat_incr_state[key] = state\n return flat_incr_state",
"def _load_state_dict(self, state: dict):\n for o, dct in zip(self.optimizers, state.get('optimizers', [])):\n o.load_state_dict(dct)\n for s, dct in zip(self.schedulers, state.get('schedulers', [])):\n s.load_state_dict(dct)",
"def _load_state_dict(self, state: dict):\n for o, dct in zip(self.optimizers, state.get('optimizers', [])):\n o.load_state_dict(dct)\n for s, dct in zip(self.schedulers, state.get('schedulers', [])):\n s.load_state_dict(dct)",
"def toState(attrs=ALL):",
"def __setstate__(self, statedict):\n for k, v in list(statedict.items()):\n setattr(self, k, v)",
"def __setstate__(self, statedict):\n for k, v in list(statedict.items()):\n setattr(self, k, v)",
"def state_dict(self):\n return {\n 'XY_net': self.XY_net.state_dict(),\n 'XY_optimizer_minee': self.XY_optimizer_minee.state_dict(),\n 'X_net': self.X_net.state_dict(),\n 'X_optimizer_minee': self.X_optimizer_minee.state_dict(),\n 'Y_net': self.Y_net.state_dict(),\n 'Y_optimizer_minee': self.Y_optimizer_minee.state_dict(),\n 'X': self.X,\n 'Y': self.Y,\n 'lr': self.lr,\n 'batch_size': self.batch_size,\n 'ref_batch_factor': self.ref_batch_factor\n }",
"def obs2state(observation):\n l1 = [val.tolist() for val in list(observation.values())]\n l2 = []\n for sublist in l1:\n try:\n l2.extend(sublist)\n except:\n l2.append(sublist)\n return torch.FloatTensor(l2).view(1, -1)",
"def upgrade_state_dict(self, state_dict):\n return state_dict",
"def upgrade_state_dict(self, state_dict):\n return state_dict",
"def repackage_state(h):\n if not h:\n return None\n elif type(h) == V:\n return V(h.data)\n else:\n return list(repackage_state(v) for v in h)",
"def load_state_dict(self, arg):\n self.TrajectoryAutoencoder.load_state_dict(torch.load(arg))",
"def __setstate__(self, d):\n self.temp_yaml = None\n self.__dict__.update(d)",
"def state_dict(self):\r\n return {'ImageModel': self.image_model.state_dict(),\r\n 'QuestionModel': self.question_model.state_dict(),\r\n 'AttentionModel': self.attention.state_dict()\r\n }",
"def save_state_to_dict(self):\n return self.__dict__.copy()",
"def _unflatten_incr_state(\n self, flat_incr_state: Dict[str, torch.Tensor]\n ) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:\n structured_incr_state = defaultdict(lambda: defaultdict(dict))\n for key, state in flat_incr_state.items():\n layer_idx_str, attn_type, state_type = key.split('__')\n structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state\n return dict({k: dict(v) for k, v in structured_incr_state.items()})\n # Turn the nested defaultdicts back into regular dicts",
"def __setstate__(self, state: Dict[str, Any]) -> None:\n self.__dict__ = state.copy()\n # Once state is ingested - repopulate, NOT recursing.\n # Child segments will do it for themselves on unpickling.\n self.set_as_parent(recurse=False)",
"def set_state(canvas, state):\n for key, value in state.items():\n set_attribute(canvas, key, value)",
"def __getstate__(self):\n return dict(self.items())",
"def __getstate__(self):\n return dict(self.items())",
"def _fast_inference_update_state(self, inputs, state):\n # Fast inference: run step-by-step, storing the sequence\n # of keys and values calculated so far in state.\n new_k, new_v = inputs\n length = new_k.shape[1]\n (ks, vs, idx) = state\n\n # We cannot generate more than one token because it contradicts\n # all autoregressive properties\n assert length == 1\n\n new_index = idx // self._total_kv_pooling\n\n if self._chunk_len is not None:\n if self._chunk_offset != 0:\n new_index -= self._chunk_offset * (new_index >= self._chunk_offset)\n\n new_index = new_index % self._chunk_len\n\n # Keys and values are of shape [batch_size, length, d_kv].\n ks = fastmath.dynamic_update_slice_in_dim(ks, new_k, new_index, axis=1)\n vs = fastmath.dynamic_update_slice_in_dim(vs, new_v, new_index, axis=1)\n\n self.state = ks, vs, idx + self._n_raw_tokens_generated",
"def fromState(state):",
"def _local_state_dict(self, *args: Any, **kwargs: Any) -> Any:\n with self.state_dict_type(self, StateDictType.LOCAL_STATE_DICT):\n return self.state_dict(*args, **kwargs)"
]
| [
"0.60192704",
"0.5873791",
"0.58293617",
"0.57695657",
"0.57536656",
"0.5732549",
"0.57282263",
"0.57053864",
"0.5699387",
"0.5699387",
"0.5684025",
"0.5641588",
"0.5641588",
"0.5633706",
"0.5621523",
"0.5619566",
"0.5619566",
"0.5589749",
"0.55805403",
"0.55455166",
"0.5515815",
"0.5511081",
"0.5509435",
"0.55040765",
"0.55035067",
"0.5496144",
"0.5496144",
"0.5483253",
"0.54777884",
"0.54763156"
]
| 0.6610024 | 0 |
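An illustrative usage sketch for the recursive_state_dict row above (it restates the helper with an explicit `return x` fallback for plain leaf values, so the block is self-contained; the checkpoint filename is arbitrary):

```python
import torch
import torch.nn as nn

def recursive_state_dict(x):
    # Call state_dict() on anything that has one; recurse through containers.
    if hasattr(x, 'state_dict'):
        return x.state_dict()
    if isinstance(x, tuple):
        return tuple(recursive_state_dict(xx) for xx in x)
    if isinstance(x, list):
        return [recursive_state_dict(xx) for xx in x]
    if isinstance(x, dict):
        return {k: recursive_state_dict(v) for k, v in x.items()}
    return x  # plain values (e.g. step counters) are kept as-is

model = nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
ckpt = recursive_state_dict({"model": model, "optims": [opt], "step": 10})
torch.save(ckpt, "checkpoint.pt")  # everything inside is now plain tensors / dicts
```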
r""" Spherical linear interpolate between `z1` and `z2` according to `t`. | def slerp(z1, z2, t):
z1_l = z1.pow(2).sum(dim=-1, keepdim=True).sqrt()
z1_n = z1 / z1_l
z2_l = z2.pow(2).sum(dim=-1, keepdim=True).sqrt()
z2_n = z2 / z2_l
dot = torch.sum(z1_n * z2_n, dim=-1).clamp(-1, 1)
theta_0 = torch.acos(dot)
theta = t * theta_0
z3 = z2_n - dot * z1_n
z3 = z3 / z3.pow(2).sum(dim=-1, keepdim=True).sqrt()
return lerp(z1_l, z2_l, t) * (z1_n * torch.cos(theta) + z3 * torch.sin(theta)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interpolate(x_list, y_list, z_list):\n x1 = x_list[-2]\n x2 = x_list[-1]\n y1 = y_list[-2]\n y2 = y_list[-1]\n z1 = z_list[-2]\n z2 = z_list[-1]\n r = -y1/y2\n x_land = (x1+r*x2)/(r+1)\n z_land = (z1+r*z2)/(r+1)\n x_list[-1] = x_land\n y_list[-1] = 0.0\n z_list[-1] = z_land",
"def linear_interpolate_value_at_time(t0, v0, t1, v1, t):\n return v0 + linear_interpolate_value_change(t0, v0, t1, v1, t - t0)",
"def linear_interpolate_value_change(t0, v0, t1, v1, dt):\n return (v1 - v0)/float(t1-t0) * dt",
"def slerp(self, other, t):\n a, b = Vector3(self), Vector3(other)\n theta = a.angle(b)\n return type(self)(a * sind((1 - t) * theta) + b * sind(t * theta)) / sind(theta)",
"def interpolate(self, a, b, t):\n d1 = (t - a[0]).seconds\n d2 = (b[0] - t).seconds\n # The total time difference\n d = float(d1 + d2)\n point = []\n # Need to return a (time, lat, lon, elev) point\n point.append(t)\n # Linear interpolation of the latitude, longitude, and elevation\n point.append(float(a[1])*(d2/d) + float(b[1])*(d1/d))\n point.append(float(a[2])*(d2/d) + float(b[2])*(d1/d))\n point.append(float(a[3])*(d2/d) + float(b[3])*(d1/d))\n if self.verbose:\n sys.stderr.write('Interpolate:\\n')\n sys.stderr.write('\\t%s\\n' % repr(a))\n sys.stderr.write('\\t%s\\n' % repr(point))\n sys.stderr.write('\\t%s\\n' % repr(b))\n return point",
"def interp_n2(t, x, y):\n\n return y[:, 0] + (t - x[0]) * (y[:, 1] - y[:, 0]) / (x[1] - x[0])",
"def _lerp(self, lo: float, hi: float, t: float):\r\n return lo + t * (hi - lo)",
"def interpolate_hypersphere(v1, v2, num_steps):\n v1_norm = tf.norm(v1)\n v2_norm = tf.norm(v2)\n v2_normalized = v2 * (v1_norm / v2_norm)\n vectors = []\n for step in range(num_steps):\n interpolated = v1 + (v2_normalized - v1) * step / (num_steps - 1)\n interpolated_norm = tf.norm(interpolated)\n interpolated_normalized = interpolated * (v1_norm / interpolated_norm)\n vectors.append(interpolated_normalized)\n return tf.stack(vectors)",
"def interp_cubic(p0, p1, t_abs):\n\tT = (p1.time_from_start - p0.time_from_start).to_sec()\n\tt = t_abs - p0.time_from_start.to_sec()\n\tq = [0] * 6\n\tqdot = [0] * 6\n\tqddot = [0] * 6\n\tfor i in range(len(p0.positions)):\n\t\ta = p0.positions[i]\n\t\tb = p0.velocities[i]\n\t\tc = (-3 * p0.positions[i] + 3 * p1.positions[i] - 2 * T * p0.velocities[i] - T * p1.velocities[i]) / T**2\n\t\td = (2 * p0.positions[i] - 2 * p1.positions[i] + T * p0.velocities[i] + T * p1.velocities[i]) / T**3\n\n\t\tq[i] = a + b * t + c * t**2 + d * t**3\n\t\tqdot[i] = b + 2 * c * t + 3 * d * t**2\n\t\tqddot[i] = 2 * c + 6 * d * t\n\treturn JointTrajectoryPoint(positions=q, velocities=qdot, accelerations=qddot, time_from_start=rospy.Duration(t_abs))",
"def lerp(self, t):\n a = self.a0 + t * self.da\n return self.c + Vector((self.r * cos(a), self.r * sin(a)))",
"def _lerp(a, b, t, out=None):\n diff_b_a = subtract(b, a)\n # asanyarray is a stop-gap until gh-13105\n lerp_interpolation = asanyarray(add(a, diff_b_a*t, out=out))\n subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t>=0.5)\n if lerp_interpolation.ndim == 0 and out is None:\n lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays\n return lerp_interpolation",
"def v_from_z(z1, z2):\n R = (1+z1) / (1+z2)\n v = const.c * (R**2 - 1)/(1+R**2) \n\n return v.to('km/s')",
"def linear(e0, e1, t0, t1, e):\n alpha = max(0, min(1, (e - e0) / (e1 - e0))) # what fraction of the way through are we\n t = alpha * t1 + (1 - alpha) * t0 # interpolate accordingly\n return t",
"def lerp(a, b, t):\n return (1 - t) * a + t * b",
"def get_z_from_t(t):\n\n\n global z_from_t_interp\n\n if z_from_t_interp is None: initialize()\n\n return z_from_t_interp(t)",
"def interpolate(Position_i1, Position_i2, inBetween):\r\n \r\n # Chain the timestamps together and calculate the delta t\r\n T1 = Position_i1[0][:5] + [int(Position_i1[0][5])] + [int((Position_i1[0][5]%1)*1000000)] \r\n T2 = Position_i2[0][:5] + [int(Position_i2[0][5])] + [int((Position_i2[0][5]%1)*1000000)]\r\n dt = dT(T1, T2)\r\n\r\n # Do the same with longitude and latitude.\r\n ds = [i[1]- i[0] for i in zip(Position_i1[1], Position_i2[1])]\r\n \r\n # Calculate the \"slope\":\r\n ds_dt = [i/dt for i in ds]\r\n \r\n # Make the timeStamp for (inBetween) more precise:\r\n precise_Time = inBetween[:5] + [int(inBetween[5])] + [int((inBetween[5]%1)*1000000)]\r\n \r\n # Calculate the time since the first measurement passed till \"inBetween\"\r\n DeltaT = dT(T1, precise_Time)\r\n\r\n # Calculate the precise position at \"inBetween\":\r\n Position = [DeltaT* i[0] + i[1] for i in zip(ds_dt, Position_i1[1])]\r\n\r\n # Then return the [timeStamp, [lon, lat]]\r\n return [inBetween, Position]",
"def linear_spline_interpolation(q_, t_, m = 100):\n n = q_.shape[0]\n dof = q_.shape[1]\n\n t_ = t_ / m\n timesteps = np.linspace(0, 1, num = m)\n\n a = 100\n time_segments = np.zeros((n, dof))\n blend_times = np.zeros((n, dof))\n velocities = np.zeros((n, dof))\n accelerations = np.zeros((n, dof))\n\n # Initial time segment\n accelerations[0] = np.sign(q_[1] - q_[0]) * a\n blend_times[0] = t_[0] - np.sqrt(\n t_[0] * t_[0] - 2 * (q_[1] - q_[0]) / accelerations[0])\n velocities[0] = (q_[1] - q_[0]) / (t_[0] - 0.5 * blend_times[0])\n\n # Final time segment\n accelerations[n - 1] = np.sign(q_[n - 2] - q_[n - 1]) * a\n blend_times[n - 1] = t_[n - 2] - np.sqrt(\n t_[n - 2] * t_[n - 2] + 2 * (q_[n - 1] - q_[n - 2]) / accelerations[n - 1])\n velocities[n - 2] = (q_[n - 1] - q_[n - 2]) / (t_[n - 2] - 0.5 * blend_times[n - 1])\n velocities[n - 1] = 0\n\n # Loop for velocities\n for i in range(1, n - 2):\n velocities[i] = (q_[i + 1] - q_[i]) / t_[i]\n\n # Loop for accelerations and blend times\n for i in range(1, n - 1):\n accelerations[i] = np.sign(velocities[i] - velocities[i - 1]) * a\n blend_times[i] = (velocities[i] - velocities[i - 1]) / accelerations[i]\n\n # Calculate time segments\n time_segments[0] = t_[0] - blend_times[0] - 0.5 * blend_times[1]\n time_segments[n - 2] = t_[n - 2] - blend_times[n - 1] - 0.5 * blend_times[n - 2]\n time_segments[n - 1] = 0\n for i in range(1, n - 2):\n time_segments[i] = t_[i] - 0.5 * blend_times[i + 1] - 0.5 * blend_times[i]\n\n \n # Calculate Trajectories\n q = np.zeros((dof, m))\n qd = np.zeros((dof, m))\n qdd = np.zeros((dof, m))\n\n # Loop for each degree of freedom\n for d in range(dof):\n # j for using above parameters\n # previous_i for saving i of start of a parabola segment\n # previous_ii for saving i of start of a linear segment\n j = 0\n previous_i = 0\n previous_ii = 0\n\n # Loop over the timesteps\n for i in range(len(timesteps)):\n t = timesteps[i] - timesteps[previous_i]\n\n # If t is in the parabola range\n if t <= blend_times[j][d]:\n a = accelerations[j][d]\n\n qdd[d, i] = a\n qd[d, i] = qd[d, previous_i] + a * t\n\n if i == 0:\n q[d, i] = q_[0][d] + 0.5 * a * t * t\n else:\n q[d, i] = q[d, previous_i] + qd[d, previous_i] * t + 0.5 * a * t * t\n\n previous_ii = i\n\n # If t is in the linear range\n elif t <= blend_times[j][d] + time_segments[j][d]:\n t = timesteps[i] - timesteps[previous_ii]\n v = velocities[j][d]\n\n qdd[d, i] = 0\n qd[d, i] = v\n q[d, i] = q[d, previous_ii] + v * t\n\n # If t has crossed the parabola plus the linear range\n else:\n previous_i = i - 1\n j += 1\n\n t = timesteps[i] - timesteps[previous_i]\n\n # Break loop if parameter exceeded\n if j == len(accelerations):\n break\n\n a = accelerations[j][d]\n v = qd[d, previous_i]\n\n qdd[d, i] = a\n qd[d, i] = v + a * t\n q[d, i] = q[d, previous_i] + v * t + 0.5 * a * t * t\n\n previous_ii = i\n\n # Loop over remaining timesteps\n while i < len(timesteps):\n a = accelerations[j - 1][d]\n v = velocities[j - 1][d]\n\n qdd[d, i] = a\n qd[d, i] = v + a * t\n q[d, i] = q[d, previous_i] + v * t + 0.5 * a * t * t\n\n i += 1\n\n return q, qd, qdd",
"def interpolate_2d(x, y, z, x_new, y_new):\n fun = RectBivariateSpline(x, y, z, kx=1, ky=1) # linear interpolation\n return fun(x_new, y_new)",
"def linear_interp2d(z, map_lower, map_higher, comoving_dist=False, NewProjected=False):\n with h5py.File(map_lower, \"r\") as ds1, h5py.File(map_higher, \"r\") as ds2:\n if NewProjected:\n dm_name = \"map\"\n header_name = \"Header\"\n else:\n dm_name = \"DM\"\n header_name = \"HEADER\"\n\n y2 = ds2[dm_name][:]\n y1 = ds1[dm_name][:]\n\n if comoving_dist:\n x2 = z_to_mpc(ds2[header_name].attrs[\"Redshift\"])\n x1 = z_to_mpc(ds1[header_name].attrs[\"Redshift\"])\n dist = z_to_mpc(z) - x1\n else:\n x2 = ds2[header_name].attrs[\"Redshift\"]\n x1 = ds1[header_name].attrs[\"Redshift\"]\n dist = z - x1\n\n grad = (y2 - y1)/ (x2 - x1)\n\n return grad * dist + y1",
"def slerp(q0: np.ndarray, q1: np.ndarray, t_array: np.ndarray, threshold: float = 0.9995) -> np.ndarray:\n qdot = np.dot(q0, q1)\n # Ensure SLERP takes the shortest path\n if qdot < 0.0:\n q1 *= -1.0\n qdot *= -1.0\n # Interpolate linearly (LERP)\n if qdot > threshold:\n result = q0[np.newaxis, :] + t_array[:, np.newaxis]*(q1 - q0)[np.newaxis, :]\n return (result.T / np.linalg.norm(result, axis=1)).T\n # Angle between vectors\n theta_0 = np.arccos(qdot)\n sin_theta_0 = np.sin(theta_0)\n theta = theta_0*t_array\n sin_theta = np.sin(theta)\n s0 = np.cos(theta) - qdot*sin_theta/sin_theta_0\n s1 = sin_theta/sin_theta_0\n return s0[:, np.newaxis]*q0[np.newaxis, :] + s1[:, np.newaxis]*q1[np.newaxis, :]",
"def interpolate(x0, y0, x1, y1, x):\n y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n\n return y",
"def interpolate ( hsl1, hsl2, amt ):\n if isinstance( hsl1, Colz ):\n h1 = hsl1.h\n s1 = hsl1.s\n l1 = hsl1.l\n a1 = hsl1.a\n if isinstance( hsl1, list ):\n h1 = hsl1[0]\n s1 = hsl1[1]\n l1 = hsl1[2]\n if len(hsl1) > 3:\n a1 = hsl1[3]\n\n if isinstance( h1, int ):\n h1 = h1 / 360.0\n if isinstance( s1, int ):\n s1 = s1 / 100.0\n if isinstance( l1, int ):\n l1 = l1 / 100.0\n\n if isinstance( hsl2, Colz ):\n h2 = hsl2.h\n s2 = hsl2.s\n l2 = hsl2.l\n a2 = hsl2.a\n if isinstance( hsl2, list ):\n h2 = hsl2[0]\n s2 = hsl2[1]\n l2 = hsl2[2]\n if len(hsl2) > 3:\n a2 = hsl2[3]\n\n if isinstance( h2, int ):\n h2 = h2 / 360.0\n if isinstance( s2, int ):\n s2 = s2 / 100.0\n if isinstance( l2, int ):\n l2 = l2 / 100.0\n\n h3 = Colz.hueLerp( h1, h2, amt )\n s3 = Colz.linearLerp( s1, s2, amt )\n l3 = Colz.linearLerp( l1, l2, amt )\n\n if 'a1' in locals() and 'a2' in locals():\n a3 = Colz.linearLerp( a1, a2, amt )\n else:\n a3 = 1.0\n\n c_result = Colz()\n c_result.setHsla( h3, s3, l3, a3 )\n return c_result",
"def slerp(targettime, time, q):\n #debug_here()\n i_interp_int, t_matrix = compute_t(targettime, time)\n q_interp = mult(q[np.clip(i_interp_int + 1,0,len(time)-1),:], inv(q[i_interp_int,:]))\n q_interp = pow(q_interp, t_matrix) \n q_interp = mult(q_interp, q[i_interp_int,:])\n t_zero = (t_matrix == 0).flatten()\n q_interp[t_zero] = q[i_interp_int][t_zero]\n return q_interp",
"def initialize():\n\n global z_from_t_interp\n\n # Logarithmic spacing\n log_z_set = np.linspace(0.0, 3.0, 300)\n z_set = 10**(log_z_set) - 1.0\n\n t_set = np.zeros(len(z_set))\n for i, z in enumerate(z_set):\n t_set[i] = calc_lookback_time(z) / 1.0e6 # in Myr\n\n z_from_t_interp = interp1d(t_set, z_set, bounds_error=False, fill_value=100.0)",
"def slerp(mat1, mat2, perc=0.05):\n assert 0 <= perc <= 1.0\n assert mat1.shape == mat2.shape # arrays should have same dimension\n if len(mat1.shape) == 1:\n # turn vector into matrix with one row\n mat1 = mat1[np.newaxis, :]\n mat2 = mat2[np.newaxis, :]\n mat1_length = np.linalg.norm(mat1, axis=1)[:, np.newaxis]\n mat2_length = np.linalg.norm(mat2, axis=1)[:, np.newaxis]\n mat1_norm, mat2_norm = mat1 / mat1_length, mat2 / mat2_length\n row_dot_product = (mat1_norm * mat2_norm).sum(axis=1)\n # dot every user profile with its corresponding item attributes\n omega = np.arccos(row_dot_product)\n # note: bad things will happen if the vectors are in exactly opposite\n # directions! this is a pathological case; we are using this function\n # to calculate user profile drift after the user selects an item.\n # but the dot product of a user profile and an item vector in opposite\n # directions is very negative, so a user should almost never select an\n # item in the opposite direction of its own profile.\n if (omega == np.pi).any():\n # raise error if vectors are in exactly opposite directions\n raise ValueError(\n \"Cannot perform spherical interpolation between vectors in opposite direction\"\n )\n sin_o = np.sin(omega)\n unit_rot = (\n np.sin((1.0 - perc) * omega) / sin_o * mat1_norm.T\n + np.sin(perc * omega) / sin_o * mat2_norm.T\n ).T\n return unit_rot * mat1_length",
"def spherical_interpolation(atoms1, atoms2, N, n_center):\n\n pos1 = atoms1.positions.copy()\n pos2 = atoms2.positions.copy()\n c1 = pos1[n_center].mean(axis=0)\n spherical1 = cartesian_to_spherical(pos1, c1)\n c2 = pos2[n_center].mean(axis=0)\n spherical2 = cartesian_to_spherical(pos2, c2)\n\n # Make sure that the shortest rotation is performed.\n for n in range(len(spherical1)):\n theta1 = spherical1[n,1]\n theta2 = spherical2[n,1]\n if theta1 - theta2 > np.pi:\n spherical2[n,1] += 2*np.pi\n elif theta2 - theta1 > np.pi:\n spherical1[n,1] += 2*np.pi\n\n # Interpolate\n images = [atoms1]\n for n in range(N):\n nn = n + 1\n atoms_tmp = atoms1.copy()\n center = ( (N+1-nn)*c1 + nn*c2 ) / (N+1)\n spherical = ( (N+1-nn)*spherical1 + nn*spherical2 ) / (N+1)\n atoms_tmp.positions = spherical_to_cartesian(spherical, center)\n images += [atoms_tmp]\n images += [atoms2.copy()]\n return images",
"def interpolate_2d(x, y, z):\n X = np.linspace(min(x), max(x))\n Y = np.linspace(min(y), max(y))\n X, Y = np.meshgrid(X, Y)\n #f = interpolate.interp2d(x, y, z)\n #Z = f(X[0, :], Y[:, 0])\n f = interpolate.LinearNDInterpolator(zip(x, y), z)\n Z = f(X, Y)\n return X, Y, Z",
"def interpolateLinear( t):\n k = np.searchsorted( keytime, t, side='right') - 1\n u = (t - keytime[k]) / (keytime[k + 1] - keytime[k])\n curframe = (1.0-u)*keyframe[k] + u*keyframe[k+1]\n\n return curframe",
"def linear_interpolate(x, x0, y0, x1, y1):\n try:\n return (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n except ZeroDivisionError:\n return 0.0",
"def interpolate2Dtwice(xMarkers, yMarkers, zGrid1, zGrid2, x, y):\n xi1, xi2 = boundingIndices(xMarkers[0], xMarkers[-1], xMarkers[1]-xMarkers[0], x)\n yi1, yi2 = boundingIndices(yMarkers[0], yMarkers[-1], yMarkers[1]-yMarkers[0], y)\n \n # If out of bounds, return closest point on boundary\n if xi1 == xi2 or yi1 == yi2:\n return zGrid1[yi1, xi1], zGrid2[yi1, xi1]\n \n x1, x2 = xMarkers[xi1], xMarkers[xi2]\n y1, y2 = yMarkers[yi1], yMarkers[yi2]\n c1, c2 = (x2 - x)/(x2 - x1), (x - x1)/(x2 - x1)\n R11 = c1*zGrid1[yi1, xi1] + c2*zGrid1[yi1, xi2]\n R21 = c1*zGrid1[yi2, xi1] + c2*zGrid1[yi2, xi2]\n R12 = c1*zGrid2[yi1, xi1] + c2*zGrid2[yi1, xi2]\n R22 = c1*zGrid2[yi2, xi1] + c2*zGrid2[yi2, xi2]\n return ((y2 - y)/(y2 - y1))*R11 + ((y - y1)/(y2 - y1))*R21, \\\n ((y2 - y)/(y2 - y1))*R12 + ((y - y1)/(y2 - y1))*R22"
]
| [
"0.6512685",
"0.6440983",
"0.61869675",
"0.61836207",
"0.6150891",
"0.5991571",
"0.5864246",
"0.5859889",
"0.58573407",
"0.58359",
"0.58130884",
"0.5802004",
"0.5726565",
"0.56494033",
"0.564807",
"0.56478196",
"0.56105006",
"0.5579916",
"0.5520143",
"0.5476111",
"0.5449468",
"0.5419767",
"0.5326939",
"0.5322573",
"0.5311398",
"0.53082734",
"0.528064",
"0.52760625",
"0.52614754",
"0.52330744"
]
| 0.8154768 | 0 |
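A quick numerical sanity check for the slerp row above (an illustrative sketch: the `lerp` helper that the snippet calls but does not define is assumed to be plain linear interpolation; the function body itself is restated from the row's document):

```python
import torch

def lerp(z1, z2, t):
    # Assumed companion helper: plain linear interpolation of the magnitudes.
    return (1 - t) * z1 + t * z2

def slerp(z1, z2, t):
    # Spherical interpolation of the direction, linear interpolation of the magnitude.
    z1_l = z1.pow(2).sum(dim=-1, keepdim=True).sqrt()
    z1_n = z1 / z1_l
    z2_l = z2.pow(2).sum(dim=-1, keepdim=True).sqrt()
    z2_n = z2 / z2_l
    dot = torch.sum(z1_n * z2_n, dim=-1).clamp(-1, 1)
    theta_0 = torch.acos(dot)
    theta = t * theta_0
    z3 = z2_n - dot * z1_n
    z3 = z3 / z3.pow(2).sum(dim=-1, keepdim=True).sqrt()
    return lerp(z1_l, z2_l, t) * (z1_n * torch.cos(theta) + z3 * torch.sin(theta))

torch.manual_seed(0)
z1, z2 = torch.randn(8), torch.randn(8)
assert torch.allclose(slerp(z1, z2, 0.0), z1, atol=1e-5)  # endpoints are recovered
assert torch.allclose(slerp(z1, z2, 1.0), z2, atol=1e-5)
```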
When eps is set to zero, any broken frequency will damage the ref_channel estimation, regardless of whether target_psd_matrix or noise_psd_matrix is zero or inf. | def test_difficulties_without_eps_multi(self):
def get_beamformer(A, B):
return get_mvdr_vector_souden(
A, B,
eps=0,
return_ref_channel=True
)
for args in [
(
[self.PhiXX * 0, self.PhiXX],
[self.PhiNN, self.PhiNN],
),
(
[self.PhiXX, self.PhiXX],
[self.PhiNN * 0, self.PhiNN],
),
(
[self.PhiXX * 0, self.PhiXX],
[self.PhiNN * 0, self.PhiNN],
),
(
[self.PhiXX * np.inf, self.PhiXX],
[self.PhiNN, self.PhiNN],
),
(
[self.PhiXX, self.PhiXX],
[self.PhiNN * np.inf, self.PhiNN],
),
(
[self.PhiXX * np.inf, self.PhiXX],
[self.PhiNN * np.inf, self.PhiNN],
),
]:
with tc.assert_raises(AssertionError):
get_beamformer(*args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_difficulties_eps_multi(self):\n well_w = self.get_w_well_behaviour()\n\n def get_beamformer(A, B):\n return get_mvdr_vector_souden(\n A, B,\n return_ref_channel=True\n )\n\n for args in [\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n ]:\n w, ref_channel = get_beamformer(*args)\n assert ref_channel == 2, ref_channel\n np.testing.assert_allclose(\n w,\n np.array([[0., 0., 0.], well_w])\n )\n\n for args in [\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n ]:\n with tc.assert_raises(AssertionError):\n get_beamformer(*args)",
"def _sigma_ee_nonrel(self,gam,eps):\n s0 = 4 * r0**2 * alpha / (15 * eps)\n x = 4 * eps / (gam**2 - 1)\n sigma_nonrel = s0 * self._F(x,gam)\n sigma_nonrel[np.where(eps >= 0.25*(gam**2 - 1.))] = 0.0\n sigma_nonrel[np.where(gam*np.ones_like(eps) < 1.0)] = 0.0\n return sigma_nonrel / mec2_unit",
"def set_normal_free_energy(self):\n\t\t\n\t\tself.eps_base = self.mu_eps + self.normal_eps_tuning_prefactor* \\\n\t\t\t\t\t\tsp.exp(-(1.*sp.arange(self.Mm))**2.0/(2.0* \\\n\t\t\t\t\t\tself.normal_eps_tuning_width)**2.0)\n\t\t\t\t\t\t\n\t\tself.eps_base += random_matrix(self.Mm, params=[0, self.sigma_eps], \n\t\t\t\t\t\t\t\t\t\tseed=self.seed_eps)\n\t\t\n\t\t# If dual signal, use the average of the FULL signal nonzero components\n\t\tif self.Kk_split == 0:\n\t\t\tself.eps = self.WL_scaling*sp.log(self.mu_Ss0) + self.eps_base \n\t\telse:\n\t\t\tself.eps = self.WL_scaling*sp.log(sp.average(self.Ss\\\n\t\t\t\t\t\t\t[self.Ss != 0])) + self.eps_base\n\t\t\n\t\t# Apply max epsilon value to each component\n\t\tfor iM in range(self.Mm):\n\t\t\tif self.eps[iM] > self.max_eps:\n\t\t\t\tself.eps[iM] = self.max_eps\n\t\t\tif self.eps[iM] < self.min_eps:\n\t\t\t\tself.eps[iM] = self.min_eps",
"def testEpsK1Changes(self):\n with self.test_context() as session:\n initial_eps = 1e-3\n num_classes = 5\n rm = gpflow.likelihoods.RobustMax(num_classes, initial_eps)\n\n expected_eps_k1 = initial_eps / (num_classes - 1.)\n actual_eps_k1 = session.run(rm._eps_K1)\n self.assertAlmostEqual(expected_eps_k1, actual_eps_k1)\n\n new_eps = 0.412\n rm.epsilon.assign(new_eps, session=session)\n expected_eps_k2 = new_eps / (num_classes - 1.)\n actual_eps_k2 = session.run(rm._eps_K1)\n self.assertAlmostEqual(expected_eps_k2, actual_eps_k2)",
"def test_amplitude_damping_error_full_0state_noncanonical(self):\n error = amplitude_damping_error(1, excited_state_population=0,\n canonical_kraus=False)\n targets = [np.diag([1, 0]), np.array([[0, 1], [0, 0]])]\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def robust_reference(self):\n raw = self.raw.copy()\n raw._data = removeTrend(raw.get_data(), sample_rate=self.sfreq)\n\n # Determine unusable channels and remove them from the reference channels\n noisy_detector = NoisyChannels(raw, do_detrend=False)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels_original = {\n \"bad_by_nan\": noisy_detector.bad_by_nan,\n \"bad_by_flat\": noisy_detector.bad_by_flat,\n \"bad_by_deviation\": noisy_detector.bad_by_deviation,\n \"bad_by_hf_noise\": noisy_detector.bad_by_hf_noise,\n \"bad_by_correlation\": noisy_detector.bad_by_correlation,\n \"bad_by_ransac\": noisy_detector.bad_by_ransac,\n \"bad_all\": noisy_detector.get_bads(),\n }\n self.noisy_channels = self.noisy_channels_original.copy()\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n self.unusable_channels = _union(\n noisy_detector.bad_by_nan, noisy_detector.bad_by_flat\n )\n # unusable_channels = _union(unusable_channels, noisy_detector.bad_by_SNR)\n self.reference_channels = _set_diff(\n self.reference_channels, self.unusable_channels\n )\n\n # Get initial estimate of the reference by the specified method\n signal = raw.get_data() * 1e6\n self.reference_signal = (\n np.nanmedian(raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n reference_index = [\n self.ch_names_eeg.index(ch) for ch in self.reference_channels\n ]\n signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n\n # Remove reference from signal, iteratively interpolating bad channels\n raw_tmp = raw.copy()\n\n iterations = 0\n noisy_channels_old = []\n max_iteration_num = 4\n\n while True:\n raw_tmp._data = signal_tmp * 1e-6\n noisy_detector = NoisyChannels(raw_tmp)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels[\"bad_by_nan\"] = _union(\n self.noisy_channels[\"bad_by_nan\"], noisy_detector.bad_by_nan\n )\n self.noisy_channels[\"bad_by_flat\"] = _union(\n self.noisy_channels[\"bad_by_flat\"], noisy_detector.bad_by_flat\n )\n self.noisy_channels[\"bad_by_deviation\"] = _union(\n self.noisy_channels[\"bad_by_deviation\"], noisy_detector.bad_by_deviation\n )\n self.noisy_channels[\"bad_by_hf_noise\"] = _union(\n self.noisy_channels[\"bad_by_hf_noise\"], noisy_detector.bad_by_hf_noise\n )\n self.noisy_channels[\"bad_by_correlation\"] = _union(\n self.noisy_channels[\"bad_by_correlation\"],\n noisy_detector.bad_by_correlation,\n )\n self.noisy_channels[\"bad_by_ransac\"] = _union(\n self.noisy_channels[\"bad_by_ransac\"], noisy_detector.bad_by_ransac\n )\n self.noisy_channels[\"bad_all\"] = _union(\n self.noisy_channels[\"bad_all\"], noisy_detector.get_bads()\n )\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n if (\n iterations > 1\n and (\n not self.noisy_channels[\"bad_all\"]\n or set(self.noisy_channels[\"bad_all\"]) == set(noisy_channels_old)\n )\n or iterations > max_iteration_num\n ):\n break\n noisy_channels_old = self.noisy_channels[\"bad_all\"].copy()\n\n if raw_tmp.info[\"nchan\"] - len(self.noisy_channels[\"bad_all\"]) < 2:\n raise ValueError(\n \"RobustReference:TooManyBad \"\n \"Could not perform a robust reference -- not enough good channels\"\n )\n\n if self.noisy_channels[\"bad_all\"]:\n raw_tmp._data = signal * 1e-6\n raw_tmp.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n raw_tmp.interpolate_bads()\n signal_tmp = raw_tmp.get_data() * 1e6\n else:\n signal_tmp = signal\n self.reference_signal = (\n np.nanmean(raw_tmp.get_data(picks=self.reference_channels), axis=0)\n * 1e6\n )\n\n 
signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n iterations = iterations + 1\n logger.info(\"Iterations: {}\".format(iterations))\n\n logger.info(\"Robust reference done\")\n return self.noisy_channels, self.reference_signal",
"def test_reset(self):\n self.p.C[0][0] = np.inf\n self.step()",
"def _sigma_ep(self,gam,eps):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return self._sigma_1(gam,eps)",
"def test_amplitude_damping_error_full_0state_canonical(self):\n error = amplitude_damping_error(1, excited_state_population=0,\n canonical_kraus=True)\n targets = [np.diag([1, 0]), np.array([[0, 1], [0, 0]])]\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def calibrate_nopp(fundamental):\n thresh = 0.09682\n m1 = -0.1014\n b1 = 0.2839\n m2 = -0.065\n b2 = 0.21\n state1 = (fundamental < thresh)\n\n cal_fun = copy.copy(fundamental)\n cal_fun[state1] = 10**((fundamental[state1] - b1) / m1)\n cal_fun[~state1] = 10**((fundamental[~state1] - b2) / m2)\n return cal_fun",
"def psd_estimate(src_file, type):\n\n #Open file\n with NWBHDF5IO(src_file, mode='r+', load_namespaces=True) as io:\n nwb = io.read()\n\n #Source ElectricalSeries\n if type=='raw':\n data_obj = nwb.acquisition['ElectricalSeries']\n elif type=='preprocessed':\n data_obj = nwb.processing['ecephys'].data_interfaces['LFP'].electrical_series['preprocessed']\n\n nChannels = data_obj.data.shape[1]\n nSamples = data_obj.data.shape[0]\n fs = data_obj.rate\n #Welch - window length as power of 2 and keeps dF~0.05 Hz\n dF = .05 #Frequency bin size\n win_len_welch = 2**(np.ceil(np.log2(fs/dF)).astype('int')) #dF = fs/nfft\n #FFT - using a power of 2 number of samples improves performance\n nfft = int(2**(np.floor(np.log2(nSamples)).astype('int')))\n fx_lim = 200.\n for ch in np.arange(nChannels): # Iterate over channels\n trace = data_obj.data[:, ch]\n fx_w, py_w = sgn.welch(trace, fs=fs, nperseg=win_len_welch)\n fx_f, py_f = sgn.periodogram(trace, fs=fs, nfft=nfft)\n #saves PSD up to 200 Hz\n py_w = py_w[fx_w < fx_lim]\n fx_w = fx_w[fx_w < fx_lim]\n py_f = py_f[fx_f < fx_lim]\n fx_f = fx_f[fx_f < fx_lim]\n if ch==0:\n PY_welch = py_w.reshape(-1,1)\n PY_fft = py_f.reshape(-1,1)\n else:\n PY_welch = np.append(PY_welch, py_w.reshape(-1,1), axis=1)\n PY_fft = np.append(PY_fft, py_f.reshape(-1,1), axis=1)\n\n #Electrodes\n elecs_region = nwb.electrodes.create_region(name='electrodes',\n region=np.arange(nChannels).tolist(),\n description='all electrodes')\n\n #PSD shape: ('frequency', 'channel')\n spectrum_module_welch = Spectrum(name='Spectrum_welch_'+type,\n frequencies=fx_w,\n power=PY_welch,\n source_timeseries=data_obj,\n electrodes=elecs_region)\n\n spectrum_module_fft = Spectrum(name='Spectrum_fft_'+type,\n frequencies=fx_f,\n power=PY_fft,\n source_timeseries=data_obj,\n electrodes=elecs_region)\n \n # Processing module\n try: # if ecephys module already exists\n ecephys_module = nwb.processing['ecephys']\n except: # creates ecephys ProcessingModule\n ecephys_module = ProcessingModule(name='ecephys',\n description='Extracellular electrophysiology data.')\n # Add module to NWB file\n nwb.add_processing_module(ecephys_module)\n print('Created ecephys')\n ecephys_module.add_data_interface(spectrum_module_welch)\n ecephys_module.add_data_interface(spectrum_module_fft)\n\n io.write(nwb)\n print('Spectrum_welch_'+type+' added to file.')\n print('Spectrum_fft_'+type+' added to file.')",
"def eps_actual(self, eps_actual):\n\n self._eps_actual = eps_actual",
"def test_amplitude_damping_error_full_1state_noncanonical(self):\n error = amplitude_damping_error(1, excited_state_population=1,\n canonical_kraus=False)\n targets = [np.diag([0, 1]), np.array([[0, 0], [1, 0]])]\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def test_no_effect_during_refractory(self):\n np.random.seed(6564)\n f = 0.5\n self.syn_dense.W = np.random.randn(self.M, self.N)\n self.syn_dense.f_nmda = f\n self.syn_dense.change_during_ref = False\n\n self.T.active_state = False\n\n sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertAlmostEqual(np.linalg.norm(self.T.i_ampa), 0.0)\n self.assertAlmostEqual(np.linalg.norm(self.T.i_nmda), 0.0)",
"def test_hydro_precision_loss():\n frequency_range = np.arange(10000, 800000, 200000)\n pars = [frequency_range, 187, 0.000464748 * 100, 4.09335e-08, 4.88e-6, 997, 997, None]\n reference_psd = np.array([9.82828137e-12, 8.11392808e-16, 8.63496544e-17, 2.24961617e-17])\n np.testing.assert_allclose(passive_power_spectrum_model_hydro(*pars), reference_psd)",
"def test_double_ended_ols_wls_fix_gamma_estimate_synthetic():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 100)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real) / \\\n (np.exp(-gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(-gamma / temp_real) - 1)\n rst = C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real) / (np.exp(-gamma / temp_real) - 1)\n rast = C_m * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(-gamma / temp_real) - 1)\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n method='ols',\n solver='sparse',\n fix_gamma=(gamma, 0.))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method='wls',\n solver='sparse',\n tmpw_mc_size=5,\n fix_gamma=(gamma, 0.))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=7)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n pass",
"def test_double_ended_wls_fix_gamma_estimate_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 100)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n rst = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n rast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real) - 1)\n )\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n alpha -= alpha[0] # the first x-index is where to start counting\n dalpha = dalpha_p - dalpha_m\n alpha2 = x * dalpha\n\n # to ensure the st, rst, ast, rast were correctly defined.\n np.testing.assert_allclose(alpha2, alpha, atol=1e-15, rtol=0)\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"rst\": ([\"x\", \"time\"], rst),\n \"rast\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.35 * cable_len)],\n \"warm\": [slice(0.67 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_double_ended(\n sections=sections,\n st_var=1e-12,\n ast_var=1e-12,\n rst_var=1e-12,\n rast_var=1e-12,\n method=\"wls\",\n solver=\"sparse\",\n fix_gamma=(gamma, 0.0),\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)\n assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=9)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=6)\n assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=6)\n assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=6)\n\n pass",
"def apply_gaussian_resolution(self,params,data,fwhm=1,dE=0.01,E_max=100):\n print('\\n################### CONVOLUTION #####################\\n')\n print(f'\\n\\tConvolution with Gaussian function, FWHM = {fwhm} meV\\n')\n\n data.fwhm = fwhm\n c = fwhm/2.35482\n\n data.dE = dE\n data.E_max = E_max\n data.spectra_E = np.arange(0,data.E_max+data.dE,data.dE)\n data.spectra_num_E = len(data.spectra_E)\n data.spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n data.smooth_spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n structure_factors = []\n energies = []\n\n ### sum intensity of degenerate bands\n if params.sum_degenerate_bands == True:\n print('\\n\\tSumming degenerate bands before convolution (using convolution dE as tolerance)\\n')\n for q in range(params.num_Qpoints):\n sfac = data.structure_factors[:,q]\n energy = data.frequencies[f'{q}']\n reduced_energies = []\n summed_sfac = []\n while True:\n if len(energy) == 0:\n break\n test_energy = energy[0]\n reduced_energies.append(test_energy)\n indicies = np.intersect1d(np.argwhere(energy <= (test_energy+data.dE)),\n np.argwhere(energy > (test_energy-data.dE)))\n summed_sfac.append(sfac[indicies].sum())\n sfac = np.delete(sfac,indicies)\n energy = np.delete(energy,indicies)\n energies.append(reduced_energies)\n structure_factors.append(summed_sfac)\n else:\n print('\\n\\tWARNING: You should definitely sum degenerate bands!!!\\n')\n for q in range(params.num_Qpoints):\n energies.append(data.frequencies[f'{q}'])\n structure_factors.append(data.structure_factors[:,q])\n\n ### populate array for heatmap\n ### try statement takes care of negative energies\n for q in range(params.num_Qpoints):\n for b in range(len(structure_factors[q][:])):\n try: # if there are negative modes, argwhere returns an empty vector and the slice crashes\n data.spectra[np.argwhere(data.spectra_E <= \n energies[q][b]).max(),q] = structure_factors[q][b]\n except:\n continue\n\n if params.bose_factor == True:\n print('\\n\\tWARNING: Bose factor isnt verified. Need to compare to SNAXS.\\n')\n if params.temperature < 5:\n temperature = 5\n else:\n temperature = params.temperature\n inds = np.argwhere(data.spectra_E <= 0.5)\n tmp_e = np.copy(data.spectra_E)\n tmp_e[inds] = 0.5\n bose = 1+1/(np.exp(tmp_e/(constants.kb*1000*temperature))-1)\n bose = np.tile(bose.reshape((data.spectra_num_E,1)),reps=(1,params.num_Qpoints))\n data.spectra = np.multiply(data.spectra,bose)\n data.spectra = data.spectra/np.max(data.spectra)\n\n ### gaussian convolution using for loops, slow but very little memory utilization\n g_energy = np.append(data.spectra_E-data.spectra_E.max(),data.spectra_E[1:])\n gaussian = np.exp(-0.5*g_energy**2/c**2)/c/np.sqrt(2*np.pi)\n gaussian = np.tile(gaussian.reshape((gaussian.shape[0],1)),(1,data.num_Qpoints))\n tmp = np.append(data.spectra,data.spectra,axis=0)[1:,:]\n for e in range(data.spectra_num_E):\n if e%50 == 0:\n print(f'\\t------ {e}/{data.spectra_num_E} -------')\n data.smooth_spectra[e,:] = np.trapz(tmp*np.roll(gaussian,shift=e,axis=0),g_energy,axis=0)\n print('\\n\\tDone convolving!\\n')\n data.smooth_spectra = data.smooth_spectra/np.max(data.smooth_spectra)\n\n# if params.random_background == True:\n# data.smooth_spectra = data.smooth_spectra+(np.random.normal(0,1,\n# (data.smooth_spectra.shape[0],data.smooth_spectra.shape[1])))*0.001\n \n plt.imshow(data.smooth_spectra,origin='lower',aspect='auto',cmap='hot')\n plt.show()",
"def _semi_relativistic_loss(eps):\n P = Pcoef * np.imag(1./eps) / np.real(Theta**2.+ThetaE**2.)\n return P",
"def get_delta_S(sigma_t,q_t,target_eps=1.0,nx=1E6,L=20.0):\n\n nx = int(nx)\n\n tol_newton = 1e-10 # set this to, e.g., 0.01*target_delta\n\n dx = 2.0*L/nx # discretisation interval \\Delta x\n x = np.linspace(-L,L-dx,nx,dtype=np.complex128) # grid for the numerical integration\n\n fx_table=[]\n F_prod=np.ones(x.size)\n\n ncomp=sigma_t.size\n\n if(q_t.size != ncomp):\n print('The arrays for q and sigma are of different size!')\n return float('inf')\n\n for ij in range(ncomp):\n\n sigma=sigma_t[ij]\n q=q_t[ij]\n\n # Evaluate the PLD distribution,\n # This is the case of substitution relation (subsection 5.2)\n c = q*np.exp(-1/(2*sigma**2))\n ey = np.exp(x)\n term1=(-(1-q)*(1-ey) + np.sqrt((1-q)**2*(1-ey)**2 + 4*c**2*ey))/(2*c)\n term1=np.maximum(term1,1e-16)\n Linvx = (sigma**2)*np.log(term1)\n\n sq = np.sqrt((1-q)**2*(1-ey)**2 + 4*c**2*ey)\n nom1 = 4*c**2*ey - 2*(1-q)**2*ey*(1-ey)\n term1 = nom1/(2*sq)\n nom2 = term1 + (1-q)*ey\n nom2 = nom2*(sq+(1-q)*(1-ey))\n dLinvx = sigma**2*nom2/(4*c**2*ey)\n\n ALinvx = (1/np.sqrt(2*np.pi*sigma**2))*((1-q)*np.exp(-Linvx*Linvx/(2*sigma**2)) +\n q*np.exp(-(Linvx-1)*(Linvx-1)/(2*sigma**2)))\n\n fx = np.real(ALinvx*dLinvx)\n half = int(nx/2)\n\n # Flip fx, i.e. fx <- D(fx), the matrix D = [0 I;I 0]\n temp = np.copy(fx[half:])\n fx[half:] = np.copy(fx[:half])\n fx[:half] = temp\n\n FF1 = np.fft.fft(fx*dx) # Compute the DFFT\n F_prod = F_prod*FF1\n\n # first jj for which 1-exp(target_eps-x)>0,\n # i.e. start of the integral domain\n jj = int(np.floor(float(nx*(L+np.real(target_eps))/(2*L))))\n\n # Compute the inverse DFT\n cfx = np.fft.ifft((F_prod/dx))\n\n # Flip again, i.e. cfx <- D(cfx), D = [0 I;I 0]\n temp = np.copy(cfx[half:])\n cfx[half:] = cfx[:half]\n cfx[:half] = temp\n\n # Evaluate \\delta(target_eps) and \\delta'(target_eps)\n exp_e = 1-np.exp(target_eps-x[jj+1:])\n integrand = exp_e*cfx[jj+1:]\n sum_int=np.sum(integrand)\n delta = sum_int*dx\n\n\n print('DP-delta (in S-relation) after ' + str(int(ncomp)) + ' compositions defined by sigma and q arrays:' + str(np.real(delta)) + ' (epsilon=' + str(target_eps) + ')')\n return np.real(delta)",
"def eps(newEps=None):\n\n global _eps\n if newEps is not None:\n _eps = newEps\n return _eps",
"def test_gradient_convergence(self):\n pass",
"def diff_1st_fwrdbwrd(fp, fm, eps):\n \n return (fp - fm)/eps",
"def diff_1st_central(fp, fm, eps):\n \n return (fp - fm)/(2.0*eps)",
"def test_amplitude_damping_error_full_1state_canonical(self):\n error = amplitude_damping_error(1, excited_state_population=1,\n canonical_kraus=True)\n targets = [np.diag([0, 1]), np.array([[0, 0], [1, 0]])]\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def eps(self):\n return self.eps_mask*self.eps_scheduler.value",
"def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both 
unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)",
"def test_allow_effect_during_refractory(self):\n np.random.seed(6564)\n f = 0.5\n self.syn_dense.W = np.random.randn(self.M, self.N)\n self.syn_dense.f_nmda = f\n self.syn_dense.change_during_ref = True\n\n self.T.active_state = False\n\n sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertGreater(np.linalg.norm(self.T.i_ampa), 0.1)\n self.assertGreater(np.linalg.norm(self.T.i_nmda), 0.1)",
"def test_double_ended_ols_wls_fix_alpha_fix_gamma_estimate_synthetic():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 100)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real) / \\\n (np.exp(-gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(-gamma / temp_real) - 1)\n rst = C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real) / (np.exp(-gamma / temp_real) - 1)\n rast = C_m * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(-gamma / temp_real) - 1)\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n method='ols',\n solver='sparse',\n fix_gamma=(gamma, 0.),\n fix_alpha=(alpha, np.zeros_like(alpha)))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method='wls',\n solver='sparse',\n tmpw_mc_size=5,\n fix_gamma=(gamma, 0.),\n fix_alpha=(alpha, np.zeros_like(alpha)))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=7)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=5)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=5)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n pass",
"def _perturbInPlaceSoft(self):\n kevRandom = KevRandom()\n if random.random() < 0.5:\n newThreshold = -1\n while newThreshold < 0 or newThreshold > 1:\n newThreshold = self.threshold + kevRandom.laplacian() #* 0.1\n self.threshold = newThreshold\n else:\n self.beta += kevRandom.laplacian() #* 0.1"
]
| [
"0.544045",
"0.5354141",
"0.52479935",
"0.52349603",
"0.52272445",
"0.51748747",
"0.51745343",
"0.5129748",
"0.5118663",
"0.5110409",
"0.50291836",
"0.5009186",
"0.5007034",
"0.4982783",
"0.4966813",
"0.49501437",
"0.49423245",
"0.49239942",
"0.4920314",
"0.4920293",
"0.49141216",
"0.4911037",
"0.49079007",
"0.4882082",
"0.4873606",
"0.48622233",
"0.48556712",
"0.48545742",
"0.48453772",
"0.4837297"
]
| 0.5363968 | 1 |
Dada uma coordenada da matriz (lin,col) transforma em coordenada Turtle | def em_coord_turtle(lin, col, dim, tam_celula):
meio = dim // 2
x = (col - meio) * tam_celula
y = (meio - lin) * tam_celula
return x, y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def coordinates(self):",
"def position(t):\n return c + tangent_vec * 7 * t ** 2",
"def ra2xy(self, ra):\n return -math.sin(ra), math.cos(ra)",
"def trajectoire(self):\n trajx = []\n trajy = []\n for i in range(0, len(self.pos)):\n trajx.append(self.pos[i].x)\n trajy.append(self.pos[i].y)\n plt.plot(trajx, trajy) # color=self.color)\n plt.show()",
"def get_cell_coords(self, pt):\n\n\t return int(pt[0] // self.a), int(pt[1] // self.a)",
"def tanp_to_world(self, x, y):\n ra, dec = x, y\n return ra, dec",
"def coord (i, j):\r\n return j, i",
"def pixel2coord(tf, x, y):\n lat = tf[0] + x*tf[1] + y*tf[2]\n lon = tf[3] + x*tf[4] + y*tf[5]\n\n return lat, lon",
"def transform_point(p,R,t):\r\n x = R[0][0]*p[0]+R[0][1]*p[1]+t[0]\r\n y = R[1][0]*p[0]+R[1][1]*p[1]+t[1]\r\n return [x,y]",
"def translation(coord, lat, lon):\n ty = merc_y(lat) - 73.43 # Y translation\n tx = merc_x(lon) - 57.55 # X translation\n for i in range(len(coord)):\n for j in range(len(coord[i])):\n coord[i][j][0], coord[i][j][1] = coord[i][j][0] + tx, coord[i][j][1] + ty\n return coord",
"def c( self , y , r , t ):\n \n u = self.trajectory.t2u( t )\n \n return u",
"def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)",
"def xytors(x,y):\n\n from math import sqrt\n\n L1 = (sqrt(3.0)*y + 1.0)/3.0\n L2 = (-3.0*x - sqrt(3.0)*y + 2.0)/6.0\n L3 = ( 3.0*x - sqrt(3.0)*y + 2.0)/6.0\n\n r = -L2 + L3 - L1\n s = -L2 - L3 + L1\n\n return r,s",
"def transform(self, lens):\n coords_t = np.zeros(self.coords.shape)\n coords_t[:,0] = (\n self.coords[:,0] * lens.nx / (lens.xmax - lens.xmin))\n coords_t[:,1] = (\n self.coords[:,1] * lens.ny / (lens.ymax - lens.ymin))\n return coords_t",
"def T(self):\n\n # Calculate the direction cosines for the local x-axis\n # The local x-axis will run from the i-node to the j-node\n xi = self.i_node.X\n xj = self.j_node.X\n yi = self.i_node.Y\n yj = self.j_node.Y\n zi = self.i_node.Z\n zj = self.j_node.Z\n x = [(xj - xi), (yj - yi), (zj - zi)]\n x = x/norm(x)\n \n # The local y-axis will be in the plane of the plate\n # Find a vector in the plate's local xy plane\n xn = self.n_node.X\n yn = self.n_node.Y\n zn = self.n_node.Z\n xy = [xn - xi, yn - yi, zn - zi]\n\n # Find a vector perpendicular to the plate surface to get the orientation of the local z-axis\n z = cross(x, xy)\n \n # Divide the vector by its magnitude to produce a unit z-vector of direction cosines\n z = z/norm(z)\n\n # Calculate the local y-axis as a vector perpendicular to the local z and x-axes\n y = cross(z, x)\n \n # Divide the z-vector by its magnitude to produce a unit vector of direction cosines\n y = y/norm(y)\n\n # Create the direction cosines matrix\n dirCos = array([x, y, z])\n \n # Build the transformation matrix\n transMatrix = zeros((24, 24))\n transMatrix[0:3, 0:3] = dirCos\n transMatrix[3:6, 3:6] = dirCos\n transMatrix[6:9, 6:9] = dirCos\n transMatrix[9:12, 9:12] = dirCos\n transMatrix[12:15, 12:15] = dirCos\n transMatrix[15:18, 15:18] = dirCos\n transMatrix[18:21, 18:21] = dirCos\n transMatrix[21:24, 21:24] = dirCos\n \n return transMatrix",
"def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)",
"def create_torus(minr=0.5, maxr=1.0, lat=30, lng=30, color=COLOR_WHITE):\n if lat >= 3 and lng >= 3:\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glColor4fv(color)\n try:\n glutSolidTorus(minr, maxr, lat, lng)\n except:\n if not _ERRS[2]:\n printGLError(\n \"la version actual de OpenGL no posee la funcion glutSolidTorus\")\n _ERRS[2] = True\n glPopMatrix()\n glEndList()\n return obj\n else:\n raise Exception(\n \"La latitud y longitud de la figura deben ser mayores a 3\")",
"def draw_coord(turt, coords, chromo):\r\n turt.reset()\r\n turt.goto(coords[0].pos) # starts at 0\r\n turt.color(\"lawngreen\")\r\n turt.pd()\r\n for c in chromo:\r\n i = int(c)\r\n turt.goto(coords[i].pos)\r\n turt.goto(coords[0].pos) # ends at 0\r\n turt.pu()",
"def position(self, t):\n return vector_add(self.origin, self.direction.scale(t))",
"def transform_coords(x, y, w, h, nw, nh):\r\n return ((((x / w) - 0.5) * nw), (((h - y) / h) - 0.5) * nh)",
"def getCenter(self):\n return [self.tx/self.tw, self.ty/self.tw]",
"def translate(self, tr):\n c = self.c -self.a*tr[0] -self.b*tr[1]\n self.c =c\n self.pointN = self.pointN+tr\n self.point1 = self.point1+tr\n self.points +=tr",
"def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)",
"def line():\n tt.left(90)\n tt.down()\n tt.forward(50)\n tt.up()\n tt.right(90)\n tt.forward(10)\n tt.right(90)\n tt.forward(50)\n tt.left(90)",
"def det_to_world(self, x, y):\n ra, dec = x, y\n return ra, dec",
"def to_coords(self, px, py):\n if px not in range(self.SIZE**2) or py not in range(self.SIZE**2):\n raise IndexError\n return (px // self.SIZE, py // self.SIZE,\n px % self.SIZE, py % self.SIZE)",
"def latlon_2_grid(x, y, z, origin):\n new_y = (y - origin[1]) * 111111\n new_x = (x - origin[0]) * (111111 * np.cos(origin[1] * (np.pi/180)))\n return new_x, new_y, z",
"def _to_world_coord(self, x, y):\n maze = self._get_maze()\n y = maze.shape[1] - y - 1\n return (float(x) + .5) * _MAZE_CELL_SIZE, (float(y) + .5) * _MAZE_CELL_SIZE",
"def to_world(self, x, y, **kwargs):",
"def set_relative_coordinates(self, *turbines):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n self.xi = []\r\n self.yi = []\r\n\r\n # alias:\r\n NT = Turbine.NT\r\n N = Turbine.N\r\n \r\n # append an empty array of N garbage elements:\r\n for i in range(int(NT)):\r\n self.xi.append(np.zeros((N, ), dtype=float))\r\n self.yi.append(np.zeros((N, ), dtype=float))\r\n \r\n # fill each element of the list with the relative coordinates respect \r\n # to the i-th turbine:\r\n for i, turbine in enumerate(turbines):\r\n self.xi[i] = self.x - turbine.x0\r\n self.yi[i] = self.y - turbine.y0"
]
| [
"0.60562533",
"0.5994425",
"0.59405386",
"0.594034",
"0.5853108",
"0.57843846",
"0.5775709",
"0.576947",
"0.5756959",
"0.57361114",
"0.5690496",
"0.5659488",
"0.5646095",
"0.5643621",
"0.5636459",
"0.5635302",
"0.5627718",
"0.56259024",
"0.562121",
"0.5606195",
"0.56057143",
"0.56011933",
"0.5590582",
"0.55885917",
"0.5579039",
"0.55736476",
"0.5568421",
"0.5546879",
"0.5533536",
"0.55334973"
]
| 0.7198467 | 0 |
Recreates database table and populates with seed data. Know that this will reset your db to defaults | def recreate():
from data.seed import Seed
if click.confirm("Are you sure you want to lose all your data"):
db.drop_all()
db.create_all()
Seed().data() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recreate_db():\n drop_db()\n create_db()\n populate_db()",
"def reset_db():\n db.drop_all()\n _init_db()",
"def recreate_db():\n\n print(\"will reinit db - FAKE\")\n db.create_tables([Message, Instance])\n\n # no need to prepare a sample record.\n # use http to create init request instead.\n #inject_record()",
"def recreate_db():\n db.session.execute('SET FOREIGN_KEY_CHECKS=0;')\n db.session.execute('DROP TABLE IF EXISTS logs;')\n db.session.execute('DROP TABLE IF EXISTS employees;')\n db.session.execute('DROP TABLE IF EXISTS sales;')\n db.session.execute('DROP TABLE IF EXISTS plants;')\n db.session.execute('DROP TABLE IF EXISTS products;')\n db.session.execute('DROP TABLE IF EXISTS suppliers;')\n db.session.execute('DROP TABLE IF EXISTS orders;')\n db.session.execute('DROP TABLE IF EXISTS contacts;')\n db.session.execute('DROP TABLE IF EXISTS varieties;')\n db.session.execute('DROP TABLE IF EXISTS species;')\n db.session.execute('DROP TABLE IF EXISTS genera;')\n db.session.execute('DROP TABLE IF EXISTS families;')\n db.drop_all()\n db.create_all()\n db.session.commit()\n fakePlant = Plant(living = True)\n db.session.add(fakePlant)\n db.session.commit()\n db.session.delete(fakePlant)\n db.session.execute('SET FOREIGN_KEY_CHECKS=1;')\n db.session.commit()",
"def reset_database(self):\n\n self.db.reset_database()",
"def reset_db():\n\n webapp.dbsql.drop_all()\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()",
"def init_db():\n db.drop_all()\n db.create_all()\n seed_companies()\n seed_emission_reports()\n seed_reduction_targets()\n seed_milestones()",
"def reset():\n teardown_db()\n build()",
"def rebuild_db():\n delete_db()\n create_db()\n insert_db()",
"def recreate_db():\n drop_db()\n create_db()",
"def reset_db():\n\n metadata = sa.MetaData()\n metadata.reflect(engine)\n for tbl in reversed(metadata.sorted_tables):\n tbl.drop(engine)\n create_tables()",
"def reset_db_danger():\n from flask.ext.migrate import init, migrate\n # Remove the migration folder if exist\n if os.path.exists('migrations'):\n shutil.rmtree('migrations')\n\n # Remove the sqlite database files if exist\n for fl in glob.glob('*.sqlite'):\n os.remove(fl)\n\n # Reset Migration Database\n init()\n\n # migrate database to latest revision\n migrate(message='init')",
"def clean_up():\n drop_all_tables()\n create_all()",
"def initialize_empty_database(self):\r\n Base.metadata.create_all(self.engine)",
"def reset(self):\n self._execute(\"DELETE FROM collection_table\")\n self._execute(\"DELETE FROM keyword_table\")",
"def resetdb():\n\n if database_exists(DB_URL):\n print('Deleting database.')\n drop_database(DB_URL)\n if not database_exists(DB_URL):\n print('Creating database.')\n create_database(DB_URL)\n\n print('Creating tables.')\n db.create_all()\n print('Shiny!')",
"def reset_the_db(_step):\r\n reset_data(None)",
"def populate_db():\n try:\n users = [\n User(name=u'admin', role=1),\n ]\n db.session.add_all(users)\n db.session.commit()\n except:\n db.session.rollback()\n raise Exception(\"Failed to populate the database\")\n finally:\n db.session.close()",
"def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()",
"def clearDatabase():\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)",
"def reset_dbs():\n db.answering_users.remove({})\n db.answered_users.remove({})",
"def reset_db():\n from alembic.command import downgrade, upgrade\n from alembic.config import Config as AlembicConfig\n config = AlembicConfig('alembic.ini')\n downgrade(config, 'base')\n upgrade(config, 'head')\n print('Database has been reset')",
"def init_db():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()",
"def reset(self) -> None:\n call_command('migrate', verbosity=0, database=self._database)",
"def initdb():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()",
"def setUp(self):\n db.drop_all() # clean up the last tests\n db.create_all() # make our sqlalchemy tables",
"def recreate_db(self, run=False):\n if run:\n db_schema = open(self.db_schema_file).read().splitlines()\n for s in db_schema:\n t = s.strip()\n if len(t):\n self.cur.execute(t)",
"def create_db_from_scratch():\n if os.path.isfile('data.db'):\n os.remove('data.db')\n Base.metadata.create_all(engine)",
"def initialise_db(prefill=True, clear=False):\n db.DB.create_all()\n\n if clear:\n prompt = raw_input(\n 'Are you sure you wish to clear the entire database? '\n )\n\n if prompt.lower() in ['yes', 'y']:\n models.Affiliation.query.delete()\n models.Announcement.query.delete()\n models.Battels.query.delete()\n models.CardTransaction.query.delete()\n models.College.query.delete()\n models.Log.query.delete()\n models.Photo.query.delete()\n models.Statistic.query.delete()\n models.Ticket.query.delete()\n models.Transaction.query.delete()\n models.TransactionItem.query.delete()\n models.User.query.delete()\n models.Voucher.query.delete()\n models.Waiting.query.delete()\n\n if prefill:\n db.DB.session.add_all(static.COLLEGES)\n db.DB.session.add_all(static.AFFILIATIONS)\n db.DB.session.commit()",
"def reset():\n DB.drop_all()\n DB.create_all()\n for list_item in list_tuple:\n # id, backers_count, category, goal, pledged, spotlight, state, blurb_length, goal_in_usd, campaign_duration, sub_category\n db_record = Record(id=list_item[0], backers_count=list_item[1], category = list_item[2], \n pledged = list_item[3], state = list_item[4],\n blurb_length = list_item[5], goal_in_usd = list_item[6], \n campaign_duration = list_item[7], sub_category = list_item[8]) \n DB.session.add(db_record)\n DB.session.commit()\n users = Record.query.all() # SQL equivalent: `SELECT * FROM user;`\n return render_template(\"base.html\", title='Home', users=users)"
]
| [
"0.7524338",
"0.75184804",
"0.7442109",
"0.7368387",
"0.7292522",
"0.72371817",
"0.72340506",
"0.7040728",
"0.6964414",
"0.6949868",
"0.690078",
"0.6897268",
"0.6821358",
"0.6815195",
"0.6735268",
"0.67309934",
"0.6709054",
"0.6651812",
"0.6625447",
"0.6622221",
"0.66213125",
"0.65740937",
"0.6554215",
"0.65485585",
"0.64961725",
"0.64777446",
"0.6461097",
"0.6454051",
"0.6437257",
"0.63867486"
]
| 0.78233486 | 0 |
Get attribute values by column name from a shapefile | def get_shp_attribute_by_name(shpfname, attrname):
driver = ogr.GetDriverByName("ESRI Shapefile")
vector = driver.Open(shpfname, 0)
layer = vector.GetLayer(0)
f = layer.GetFeature(0)
val = []
for i in range(layer.GetFeatureCount()):
f = layer.GetFeature(i)
val.append(f.GetField(attrname))
return val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_column_ontology_details(self, column_name):\n ontology_details = []\n \n try:\n con = self.getMetadataDatabaseConnection()\n ontologies = con.cursor()\n con.cursor().callproc('qiime_assets.get_column_ontologies', [column_name, ontologies])\n query_results=[]\n for row in ontologies:\n # row[0] = short_name\n # row[1] = bioportal_id\n # row[2] = ontology_branch_id\n ontology_details.append((row[0], row[1], row[2]))\n return ontology_details\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def get_attributes(file_name, res):\r\n # Open file in read mode to get indexes of target and replace\r\n with open(file_name + \".txt\") as f:\r\n content = f.readline()\r\n target = res[0].split()[1][:-1]\r\n target2 = res[0].split()[2]\r\n content = content.split()\r\n # Placement of column to update\r\n index1 = content.index(target) // 2\r\n index2 = content.index(target2) // 2\r\n indexes = {index1, index2}\r\n attributes = attributes_picker(index1, index2)\r\n return attributes, indexes",
"def get_specific_col_data( self, columns):\n headers = []\n for i in range(len(columns)):\n headers.append(self.header2col[columns[i]])\n return self.data[:,headers]",
"def __getitem__(self, col_name):\n return self.data[self.cols[col_name]]",
"def get_attributes(self, shape):\n attributes = {}\n identifier_names = [i.name for i in self.identifiers]\n\n for name, member in shape.members.items():\n snake_cased = xform_name(name)\n if snake_cased in identifier_names:\n # Skip identifiers, these are set through other means\n continue\n snake_cased = self._get_name(\n 'attribute', snake_cased, snake_case=False\n )\n attributes[snake_cased] = (name, member)\n\n return attributes",
"def __getattr__(self, col):\n return self._obj[col].to_numpy()",
"def get_meta(filename):\n with fiona.open(filename) as collection:\n return collection.meta",
"def column_values(table: list[dict[str, str]], column_name: str) -> list[str]:\n column_values: list[str] = []\n for row in table:\n item: str = row[column_name]\n column_values.append(item)\n return column_values",
"def _get_column_values(self, col: str) -> ndarray:\n dtype, loc = self._get_col_dtype_loc(col) # type: str, int\n return self._data[dtype][:, loc]",
"def query_header(self, name):\n if name not in self.file.root.photons.photontable.attrs:\n raise KeyError(name)\n # the implementation does not like missing get calls\n return getattr(self.file.root.photons.photontable.attrs, name)",
"def _get_column(cls, name):\r\n return cls._columns[name]",
"def get_labels_by_attr_name(self, attr_name):\n # get index of attribute\n try:\n attr_index = self.attr_names.index(attr_name)\n except:\n print('unidentified attribute name...!!!')\n\n return self.attr_data[:, attr_index]",
"def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))",
"def input_shape(file_name):\n data = pd.read_csv(file_name,header=None)\n x_data = data.iloc[:,0]\n y_data = data.iloc[:,1]\n x = np.array(x_data).T\n y = np.array(y_data).T\n return x,y",
"def _get_column(self, name):\r\n return self.column(name)",
"def getFeatureColumnData(featurefile):\n featurecoldata = pd.read_csv(featurefile, sep=\"\\t\", header=None).values\n features = []\n for i in range(0, len(featurecoldata)):\n features.extend(range(featurecoldata[i,0], featurecoldata[i,1]))\n return features",
"def get_data_column(self, columnname):\n c_index = self._get_header_index(columnname)\n\n column_data = []\n for row in self.data:\n column_data.append(row[c_index])\n \n return column_data",
"def read_col(self, colname):\n self.open_msfile()\n data = self.tb.getcol(colname)\n self.close_msfile()\n return data",
"def queryAttributeNames(name):\n\n header, rows = querySciDB(\"attributes(%s)\" % name)\n return [row[1].translate(None, \"\\\"\") for row in rows]",
"def readData(fname):\n pd = pandas.read_csv(fname)\n return [numpy.array(pd[colname]) for colname in pd.columns[1:]]",
"def get_features(data, col_list, y_name):\n \n # keep track of numpy values\n feature_matrix = data[col_list + [y_name]].dropna().values\n return feature_matrix[:, :-1], feature_matrix[:, -1]",
"def readAttributesFile(self, filepath):\n raw_data = np.genfromtxt(filepath, skip_header=1, delimiter=\",\", filling_values=0, dtype=None)\n data = [list(item)[1:] for item in raw_data]\n\n self.attributeMatrix = np.asmatrix(data)\n n = self.attributeMatrix.shape[1]\n self.location = self.attributeMatrix[:, 0:2]\n self.location = self.location.astype('float')\n self.pop = self.attributeMatrix[:, 2:n].astype('int')\n # self.pop[np.where(self.pop < 0)[0], np.where(self.pop < 0)[1]] = 0\n self.n_group = n-2\n self.n_location = self.attributeMatrix.shape[0]\n self.pop_sum = np.sum(self.pop, axis=1)\n self.tract_id = np.asarray([x[0] for x in raw_data]).astype(str)\n self.tract_id = self.tract_id.reshape((self.n_location, 1))\n\n return self.attributeMatrix",
"def get_column_from_file(file_name, column_number):\n\n file = open(file_name, 'r')\n column_values = []\n for line in file:\n row_values = [int(value.strip()) for value in line.split()]\n column_values.append(row_values[column_number])\n\n return column_values",
"def extract_vectors_with_id_col(row):\n # tuple(x for x in row if x not in ['pcaFeatures'])+\n return (row[id_col],)+tuple(float(x) for x in row.pcaFeatures.values)",
"def get_csv_data(filepath):\n # Read the csv file into a pands dataframe\n csv_df = pd.read_csv(filepath)\n\n # Read the columns into coordinate arrays\n x = csv_df.iloc[:, 0]\n y = csv_df.iloc[:, 1]\n return x, y",
"def retrieve_shapely(osm_path,geoType,keyCol,**valConstraint):\n driver=ogr.GetDriverByName('OSM')\n data = driver.Open(osm_path)\n query = query_b(geoType,keyCol,**valConstraint)\n sql_lyr = data.ExecuteSQL(query)\n features =[]\n # cl = columns \n cl = ['osm_id'] \n for a in keyCol: cl.append(a)\n if data is not None:\n print('query is finished, lets start the loop')\n for feature in tqdm(sql_lyr):\n try:\n if feature.GetField(keyCol[0]) is not None:\n geom = loads(feature.geometry().ExportToWkb()) \n if geom is None:\n continue\n # field will become a row in the dataframe.\n field = []\n for i in cl: field.append(feature.GetField(i))\n field.append(geom) \n features.append(field)\n except:\n print(\"WARNING: skipped OSM feature\") \n else:\n print(\"ERROR: Nonetype error when requesting SQL. Check required.\") \n cl.append('geometry') \n if len(features) > 0:\n return geopandas.GeoDataFrame(features,columns=cl,crs={'init': 'epsg:4326'})\n else:\n print(\"WARNING: No features or No Memory. returning empty GeoDataFrame\") \n return geopandas.GeoDataFrame(columns=['osm_id','geometry'],crs={'init': 'epsg:4326'})",
"def get_column(row, cname):\n\n try:\n return row[_colmap[cname]]\n\n except KeyError:\n raise ValueError(\"Invalid column name: {0}!\".format(cname))",
"def _get_columns(cls, manifest_url):\n return [k[\"name\"] for k in\n json.loads(cls._read_s3_url(manifest_url))[\"schema\"][\"elements\"]]",
"def query_aisc(name, metric=False, version=None):\n name = name.upper()\n table = _aisc_table(metric, version)\n\n statement = \"SELECT * FROM {} WHERE UPPER(name)='{}';\".format(table, name)\n cursor = DB_CONNECTION.execute(statement)\n\n header = original_names(cursor.description)\n row = cursor.fetchone()\n\n if not row:\n raise ValueError('Shape {} not found.'.format(name))\n\n odict = {k: x for k, x in zip(header, row) if x is not None}\n\n return odict",
"def extract_data_from_all_xdm_schema(path: Path) -> Tuple[dict, dict]:\n with open(path, newline=\"\") as csvfile:\n reader = csv.DictReader(csvfile)\n\n columns_to_keep = [\"name\", \"datatype\", \"dataclass\"]\n data = {\n row[\"name\"]: {col: row[col] for col in columns_to_keep if col in row}\n for row in reader\n }\n xdm_rule_to_dtype = {\n k: v[\"datatype\"] for k, v in data.items() if \"datatype\" in v\n }\n xdm_rule_to_dclass = {\n k: v[\"dataclass\"] for k, v in data.items() if \"dataclass\" in v\n }\n\n return xdm_rule_to_dtype, xdm_rule_to_dclass"
]
| [
"0.60772455",
"0.56066895",
"0.5523452",
"0.55085415",
"0.54409367",
"0.5423855",
"0.53664464",
"0.53392047",
"0.53326654",
"0.5331746",
"0.53259516",
"0.5319814",
"0.5301684",
"0.52871823",
"0.526558",
"0.5256799",
"0.52234125",
"0.52222466",
"0.52016175",
"0.52005154",
"0.52000433",
"0.51809216",
"0.5178057",
"0.51766",
"0.51682013",
"0.5159276",
"0.5157397",
"0.51548785",
"0.51392484",
"0.51356405"
]
| 0.6711834 | 0 |
Method for viewing associated budget lines. | def view_budget_lines(self, cr, uid, ids, context=None):
ctx = context.copy()
ctx['default_line_id'] = ids[0]
ctx['allow_create'] = True
# Return view with budget lines
return {
'name': _('Budget lines'),
'domain': "[('line_id', 'in', %s)]" % ids,
'view_type': 'form',
'view_mode': 'tree',
'res_model': 'account.balance.reporting.template.line.budget',
'context': ctx,
'type': 'ir.actions.act_window',
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view_budgets(self) -> None:\n Menu.prompt_view_budgets()\n for budget in self.user.budget_manager:\n print(f\"{budget}\\n\")",
"def view(self):\n\n print('Here\\'s your expense and income records:\\n'+' '*3+'Category'+' '*7+\\\n 'Description'+' '*4+'Amount\\n'+'='*40)\n line = 1\n amount = self._initial_money\n for n in self._records:\n m = n.split() # m is a list in the form ['category', 'item', 'money']\n print(f'{line:<3}{m[0]:<15}{m[1]:<15}{m[2]}')\n amount += int(m[2])\n line += 1\n print('='*40 + f'\\nNow you have {amount} dollars.')",
"def complete(self, cr, uid, ids, context={}):\n budget_pool = self.pool.get('account.budget')\n budget_line_pool = self.pool.get('account.budget.lines')\n for r in self.browse(cr, uid, ids, context=context):\n if r.type=='transfer' and not r.line_ids:\n raise osv.except_osv(_('Error!'),_('You cannot complete Transfer Operations without any Budget line.'))\n if r.budget_type=='cash':\n budget_ids = budget_pool.search(cr, uid,[('analytic_account_id', '=', r.analytic_account_id.id), \n ('period_id', '=', r.period_id.id)], context=context)\n budget_line_id = budget_line_pool.search(cr, uid,[('general_account_id', '=', r.account_id.id), \n ('account_budget_id', 'in', tuple(budget_ids))], context=context)\n if budget_line_id:\n line=budget_line_pool.browse(cr, uid, budget_line_id, context=context)[0]\n if line.planned_amount+line.total_operation < line.cash_total_operation + r.amount:\n raise orm.except_orm(_('Error!'),\n _(\"Cash budget (%s) can't be more than planned budget (%s)!\") % \\\n ( line.cash_total_operation+ r.amount,line.planned_amount+line.total_operation ,))\n if line.cash_residual_balance + r.amount <=0:\n raise orm.except_orm(_('Error!'),\n _(\"The amount you try to transfer (%s) is more than %s residual (%s)!\") % \\\n (r.amount, line.name, line.cash_residual_balance,))\n for e in r.line_ids:\n if line.planned_amount+line.total_operation < line.cash_total_operation - r.amount:\n raise orm.except_orm(_('Error!'),\n _(\"Cash budget (%s) can't be more than planned budget (%s)!\") % \\\n ( e.cash_total_operation- r.amount,line.planned_amount+line.total_operation ,))\n if e.line_id.cash_residual_balance - e.amount <=0:\n raise orm.except_orm(_('Error!'),\n _(\"The amount you try to transfer (%s) is more than %s residual (%s)!\") % \\\n (e.amount, e.line_id.name, e.line_id.cash_residual_balance,))\n return self.write(cr, uid, ids,{'state':'complete','name': r.name == '/' and \n self.pool.get('ir.sequence').get(cr, uid, 'account.budget.operation') or \n r.name, 'amount': r.type=='increase' and r.amount or sum([l.amount for l in r.line_ids])}, context=context)\n \n return super(account_budget_operation, self).complete(cr, uid, ids, context=context)",
"def get(self, request, entity_pk, node_pk, *args, **kwargs):\n\n budget_items = BudgetItem.objects.timeline(node_pk, entity_pk)\n serializer = serializers.BudgetItemLinked(budget_items, many=True)\n return Response(serializer.data)",
"def _display_screen(self,ids,form,detailed):\n def _remove_noise_in_o2m():\n \"\"\"if the line is partially reconciled, then we must pay attention to display it only once and\n in the good o2m.\n This function returns True if the line is considered as noise and should not be displayed\n \"\"\"\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False\n cr=self.cr\n uid=self.uid\n #context=self.context\n ids=[ids]\n\n currency_pool = self.pool.get('res.currency')\n move_line_pool = self.pool.get('account.move.line')\n partner_pool = self.pool.get('res.partner')\n journal_pool = self.pool.get('account.journal')\n line_pool = self.pool.get('account.voucher.line')\n payment_display=self.pool.get('account.payment.display.credit')\n\n # Unlink all the records from the Customer Statement Table and Re write the Records once again...\n payment_ids = payment_display.search(cr,uid,[('partner_id','=',ids[0])])\n if payment_ids:\n payment_display.unlink(cr,uid,payment_ids)\n\n \n #set default values\n default = {\n 'value': {'line_dr_ids': [] ,'line_cr_ids': []},\n }\n\n #drop existing lines\n line_ids = ids and line_pool.search(cr, uid, [('voucher_id', '=', ids[0])]) or False\n if line_ids:\n line_pool.unlink(cr, uid, line_ids)\n\n partner = partner_pool.browse(cr, uid, ids[0], context=None)\n currency_id = partner.company_id.currency_id.id\n\n total_credit = 0.0\n total_debit = 0.0\n account_type = 'receivable'\n\n ids = move_line_pool.search(cr, uid, [('state','=','valid'), ('account_id.type', '=', account_type), ('reconcile_id', '=', False), ('partner_id', '=', partner.id)])\n\n #invoice_id = context.get('invoice_id', False)\n company_currency = partner.company_id.currency_id.id\n move_lines_found = []\n\n #order the lines by most old first\n ids.reverse()\n account_move_lines = move_line_pool.browse(cr, uid, ids, context=None)\n\n #compute the total debit/credit and look for a matching open amount or invoice\n for line in account_move_lines:\n if _remove_noise_in_o2m():\n continue\n\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency == price:\n move_lines_found.append(line.id)\n break\n total_credit += line.credit and line.amount_currency or 0.0\n total_debit += line.debit and line.amount_currency or 0.0\n\n #voucher line creation\n for line in account_move_lines:\n price=0\n if _remove_noise_in_o2m():\n continue\n\n if line.currency_id and currency_id == line.currency_id.id:\n amount_original = abs(line.amount_currency)\n amount_unreconciled = abs(line.amount_residual_currency)\n else:\n #always use the amount booked in the company currency as the basis of the conversion into the voucher currency\n amount_original = currency_pool.compute(cr, uid, company_currency, currency_id, line.credit or line.debit or 0.0)\n amount_unreconciled = currency_pool.compute(cr, uid, company_currency, currency_id, abs(line.amount_residual))\n line_currency_id = line.currency_id and line.currency_id.id or company_currency\n \n rs = {\n 'ref':line.ref,\n 'date':line.date,\n 'blocked':line.blocked,\n 'company_id':line.company_id.id, \n 'invoice_date': line.date_created,\n 'reference': line.name,\n 'partner_id':partner.id,\n 'state':line.state,\n 'reconcile_id':False, \n 'invoice_no':line.move_id.name,\n 'move_id':line.move_id,\n 'type': line.credit and 'dr' or 'cr',\n 'move_line_id':line.id,\n 'account_id':line.account_id.id,\n 'amount_original': 
amount_original,\n 'amount': (line.id in move_lines_found) and min(abs(price), amount_unreconciled) or 0.0,\n 'date':line.date,\n 'date_maturity':line.date_maturity,\n 'amount_unreconciled': amount_unreconciled,\n 'currency_id': line_currency_id,\n }\n price -= rs['amount']\n #in case a corresponding move_line hasn't been found, we now try to assign the voucher amount\n #on existing invoices: we split voucher amount by most old first, but only for lines in the same currency\n if not move_lines_found:\n if currency_id == line_currency_id:\n if line.credit:\n amount = min(amount_unreconciled, abs(total_debit))\n rs['amount'] = amount\n total_debit -= amount\n else:\n amount = min(amount_unreconciled, abs(total_credit))\n rs['amount'] = amount\n total_credit -= amount\n\n if rs['amount_unreconciled'] == rs['amount']:\n rs['reconcile'] = True\n\n if rs['type'] == 'cr':\n default['value']['line_cr_ids'].append(rs)\n else:\n default['value']['line_dr_ids'].append(rs)\n Fields=[]\n \n for data in default['value'].keys():\n for value in default['value'][data]:\n if value['type']=='cr':\n debit=value['amount_original']\n credit=debit-value['amount_unreconciled']\n \n else:\n # Here Customer Refunds , or Customer Excess Amount is Recorded..\n credit=value['amount_original']\n debit=credit-value['amount_unreconciled']\n value.update({'debit':debit,'credit':credit,'result':debit-credit})\n move_line_id=value['move_line_id']\n for ele in ['amount_unreconciled','move_line_id','type','amount_original','amount']:\n value.pop(ele)\n\n payment_display.create(cr, uid,value)\n\n #Writing the Fields to Table........\n value.update({'1':0.0,'2':0.0,'3':0.0,'4':0.0,'0':0.0,'due':0.0,'move_line_id':move_line_id})\n Fields.append(value) \n \n if detailed:\n for line in Fields:\n period=self.check_line(line['move_line_id'],detailed)\n if period in range(5):\n line.update({str(period):line['result']})\n if period=='due':\n line.update({'due':line['result']})\n else:\n for line in Fields:\n line.update({'due':line['result']})\n return Fields",
"def do_show(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tprint(\"Contacts in the current book\\n\")\n\t\t\tself.cl.list_contacts()\n\t\telse:\n\t\t\tprint(\"To see contacts you need to open or create book\")",
"def display_draw(request, draw_id):\n bom_draw = MONGO.retrieve_draw(draw_id)\n draw_type = draw_factory.get_draw_name(bom_draw.draw_type)\n if bom_draw.check_read_access(request.user):\n prev_draw_data = bom_draw.__dict__.copy()\n draw_form = draw_factory.create_form(draw_type, prev_draw_data)\n return render(request, \"draws/display_draw.html\", {\"draw\": draw_form, \"bom\": bom_draw})\n else:\n return render(request, \"draws/secure_draw.html\", {\"bom\": bom_draw})",
"def action_generate_lines_txt(self):\n rp_obj = self.env['res.partner']\n voucher_obj = self.env['account.wh.iva']\n txt_iva_obj = self.env['txt.iva.line']\n vouchers = []\n txt_brw = self.browse(self._ids)[0]\n txt_ids = txt_iva_obj.search([('txt_id', '=', txt_brw.id)])\n if txt_ids:\n txt_ids.unlink()\n\n if txt_brw.type:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['in_invoice', 'in_refund'])])\n else:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n\n for voucher in vouchers:\n acc_part_id = rp_obj._find_accounting_partner(voucher.partner_id)\n for voucher_lines in voucher.wh_lines:\n if voucher_lines.invoice_id.state not in ['open', 'paid']:\n continue\n for voucher_tax_line in voucher_lines.tax_line:\n txt_iva_obj.create(\n {'partner_id': acc_part_id.id,\n 'voucher_id': voucher.id,\n 'invoice_id': voucher_lines.invoice_id.id,\n 'txt_id': txt_brw.id,\n 'untaxed': voucher_tax_line.base,\n 'amount_withheld': voucher_tax_line.amount_ret,\n 'tax_wh_iva_id': voucher_tax_line.id,\n })\n return True",
"def _display_screen(self,ids,form,detailed):\n def _remove_noise_in_o2m():\n \"\"\"if the line is partially reconciled, then we must pay attention to display it only once and\n in the good o2m.\n This function returns True if the line is considered as noise and should not be displayed\n \"\"\"\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False\n cr=self.cr\n uid=self.uid\n #context=self.context\n ids=[ids]\n #print \"Ids Here The Test::::::::::: Payment Display:::::::::::\",ids\n currency_pool = self.pool.get('res.currency')\n move_line_pool = self.pool.get('account.move.line')\n partner_pool = self.pool.get('res.partner')\n journal_pool = self.pool.get('account.journal')\n line_pool = self.pool.get('account.voucher.line')\n payment_display=self.pool.get('account.payment.display.credit')\n\n # Unlink all the records from the Customer Statement Table and Re write the Records once again...\n payment_ids = payment_display.search(cr,uid,[('partner_id','=',ids[0])])\n if payment_ids:\n payment_display.unlink(cr,uid,payment_ids)\n\n \n #set default values\n default = {\n 'value': {'line_dr_ids': [] ,'line_cr_ids': []},\n }\n\n #drop existing lines\n line_ids = ids and line_pool.search(cr, uid, [('voucher_id', '=', ids[0])]) or False\n if line_ids:\n line_pool.unlink(cr, uid, line_ids)\n\n partner = partner_pool.browse(cr, uid, ids[0], context=None)\n currency_id = partner.company_id.currency_id.id\n\n total_credit = 0.0\n total_debit = 0.0\n account_type = 'receivable'\n\n ids = move_line_pool.search(cr, uid, [('state','=','valid'), ('account_id.type', '=', account_type), ('reconcile_id', '=', False), ('partner_id', '=', partner.id)])\n\n #invoice_id = context.get('invoice_id', False)\n company_currency = partner.company_id.currency_id.id\n move_lines_found = []\n\n #order the lines by most old first\n ids.reverse()\n account_move_lines = move_line_pool.browse(cr, uid, ids, context=None)\n\n #compute the total debit/credit and look for a matching open amount or invoice\n for line in account_move_lines:\n if _remove_noise_in_o2m():\n continue\n\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency == price:\n move_lines_found.append(line.id)\n break\n total_credit += line.credit and line.amount_currency or 0.0\n total_debit += line.debit and line.amount_currency or 0.0\n\n #voucher line creation\n for line in account_move_lines:\n price=0\n if _remove_noise_in_o2m():\n continue\n\n if line.currency_id and currency_id == line.currency_id.id:\n amount_original = abs(line.amount_currency)\n amount_unreconciled = abs(line.amount_residual_currency)\n else:\n #always use the amount booked in the company currency as the basis of the conversion into the voucher currency\n amount_original = currency_pool.compute(cr, uid, company_currency, currency_id, line.credit or line.debit or 0.0)\n amount_unreconciled = currency_pool.compute(cr, uid, company_currency, currency_id, abs(line.amount_residual))\n line_currency_id = line.currency_id and line.currency_id.id or company_currency\n \n rs = {\n 'ref':line.ref,\n 'date':line.date,\n 'blocked':line.blocked,\n 'company_id':line.company_id.id, \n 'invoice_date': line.date_created,\n 'reference': line.name,\n 'partner_id':partner.id,\n 'state':line.state,\n 'reconcile_id':False, \n 'invoice_no':line.move_id.name,\n 'move_id':line.move_id,\n 'type': line.credit and 'dr' or 'cr',\n 
'move_line_id':line.id,\n 'account_id':line.account_id.id,\n 'amount_original': amount_original,\n 'amount': (line.id in move_lines_found) and min(abs(price), amount_unreconciled) or 0.0,\n 'date':line.date,\n 'date_maturity':line.date_maturity,\n 'amount_unreconciled': amount_unreconciled,\n 'currency_id': line_currency_id,\n }\n price -= rs['amount']\n #in case a corresponding move_line hasn't been found, we now try to assign the voucher amount\n #on existing invoices: we split voucher amount by most old first, but only for lines in the same currency\n if not move_lines_found:\n if currency_id == line_currency_id:\n if line.credit:\n amount = min(amount_unreconciled, abs(total_debit))\n rs['amount'] = amount\n total_debit -= amount\n else:\n amount = min(amount_unreconciled, abs(total_credit))\n rs['amount'] = amount\n total_credit -= amount\n\n if rs['amount_unreconciled'] == rs['amount']:\n rs['reconcile'] = True\n\n if rs['type'] == 'cr':\n default['value']['line_cr_ids'].append(rs)\n else:\n default['value']['line_dr_ids'].append(rs)\n Fields=[]\n \n for data in default['value'].keys():\n for value in default['value'][data]:\n if value['type']=='cr':\n debit=value['amount_original']\n credit=debit-value['amount_unreconciled']\n \n else:\n # Here Customer Refunds , or Customer Excess Amount is Recorded..\n credit=value['amount_original']\n debit=credit-value['amount_unreconciled']\n value.update({'debit':debit,'credit':credit,'result':debit-credit})\n move_line_id=value['move_line_id']\n for ele in ['amount_unreconciled','move_line_id','type','amount_original','amount']:\n value.pop(ele)\n\n payment_display.create(cr, uid,value)\n\n #Writing the Fields to Table........\n value.update({'1':0.0,'2':0.0,'3':0.0,'4':0.0,'0':0.0,'due':0.0,'move_line_id':move_line_id})\n Fields.append(value) \n \n if detailed:\n for line in Fields:\n period=self.check_line(line['move_line_id'],detailed)\n if period in range(5):\n line.update({str(period):line['result']})\n if period=='due':\n line.update({'due':line['result']})\n else:\n for line in Fields:\n line.update({'due':line['result']})\n return Fields",
"def lines(self):\n return (\n self._line_queryset.select_related(\"experimenter__userprofile\", \"updated\")\n .annotate(strain_names=ArrayAgg(\"strains__name\"))\n .prefetch_related(Prefetch(\"strains\", to_attr=\"strain_list\"))\n )",
"def do_show(self, line):\n\t\tif not(self.db is None):\n\t\t\tfor contact in self.db.contact.find():\n\t\t\t\tpprint.pprint(contact)\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")",
"def investment_line(self):\n inv, marks = self._get_marks()\n fig = plt.figure(figsize=(4, 2), dpi=200)\n fig.patch.set_facecolor('#ececec')\n ax = fig.add_subplot(111)\n investmentValues = inv['Invested']\n #investmentValues = pd.Series([0], index=[investmentValues.index[0]-timedelta(1)]).append(investmentValues)\n ax.plot(investmentValues, lw=1.2, color=\"blue\", label='Invested', marker=\"o\", markersize=3, markerfacecolor=\"grey\")\n ax.set_xlabel('Time')\n ax.set_ylabel('Investments (€)')\n ax.set_title('Investment Amount (€) - Daily')\n ax.xaxis.set_major_locator(dates.MonthLocator())\n ax.xaxis.set_major_formatter(dates.DateFormatter('%b-%Y'))\n for x, y, mark in zip(marks.index, marks['Invested'], marks['Marks']):\n a = ax.get_ylim()\n if x == marks.index[0]:\n ax.annotate(str(mark) + \" €\", xy=(x + timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 80), y + (a[1]-a[0])/35), fontsize=5)\n else:\n ax.annotate(str(mark) + \" €\", xy=(x + timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 50), y - (a[1]-a[0])/35), fontsize=5)\n ax.grid(True)\n fig.autofmt_xdate()\n ax.legend()\n return fig, ax",
"def display(self):\n print(f'{self.first_name} {self.last_name}, Customer#: '\n f'{self.customer_id}\\n{self.address}\\n{self.phone_number}\\n'\n f'{self.create_invoice()}')",
"def summary_line_and_description():",
"def print_transactions_for_review(self, budget: Budget) -> None:\n print(f'Please review the following transactions in the {budget.name} '\n f'budget:')\n transactions = self.get_transactions_by_budget(budget.category)\n for transaction in transactions:\n print(transaction)",
"def view_bank_account_details(self) -> None:\n Menu.prompt_view_bank_account_details()\n print(\"Bank Account Details:\")\n print(self.user.account)\n\n for tx_num, tx_details in \\\n self.user.tx_manager.transaction_records.items():\n print(f\"\\nTransaction #{tx_num}:\\n\"\n f\"{tx_details}\")\n\n print(f\"\\nSpending Summary:\")\n print(f\" Starting Bank Balance: \"\n f\"{'{:.2f}'.format(self.user.account.starting_balance)}\")\n print(f\" Total Transactions Amount: \"\n f\"{'{:.2f}'.format(self.user.tx_manager.calc_total_spent())}\")\n print(f\" Closing Bank Account Balance: \"\n f\"{'{:.2f}'.format(self.user.account.current_balance)}\")",
"def print_invoice(request, invoice_number):\n\n data = Invoice.objects.get(number=invoice_number)\n\n sub_total = sum([a.get(\"total_cost\") for a in data.items])\n s_gst_val = float(sub_total) * (float(data.s_gst) / 100)\n c_gst_val = float(sub_total) * (float(data.c_gst) / 100)\n\n data.addressed_to = data.addressed_to.replace(\"\\n\", \"<br>\")\n\n return render(request,\n \"invoice/invoice_print.html\",\n {\n \"data\": data,\n \"sub_total\": sub_total,\n \"s_gst_value\": s_gst_val,\n \"c_gst_value\": c_gst_val\n })",
"def __ui_list_disciplines(self):\n try:\n print(str(self.__discipline_controller))\n\n except RepositoryException as re:\n print(re)\n return",
"def done(self, cr, uid, ids, context=None):\n budget_line = self.pool.get('account.budget.lines')\n budget_line_id = False \n for r in self.browse(cr, uid, ids, context=context):\n to = {'analytic_account': r.analytic_account_id.id,\n 'account_id': r.account_id.id,\n 'period_id': r.period_id.id,\n 'company': r.company_id.id,\n 'amount' : r.amount\n }\n \n budget_line_id ,history_ids=budget_line.transfer(cr, uid, {'type':r.type, 'budget_type':r.budget_type, 'line_ids': r.line_ids, 'to':to, 'reference':self._name+','+str(r.id)}, context=context)\n return self.write(cr, uid, ids,{'state':'done', 'budget_line':budget_line_id}, context=context)",
"def action_view_subcontract(self):\n action = self.env.ref('subcontract.subcontract_rfq')\n result = action.read()[0]\n\n # Remvove the context since the action basically display RFQ and not PO.\n result['context'] = {}\n order_line_ids = self.env['subcontract.order.line'].search([('orderpoint_id', '=', self.id)])\n subcontract_ids = order_line_ids.mapped('order_id')\n\n result['domain'] = \"[('id','in',%s)]\" % (subcontract_ids.ids)\n\n return result",
"def __show_partial_lines(file_path, limit, cut_method):\n\n lines = fs.read_file(file_path)\n if limit > len(lines):\n raise Exception(\"Index out of bound. {0} is greater than lines in file ({1} lines)\".format(limit, len(lines)))\n for ln in cut_method(lines, limit):\n print(ln)",
"def show_market_capitalization_line_graph(window, k):\n highest_volatility_group = get_highest_volatility_group(window, k)\n highest_volatility_market_capitalization_sum = get_market_capitalization_sum(highest_volatility_group)\n\n highest_volatility_market_capitalization_sum['portion'].plot()\n plt.ylim([0, 1])\n plt.title('portion')\n plt.show()\n\n highest_volatility_market_capitalization_sum['selected_sum'].plot()\n plt.ylim([0, highest_volatility_market_capitalization_sum['selected_sum'].max() * 2])\n plt.title('market capitalization')\n plt.show()",
"def drawIndexLines_income(t0, t1, t2, t3):\r\n t0.pu()\r\n t1.pu()\r\n t2.pu()\r\n t3.pu()\r\n\r\n t0.rt(90)\r\n t0.fd(240)\r\n t0.left(90)\r\n t0.fd(590)\r\n\r\n t1.rt(90)\r\n t1.fd(240)\r\n t1.left(90)\r\n t1.fd(570)\r\n\r\n t2.rt(90)\r\n t2.fd(240)\r\n t2.left(90)\r\n t2.fd(550)\r\n\r\n t3.rt(90)\r\n t3.fd(240)\r\n t3.left(90)\r\n t3.fd(530)\r\n drawLines_income(t0, t1, t2, t3)\r\n t0.pu()\r\n t1.pu()\r\n t2.pu()\r\n t3.pu()\r\n t0.goto(initialCoordinates())\r\n t1.goto(initialCoordinates())\r\n t2.goto(initialCoordinates())\r\n t3.goto(initialCoordinates())\r\n t3.rt(90)",
"def _get_lines(self, cr, uid, ids, context=None):\n List=[]\n if ids:\n line = self.pool.get('payment.enrich.lines').browse(cr, uid, ids[0], context=context)\n \n record = line.enrich_id\n val = 0.0\n for line in record.enrich_lines:\n if line.state == 'done' :\n val += line.cost\n res = {\n 'paid_amount':val,\n 'residual_amount':record.amount - val,\n }\n record.write(res)\n return List",
"def get_model(self):\n order = get_object_or_404(Order, user=self.request.user, number=self.kwargs['order_number'])\n return order.lines.get(id=self.kwargs['line_id'])",
"def __str__(self):\n number_stars = (30-len(self.name))//2\n title_line = '*'*number_stars+self.name+'*'*number_stars\n corpus = ''\n for i in range(len(self.ledger)):\n corpus += (((self.ledger[i])['description']))[0:min(23, len((self.ledger[i])['description']))].ljust(23)+(\n str(\"{:.2f}\".format(round(float((self.ledger[i])['amount']), 2)))).rjust(7)+'\\n'\n Total = 'Total: '+str(\"{:.2f}\".format((round(float(self.get_balance()), 2))))\n return title_line+'\\n'+corpus+Total",
"def action_view_invoice_salon(self):\n return {\n 'name': 'Invoices',\n 'domain': [('invoice_origin', '=', self.name)],\n 'res_model': 'account.move',\n 'view_id': False,\n 'view_mode': 'tree,form',\n 'type': 'ir.actions.act_window',\n }",
"def drawLines_income(t0, t1, t2, t3):\r\n t0.pd()\r\n t1.pd()\r\n t2.pd()\r\n t3.pd()\r\n t0.pencolor(\"blue\")\r\n t0.pensize(3)\r\n t1.pensize(3)\r\n t2.pensize(3)\r\n t3.pensize(3)\r\n t1.pencolor(\"red\")\r\n t2.pencolor(\"green\")\r\n t3.pencolor(\"gold\")\r\n t0.rt(90)\r\n t1.rt(90)\r\n t2.rt(90)\r\n t3.rt(90)\r\n t0.fd(70)\r\n t1.fd(70)\r\n t2.fd(70)\r\n t3.fd(70)",
"def display_line(self):\n line = self.line\n hosts = self.hosts\n if not self.check_line():\n return\n self.msg(\"|wThis line is hosted by:|n %s\" % \", \".join(str(ob) for ob in hosts))\n self.msg(\"|wCurrent line order:|n %s\" % \", \".join(str(ob) for ob in line))",
"def line_detail(request, id, format=None):\n try:\n snippet = Line_Assets.objects.get(id=id)\n except Line_Assets.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = LineSerializer(snippet)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = LineSerializer(snippet, data=request.data)\n old_name = snippet.line_name\n if serializer.is_valid():\n serializer.save()\n recordAssets.delay(user=str(request.user),\n content=\"修改出口线路类型:{old_name} -> {line_name}\".format(old_name=old_name,\n line_name=request.data.get(\n \"line_name\")), type=\"line\",\n id=id)\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n if not request.user.has_perm('ops.can_delete_line_assets'):\n return Response(status=status.HTTP_403_FORBIDDEN)\n snippet.delete()\n recordAssets.delay(user=str(request.user), content=\"删除出口线路:{line_name}\".format(line_name=snippet.line_name),\n type=\"line\", id=id)\n return Response(status=status.HTTP_204_NO_CONTENT)"
]
| [
"0.6536247",
"0.59003717",
"0.5458764",
"0.5376334",
"0.529271",
"0.5250406",
"0.524472",
"0.5224364",
"0.5208676",
"0.5186037",
"0.5180147",
"0.5137883",
"0.5071792",
"0.50220627",
"0.49618864",
"0.49447742",
"0.4937259",
"0.49017215",
"0.48628005",
"0.4855341",
"0.481696",
"0.48116407",
"0.48091274",
"0.48060212",
"0.47997347",
"0.47972915",
"0.47830504",
"0.4752376",
"0.4745413",
"0.4718461"
]
| 0.75069165 | 0 |
Get the token sequence from the cursor via pre-order traversal (excluding nodes listed in 'exclude_types'). | def get_tokens(self):
def _traverse_preorder(cursor, token_list): # There is a method called "walk_preorder" in Cursor class. Here we need to ignore some subtrees so we implement on our own.
if cursor.location.file and cursor.location.file.name != self.filepath: # exclude "#include <...>"
return
if (cursor.kind, cursor.spelling) in exclude_types: # exclude node in 'exclude_types'
return
token_list.append(cursor)
for child in cursor.get_children():
_traverse_preorder(child, token_list)
tokens = []
_traverse_preorder(self.cursor, tokens)
return tokens | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cursor(self):\n try:\n return self.tokens[self.pos]\n except IndexError:\n raise ParseError(\"No tokens left for cursor to traverse\")",
"def walk(predicate, cursor):\n return (c for c in cursor.walk_preorder() if predicate(c))",
"def gettok(self):\n try:\n self.next = next(self.tokens)\n except StopIteration:\n self.next = None",
"def test_get_token_cursor(self):\n tu = get_tu('class A {}; int foo(A var = A());', lang='cpp')\n foo = get_cursor(tu, 'foo')\n\n for cursor in foo.walk_preorder():\n if cursor.kind.is_expression() and not cursor.kind.is_statement():\n break\n else:\n self.fail(\"Could not find default value expression\")\n\n tokens = list(cursor.get_tokens())\n self.assertEqual(len(tokens), 4, [t.spelling for t in tokens])\n self.assertEqual(tokens[0].spelling, '=')\n self.assertEqual(tokens[1].spelling, 'A')\n self.assertEqual(tokens[2].spelling, '(')\n self.assertEqual(tokens[3].spelling, ')')\n t_cursor = tokens[1].cursor\n self.assertEqual(t_cursor.kind, CursorKind.TYPE_REF)\n r_cursor = t_cursor.referenced # should not raise an exception\n self.assertEqual(r_cursor.kind, CursorKind.CLASS_DECL)",
"def _peek(self):\n return self.token_list[self._current]",
"def pre_order(self):\n stack = []\n node = self\n while stack or node:\n if node:\n yield node.val\n stack.append(node)\n node = node.left\n else:\n node = stack.pop()\n node = node.right",
"def _pre_order_helper(self, node):\n curr = node\n yield curr._data\n if curr._lkid:\n for node_data in self._pre_order_helper(curr._lkid):\n yield node_data\n if curr._rkid:\n for node_data in self._pre_order_helper(curr._rkid):\n yield node_data",
"def _previous(self):\n return self.token_list[self._current - 1]",
"def preorderTraversal(self, root: TreeNode) -> List[int]:\n def preorder(root,seq):\n if root is None:\n return seq\n seq.append(root.val)\n preorder(root.left,seq)\n preorder(root.right,seq)\n return seq\n \n prelist= []\n return preorder(root,prelist)",
"def _get_sequence(self, cursor):\n raise NotImplementedError",
"def pre_order(self):\n if self.val is not None:\n yield self.val\n if self.left is not None:\n for i in self.left.pre_order():\n yield i\n if self.right is not None:\n for i in self.right.pre_order():\n yield i",
"def get_tokens(self):\n\t\treturn self.get_starttokens() + self.get_endtokens()",
"def before_cursor_next(self, cursor):\n pass",
"def prev_token(self, tok, include_extra=False):\n # type: (Token, bool) -> Token\n i = tok.index - 1\n if not include_extra:\n while is_non_coding_token(self._tokens[i].type):\n i -= 1\n return self._tokens[i]",
"def next_token(self, context, token):",
"def positions(self):\n return self.preorder() # return entire preorder iteration",
"def preorder_traverse(self):\n \n keys = []\n\n if not self.node:\n return keys\n \n keys.append(self.node.vp.index)\n keys.extend(self.node.left.preorder_traverse())\n keys.extend(self.node.right.preorder_traverse())\n\n return keys",
"def findLeftContext(tree, start, ignore):\t\n nrOfClosingBrs = 0\n nrOfOpeningBrs = 0\n firstPass = True\n for currentIndex in range(start-1,-1,-1):\n if tree[currentIndex].symbol in ignore:\n continue\n elif tree[currentIndex].symbol == \"[\":\n if not firstPass:\n nrOfOpeningBrs = nrOfOpeningBrs + 1\n elif tree[currentIndex].symbol == \"]\":\n nrOfClosingBrs = nrOfClosingBrs + 1\n elif nrOfClosingBrs == nrOfOpeningBrs:\n return(tree[currentIndex])\n firstPass = False\n return(emptyModule())",
"def preorder_print(self, start, traversal):\n return traversal",
"def preorder(self, u=NULL):\n return self._ll_tree.get_preorder(u)",
"def _subtree_preorder(self, p):\n yield p # visit p first before visiting its subtrees\n for c in self.children(p):\n for pos in self._subtree_preorder(c):\n yield pos",
"def preorder_traverse_to_list(self):\n\t\tif (not self.root()):\n\t\t\treturn None\n\t\tself._synchronize_attributes()\n\t\treturn self._preorder_traverse_to_list_helper(self.root(), 1)",
"def get_tokens(self, node, include_extra=False):\n # type: (AstNode, bool) -> Iterator[Token]\n return self.token_range(node.first_token, node.last_token, include_extra=include_extra)",
"def get_tokens_unprocessed(self, text):\n self.cur = []\n start = end = self.whitespace(0, text)\n while start <= end < len(text):\n try:\n # try line number\n while text[end] in self.NUMBERS:\n end += 1\n if end != start: # actual number present\n self.cur.append((start, Number.Integer, text[start:end]))\n # whitespace is required after a line number\n orig = len(self.cur)\n try:\n start = end = self.whitespace(end, text, True)\n except AssertionError:\n del self.cur[orig:]\n start = end = self.error_till_line_end(end, text)\n continue\n # at this point it could be a comment\n match = self.COMMENT.match(text, start)\n if match is not None:\n self.cur.append((start, Comment, text[start:match.end()]))\n start = end = match.end()\n # anything after the closing bracket is invalid\n start = end = self.error_till_line_end(start, text)\n # do not attempt to process the rest\n continue\n del match\n if text[start] in '[]': # fantasy push or pop\n self.cur.append((start, Keyword, text[start]))\n start += 1\n end += 1\n else:\n # one formula, possibly containing subformulae\n orig = len(self.cur)\n try:\n start = end = self.formula(start, text)\n except (AssertionError, RecursionError): # not well-formed\n del self.cur[orig:]\n while text[end] not in self.WHITESPACE:\n end += 1\n self.cur.append((start, Error, text[start:end]))\n start = end\n # skip whitespace after formula\n orig = len(self.cur)\n try:\n start = end = self.whitespace(end, text, True)\n except AssertionError:\n del self.cur[orig:]\n start = end = self.error_till_line_end(start, text)\n continue\n # rule proving this formula a theorem\n orig = len(self.cur)\n try:\n start = end = self.rule(start, text)\n except AssertionError:\n del self.cur[orig:]\n start = end = self.error_till_line_end(start, text)\n continue\n # skip whitespace after rule\n start = end = self.whitespace(end, text)\n # line marker\n if text[start] == '(':\n orig = len(self.cur)\n try:\n start = end = self.lineno(start, text)\n except AssertionError:\n del self.cur[orig:]\n start = end = self.error_till_line_end(start, text)\n continue\n start = end = self.whitespace(start, text)\n except IndexError:\n try:\n del self.cur[orig:]\n except NameError:\n pass # if orig was never defined, fine\n self.error_till_line_end(start, text)\n return self.cur",
"def preorder(self):\n return (node for node in self.get_preorder(self.root))",
"def peek_token(self):\n tok = next(self)\n self.unpop_token(tok)\n return tok",
"def get_tokens(self, document):\n raise NotImplementedError()",
"def get_tokens_unprocessed(self, text, stack=('root',)):\n pos = 0\n tokendefs = self._tokens\n if hasattr(self, '_saved_state_stack'):\n statestack = list(self._saved_state_stack)\n else:\n statestack = list(stack)\n statetokens = tokendefs[statestack[-1]]\n while 1:\n for rexmatch, action, new_state in statetokens:\n m = rexmatch(text, pos)\n if m:\n if action is not None:\n if type(action) is _TokenType:\n yield pos, action, m.group()\n else:\n for item in action(self, m):\n yield item\n pos = m.end()\n if new_state is not None:\n # state transition\n if isinstance(new_state, tuple):\n for state in new_state:\n if state == '#pop':\n statestack.pop()\n elif state == '#push':\n statestack.append(statestack[-1])\n else:\n statestack.append(state)\n elif isinstance(new_state, int):\n # pop\n del statestack[new_state:]\n elif new_state == '#push':\n statestack.append(statestack[-1])\n else:\n assert False, \"wrong state def: %r\" % new_state\n statetokens = tokendefs[statestack[-1]]\n break\n else:\n try:\n if text[pos] == '\\n':\n # at EOL, reset state to \"root\"\n pos += 1\n statestack = ['root']\n statetokens = tokendefs['root']\n yield pos, Text, u'\\n'\n continue\n yield pos, Error, text[pos]\n pos += 1\n except IndexError:\n break\n self._saved_state_stack = list(statestack)",
"def preorder_iterator(node):\n yield node\n for child in node.children:\n yield from preorder_iterator(child)",
"def GetCurrentToken(tokens, pos):\n i = 0\n while i < len(tokens):\n if pos > tokens[i].start and pos < tokens[i].end:\n return tokens[i]\n if pos < tokens[i].start:\n return tokens[i-1] if i > 0 else None\n i += 1\n\n return tokens[len(tokens)-1] if tokens else None"
]
| [
"0.5911447",
"0.58838665",
"0.57942325",
"0.578882",
"0.57581866",
"0.5740371",
"0.5737432",
"0.5732957",
"0.572129",
"0.56305766",
"0.5559917",
"0.5542572",
"0.55267996",
"0.55039823",
"0.54972017",
"0.549295",
"0.54885024",
"0.54572934",
"0.54447335",
"0.5441865",
"0.54028213",
"0.53961605",
"0.53785986",
"0.5372904",
"0.53659225",
"0.5358439",
"0.5353255",
"0.5345952",
"0.533741",
"0.53311676"
]
| 0.6444085 | 0 |
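A minimal standalone sketch of the pre-order traversal shown in the get_tokens record above, assuming the libclang Python bindings (clang.cindex) are installed; filepath and exclude_types (a set of (CursorKind, spelling) pairs) are hypothetical caller-supplied values, not part of the original record:

    import clang.cindex as ci

    def collect_cursors(filepath, exclude_types):
        # Parse the file and walk its AST in pre-order, pruning excluded subtrees.
        tu = ci.Index.create().parse(filepath)
        collected = []

        def walk(cursor):
            if cursor.location.file and cursor.location.file.name != filepath:
                return  # skip nodes pulled in from #include'd headers
            if (cursor.kind, cursor.spelling) in exclude_types:
                return  # drop this node and its whole subtree
            collected.append(cursor)
            for child in cursor.get_children():
                walk(child)

        walk(tu.cursor)
        return collected

Cursor.walk_preorder() would visit the same nodes, but it offers no way to prune a subtree once an excluded node is reached, which is why the record implements the recursion itself.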
Return the integer part of the square root of x, even for very large integer values. Python's 'math' module does not behave as expected for very large integers because math.sqrt works in float precision. | def integer_sqrt(x: int):
assert x > 0
_1_40 = 1 << 40 # 2**40
if x < _1_40:
return int(sqrt(x)) # use math's sqrt() for small parameters
n = int(x)
if n <= 1:
return n # handle sqrt(0)==0, sqrt(1)==1
# Make a high initial estimate of the result (a little lower is slower!!!)
r = 1 << ((n.bit_length() + 1) >> 1)
while True:
newr = (r + n // r) >> 1 # next estimate by Newton-Raphson
if newr >= r:
return r
r = newr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mySqrt(self, x: int) -> int:\n if x == 0:\n return 0\n d = 0.1\n y = x / 2\n z = (y + x/y) / 2\n e = abs(z-y)\n while e > d:\n y = z\n z = (y + x/y) / 2\n e = abs(z - y)\n return int(z)",
"def my_sqrt(x):\n square_root = x**(0.5)\n return square_root",
"def get_int_sqrt(x):\n l = 0\n h = x\n while l <= h:\n m = l + (h - l) / 2\n product = m * m\n if product <= x:\n l = m + 1\n else:\n h = m - 1\n return l - 1",
"def _square_rooted(x):\n return sqrt(sum([(a * a) for a in x]))",
"def sqrt(x):\n def good_enough(guess):\n precision = 0.001\n f = abs(guess ** 2 - x)\n return (f < precision)\n \n def improve(guess):\n return (guess + x/guess) / 2.0\n \n counter = 1\n guess = 1\n while not good_enough(guess) and counter <= 100:\n guess = improve(guess)\n counter += 1\n assert counter <= 100,'100 iterations done and no good answer' \n return int(guess)",
"def squareroot(number):\n return math.sqrt(number)",
"def sqrt(x):\n if x < 0:\n raise ValueError(f\"Cannot compute sqrt of negative number {x}\")\n guess = x\n i = 0\n while guess * guess !=x and i < 20:\n guess = (guess + x / guess) / 2.0\n i += 1\n return guess",
"def sqrt(x):\n # lets check that x is positive\n if x < 0:\n print(\"Error: negative value was supplied\")\n return -1\n\n\n # Initial guess for the square root \n z = x / 2.0 \n \n # Continuously improve the guess.\n while abs(x - (z*z)) > 0.01: \n z = z - (((z*z) - x) / (2*z))\n \n return z",
"def sqrt(number):\n number = abs(number)\n bit = 1 << 62 # second to top of 64 bit is 62, 32 bit would be 1 << 30\n result = 0\n\n # Start with the highest power of 4 that is less than the number\n while bit > number:\n bit >>= 2\n\n while bit != 0:\n if number >= result + bit:\n number -= result + bit\n result = (result >> 1) + bit\n else:\n result >>= 1\n bit >>= 2\n\n return result",
"def sqrt(x):\n return 0.0",
"def sqr(x):\n return x ** 2",
"def _intsqrt(v):\n c = 0\n\n while c**2 <= v:\n c += 1\n \n return c - 1",
"def square(x: float) -> float:\n return x * x",
"def Sqr(num):\n return math.sqrt(float(num))",
"def sqrt(n, one):\n # Use floating point arithmetic to make an initial guess\n floating_point_precision = 10**16\n n_float = float((n * floating_point_precision) // one) / floating_point_precision\n x = (int(floating_point_precision * math.sqrt(n_float)) * one) // floating_point_precision\n n_one = n * one\n while 1:\n x_old = x\n x = (x + n_one // x) // 2\n if x == x_old:\n break\n return x",
"def sqr(x):\n return x * x",
"def sqrt(x):\n guess = x\n i = 0\n\n try:\n while guess*guess != x and i < 20:\n guess = (guess+x/guess)/2.0\n i =+1\n except ZeroDivisionError:\n raise ValueError()\n return guess",
"def find_root_1(x, n, p=0.001):\n step = p / 10\n guess = step\n while abs(guess ** n - x) > p:\n guess += step\n return round(guess, 3)",
"def sroot(n):\n\n return int(n ** 0.5) == n ** 0.5",
"def n_root_of_x(n, x):\n if n==0:\n return 1\n \n return 1 if n==0 else x**(1.0/n)",
"def find_root_2(x, n, p=0.001):\n step = p / 10\n left, right = 0, x\n while True:\n guess = (left + right) / 2\n result = guess ** n\n if abs(result - x) <= p:\n break\n elif result > x + p:\n right = guess - step\n else:\n left = guess + step\n\n if round(guess) ** n == x:\n return round(guess)\n return round(guess, 3)",
"def norm_sqr(x):\n return inner_prod(x, x)[0]",
"def nthRoot(x,n):\n return op.pow(x,1/n)",
"def sqrt(number):\n if number == 0 or number == 1:\n return number\n if number < 0:\n return None\n s = 1\n e = number/2\n while s <= e:\n mid = (s+e)//2\n if (mid*mid == number):\n return mid\n if mid*mid < number:\n s = mid+1\n res = mid\n else:\n e = mid - 1\n return res",
"def sqrt(x):\n ans=0\n if x>=0:\n while ans*ans<x: ans=ans+1\n if ans*ans != x:\n print(x, \" is not a perfect square!\")\n return None\n #Note, it returns ans, but it does not print it!!\n else: return ans\n else:\n print(x, \" is a negative number!\")\n return None",
"def find_bigger_sqrt_number(num):\n\n tmpPos = num\n while np.sqrt(tmpPos) % 1 != 0:\n tmpPos += 1\n return int(np.sqrt(tmpPos))",
"def sq(x):\n\n return x ** x",
"def sqrt(number):\n # check for negative inputs\n if number < 0:\n return None\n # square root of 1 and 0 is 1 and 0\n elif number in [1, 0]:\n return number\n\n # initialise upper and lower bound\n high = number\n low = 0\n\n while low < high:\n # mid is the average of high and low\n mid = (high + low) // 2\n # if mid ** 2 is the number, return the mid value\n # OR, if mid ** 2 is smaller than the number and (mid + 1) ** 2 is larger than the number,\n # return the mid number as it's the floor value\n if mid**2 <= number < (mid+1)**2:\n return mid\n # mid is too high, change high var to mid\n elif mid**2 > number:\n high = mid\n # mid is too low, change low var to mid\n else:\n low = mid",
"def sqrt(x: int):\n pass",
"def sqrt(number):\n if number < 2:\n return number\n \n left = 0\n right = number\n \n while left<right:\n mid = (left + right) // 2\n square = mid*mid\n \n if square == number:\n return mid\n \n elif square < number:\n left = mid + 1\n \n else:\n right = mid\n \n return left-1"
]
| [
"0.8057233",
"0.7838644",
"0.78350353",
"0.75958186",
"0.73332125",
"0.72782975",
"0.72482723",
"0.7227843",
"0.7005679",
"0.7000568",
"0.6877222",
"0.68424505",
"0.6837487",
"0.67499954",
"0.67402005",
"0.6725246",
"0.6717746",
"0.6702041",
"0.6646816",
"0.66265327",
"0.6616788",
"0.65962005",
"0.6590235",
"0.6577773",
"0.6562015",
"0.6556057",
"0.6508906",
"0.64863795",
"0.6481543",
"0.6445983"
]
| 0.8287289 | 0 |
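A small verification sketch for the integer_sqrt record above, assuming that function (and its `from math import sqrt`) is already defined in scope; math.isqrt (Python 3.8+) serves as the standard-library reference:

    import math

    # The record falls back to float sqrt only below 2**40; larger inputs take the Newton path.
    for v in (1, 2, 3, 10**6, 2**40 + 1, 10**40 + 12345, (10**25 + 7)**2):
        assert integer_sqrt(v) == math.isqrt(v), v
    print("integer_sqrt matches math.isqrt on all sampled values")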
Return the nearest value to a given one in a list. | def closestValue(aList: list, givenV: int):
abs_diff = lambda list_value: abs(list_value - givenV)
return min(aList, key=abs_diff) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getnearest(iterable, value):\n return min(enumerate(iterable), key=lambda i: abs(i[1] - value))",
"def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1",
"def _get_index_closest_val(list, val):\n\n return min(range(len(list)), key=lambda i: abs(list[i]-val))",
"def closest_match(num,num_list):\n\tdiffs = np.abs(np.subtract(num,num_list))\n\treturn num_list[np.argmin(diffs)]",
"def _find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n return array[idx], idx",
"def find_nearest(array, value):\n idx = (np.abs(array-value)).argmin()\n return array[idx]",
"def find_nearest(array, value):\n idx = (np.abs(array-value)).argmin()\n return array[idx]",
"def find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n return array[idx]",
"def find_nearest(array, value):\n if isinstance(array, list):\n array = np.array(array)\n idx = (np.abs(array-value)).argmin()\n return idx",
"def nearest(items, pivot):\n return min(items, key=lambda x: abs(x - pivot))",
"def find_min(list):\n return find_value_at(list, -1)",
"def takeClosest(myList, myNumber):\r\n pos = bisect_left(myList, myNumber)\r\n if pos == 0:\r\n return myList[0]\r\n if pos == len(myList):\r\n return myList[-1]\r\n before = myList[pos - 1]\r\n after = myList[pos]\r\n if after - myNumber < myNumber - before:\r\n return after\r\n else:\r\n return before",
"def find_nearest(value,array):\n idx = numpy.abs(value-array).argmin()\n return idx,array[idx]",
"def find_nearest(numbers, target):\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]",
"def find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return array[idx], idx",
"def find_nearest(array,value):\n idx = (np.abs(array-value)).argmin()\n return idx",
"def nearest(self, value):\n coords = value[:2] # value only has 2 coords (x, y) right now, but it may have theta in the future\n hits = self.idx.nearest(self.make_bounding_box(coords), 1, objects=False)\n for hit in hits:\n # take the first index in the event of any ties\n return self.nodes[hit]\n \n \n \n #assert that value is valid here\n \"\"\"def recur(node, depth=0):\n closest, distance = node, self.cost(node.value, value)\n if depth < self.max_size:\n for child in node.children:\n (child_closest, child_distance) = recur(child, depth+1)\n if child_distance < distance:\n closest = child_closest\n distance = child_distance \n return closest, distance\n return recur(self.root)[0]\"\"\"",
"def get_closest_value_index_in_sorted_list(value, list_):\n if value <= list_[0]:\n return 0\n if value >= list_[-1]:\n return len(list_) - 1\n pos = bisect.bisect_left(list_, value)\n before = list_[pos - 1]\n after = list_[pos]\n if after - value < value - before:\n return pos\n else:\n return pos - 1",
"def find_nearest(array, value):\n idx = (np.abs(array-value)).argmin()\n return idx",
"def takeClosest(myList, myNumber):\n pos = bisect_left(myList, myNumber)\n if pos == 0:\n return 0 #myList[0]\n if pos == len(myList):\n return len(myList)-1 #myList[-1]\n\n before = myList[pos - 1]\n after = myList[pos]\n\n if after - myNumber < myNumber - before:\n return pos #after\n else:\n return pos-1 #before",
"def find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return idx",
"def find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return idx",
"def find_nearest(arr, val):\n arr = np.asarray(arr)\n idx = (np.abs(arr - val)).argmin()\n return idx, arr[idx]",
"def findNearest(myInterval,IntervalList):\n \n myDist = 9999999999999999999\n res = 0\n for i in IntervalList:\n distance = myInterval.distance(i)\n if distance > 0 and distance < myDist:\n myDist = distance\n res = i\n return res",
"def find_nearest(arr, val):\n\n arr = np.asarray(arr)\n idx = (np.abs(arr - val)).argmin()\n return idx, arr[idx]",
"def _nearest(arrlist_1, arrlist_2):\n tree = KDTree(arrlist_1);\n pts = tree.query(arrlist_2)\n\n return tree.data[pts[1][pts[0].argmin()]]",
"def closest_point(point, points):\n return points[cdist([point], points).argmin()]",
"def eeg_findnearest(x,X):\t\n\t#x array or vector and X a scalar\n\tabsdif = np.abs(x-X)\n\tval = np.min(absdif)\n\tidx = absdif.argmin()\n\treturn val,idx",
"def find_nearest(array, value, index=False):\n \n idx = (numpy.abs(numpy.array(array) - value)).argmin()\n error = array[idx] - value\n if index:\n return idx, error\n else:\n return array[idx], error",
"def nearestIndex(array, value):\n idx = np.searchsorted(array, value, side='left')\n if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):\n return idx-1\n return idx"
]
| [
"0.82866627",
"0.7399142",
"0.73603547",
"0.73322725",
"0.73053616",
"0.7174279",
"0.7174279",
"0.71739745",
"0.71574485",
"0.71542096",
"0.7151316",
"0.71261716",
"0.7085714",
"0.70616233",
"0.7042835",
"0.69857186",
"0.6985201",
"0.69779974",
"0.69681966",
"0.69561845",
"0.69347656",
"0.69347656",
"0.6885732",
"0.6854045",
"0.68369055",
"0.68099815",
"0.6724208",
"0.67208517",
"0.66944027",
"0.66458476"
]
| 0.76117915 | 1 |
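A quick usage sketch for the closestValue record above (assumes that function is in scope); on a tie, min() keeps the element that appears first in the list:

    values = [4, 9, 15, 27]
    print(closestValue(values, 11))   # -> 9  (|9 - 11| = 2 is the smallest gap)
    print(closestValue(values, 12))   # -> 9  (9 and 15 tie at distance 3; the earlier one wins)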
Return a randomly chosen element from a list and remove it. Be careful to set bucket = GivenList.copy() so as not to lose the original list! | def randomClosureChoice(bucket: list):
import secrets
choice = secrets.choice(bucket)
bucket.remove(choice)
return choice | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_randomico(lista, qtd_remocao):\n for i in range(qtd_remocao):\n lista.pop(random.randrange(len(lista))) \n return lista",
"def popitem(self):\n all_items = self.items()\n removed_item = random.choice(all_items)\n self[removed_item[0]] = None\n return removed_item",
"def pop_random(self):\n\n rand_index = randint(0, len(self._list) - 1)\n item = self._list[rand_index]\n self.remove(item)\n return item",
"def randomchooseanddelete():\n\t\tvar = random.choice(unassigned)\n\t\tunassigned.remove(var)\n\t\treturn var",
"def _random_pick(lst):\n\n choice = random.randint(0, len(lst) - 1)\n return lst[choice]",
"def rand_pop(l: list):\n i = randrange(len(l)) \n l[i], l[-1] = l[-1], l[i] \n return l.pop()",
"def getRandom(self) -> int:\n index = random.randint(0, len(self.lst) - 1)\n # self.lst[index], self.lst[len(self.lst) - 1] = self.lst[len(self.lst) - 1], self.lst[index]\n # val = self.lst.pop()\n # self.dic.pop(val)\n return self.lst[index]",
"def deal_one(self):\r\n rand_card = random.choice(self.cards_list)\r\n self.cards_list.remove(rand_card)\r\n return rand_card",
"def delete_element(some_list, index):\n del some_list[index]\n return some_list",
"def random_pop (self, checkfn=None):\n if len(self) == 0:\n return None\n\n index = self.random_pick(checkfn=checkfn)[0]\n\n if index == None:\n return None\n\n return self.pop(index)",
"def get_rand_list(alist, th=.5):\n nlist = []\n counter = math.ceil( len(alist)*th )\n \n while(counter):\n num = nprand.randint(0,len(alist))\n assert num < len(alist)\n nlist.append( alist[num] )\n alist.pop(num)\n # del alist[num]\n counter = counter - 1\n \n return [alist, nlist]",
"def remove_from_list(self,list_,index):\r\n try:\r\n return list_.pop(self._index_to_int(index))\r\n except IndexError:\r\n self._index_error(list_,index)",
"def list_delete_item(list_arg: list, item_to_delete: Any) -> list:\n from itertools import filterfalse\n\n result = list(filterfalse(lambda x: x == item_to_delete, list_arg))\n return result",
"def pop_random(random, values):\n\n # We pick the element at a random index. Rather than removing that element\n # from the list (which would be an O(n) operation), we swap it to the end\n # and return the last element of the list. This changes the order of\n # the elements, but as long as these elements are only accessed through\n # random sampling that doesn't matter.\n i = random.randrange(0, len(values))\n values[i], values[-1] = values[-1], values[i]\n return values.pop()",
"def hit():\r\n new_card = deck[random.randint(1, len(deck))]\r\n deck.remove(new_card)\r\n return new_card",
"def random_pop (self, checkfn=None):\n if len(self) == 0:\n return None\n item = random.randint(0, len(self)-1)\n if checkfn is not None:\n tries = len(self) * 5\n while not checkfn(self[item]):\n item = random.randint(0, len(self)-1)\n tries = tries - 1\n if tries <= 0:\n return None\n return self.pop(item)",
"def getRandomFromList(self, l):\n if (len(l) == 0):\n return -1\n return l[randint(0, len(l) - 1)]",
"def remove_point(mutated_genome,index):\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n del mutated_genome[index][2][point_index]",
"def remove(self,element):\n\t\telem_lst_0 = self.elem_lst\n\t\tif type(element)==type([]):\n\t\t\tfor elem in element:\n\t\t\t\tif type(elem)==type('string'):\n\t\t\t\t\t# Find the occurance of input 'elem' in the list and returns it\n\t\t\t\t\telem_0 = next((x for x in elem_lst_0 if x.name == elem), None)\n\t\t\t\t\telem_lst_0 = [x for x in elem_lst_0 if not x.name == elem]\n\t\t\t\telif type(elem)==type(2):\n\t\t\t\t\telem_0 = next((x for x in elem_lst_0 if x.number == elem), None)\n\t\t\t\t\telem_lst_0 = [x for x in elem_lst_0 if not x.number == elem]\n\t\t\treturn elem_lst_0\n\t\tif type(element)==type('string'):\n\t\t\t# Find the occurance of input 'elem' in the list and returns it\n\t\t\telem_0 = next((x for x in self.elem_lst if x.name == element), None)\n\t\t\telem_lst_new = [x for x in self.elem_lst if not x.name == element]\n\t\t\treturn elem_lst_new\n\n\t\telif type(element)==type(2):\n\t\t\telem_0 = next((x for x in self.elem_lst if x.number == element), None)\n\t\t\telem_lst_new = [x for x in self.elem_lst if not x.number == element]\n\t\t\treturn elem_lst_new",
"def _primerElem(l):\n return l[0]",
"def randomizer(start, stop, step):\n l = list(range(start, stop, step))\n random.shuffle(l)\n \n def pop_from_nonempty():\n if len(l)>0:\n a = l.pop()\n return(a)\n else:\n return(None)\n \n return(pop_from_nonempty)",
"def deleteItem(list,item):\n print \"I deleted this item:\", item\n list.remove(item)",
"def pop_and_return(l, n):\n o = l.copy()\n if (n >= len(l)) or (n < 0):\n raise ValueError(\"Index n = %d out of range\" % (n))\n if len(l) == 0:\n raise ValueError(\"The supplied list must contain at least one element.\")\n del o[n]\n return o",
"def remove(self, value):\n for i, v in enumerate(self):\n if v == value:\n self._table.pop(i); return\n raise ValueError, \"list.remove(x): x not in list\"",
"def _remove_from_item(self, item, remove_list):\n for k in remove_list:\n # Will raise KeyError if asked to remove non-existent item\n try:\n item.pop(k)\n except KeyError:\n # No worries removing non-existent item. Let's log it\n # just in case.\n self.log.debug('Unable to remove {} for the following item: '\n '{}'.format(k, item))\n\n return item",
"def remove_element(elem, lst, raise_error=True):\n\n result = lst.copy()\n try:\n result.remove(elem)\n except ValueError:\n if raise_error:\n raise\n return result",
"def wants_card_remove(self, card: mtgsdk.Card, list: str):\n l = self.wants[list]\n l.remove(card)\n self.db.wants_card_remove(list, card.multiverse_id)\n util.log(\"Removed '{}' from wants list '{}'\".format(card.name, list), util.LogLevel.Info)",
"def remove_adjacent(some_list):\n # This function will reduce element that have the same value next to it to single element.\n bucket = []\n for i in range(len(some_list)):\n try:\n #print(\"{0:>3}-{1:<3}\".format(f\"{some_list[i]}\",f\"{some_list[i+1]}\"),end=\"\")\n if some_list[i] == some_list[i+1]:\n bucket.append(some_list[i])\n #print(\"same!!\",end=\"\")\n except:\n pass\n #print(\"\")\n for j in bucket:\n some_list.remove(j)\n return some_list",
"def drop(self):\r\n\t\t#print \"drop_list: {0}\".format(\" \".join(self.gb.drop_list))\r\n\t\tresult = []\r\n\t\tall_cards = [self.wang_list, self.tube_list, self.bamb_list, self.word_list, self.wind_list]\r\n\t\tprevious = \"\"\r\n\t\tfor cards in all_cards:\r\n\t\t\tfor i in range(len(cards)):\r\n\t\t\t\t\"\"\" avoid running same card \"\"\"\r\n\t\t\t\tif (cards[i] == previous): continue\r\n\t\t\t\tc = cards.pop(i)\r\n\t\t\t\tprevious = c\r\n\t\t\t\tmini, useful_amount, score = self.count_steps()\r\n\t\t\t\tcards.insert(i, c)\r\n\t\t\t\tresult.append([mini, useful_amount, score, c])\r\n\t\t\t\t#print \"min: {0}, useful_amount: {1}, score: {2}, dcard: {3}\".format(mini, useful_amount, score, c)\r\n\r\n\t\tdcard = self.sorting_by_criteria(result)\r\n\t\t#print \"\\tGeniusAgent drop: {0}\".format(dcard)\r\n\t\tctype = GameBoard.CardType(dcard)\r\n\t\tall_cards[ctype-1].remove(dcard)\r\n\t\tself.card_count -= 1\r\n\t\treturn dcard",
"def testRemove(self):\n\n numIns = randint(70,200)\n\n for i in xrange(numIns):\n self.s.insert(i, None)\n for i in xrange(numIns):\n self.s.remove(i)"
]
| [
"0.66138875",
"0.6564587",
"0.65421885",
"0.6370683",
"0.6352258",
"0.6317444",
"0.61707795",
"0.61280763",
"0.6087329",
"0.60718876",
"0.6040918",
"0.60279614",
"0.58906007",
"0.58702135",
"0.58437747",
"0.58181566",
"0.5816611",
"0.57801473",
"0.57371235",
"0.5733281",
"0.5715823",
"0.5663974",
"0.5643587",
"0.56341445",
"0.5613302",
"0.5597743",
"0.5587632",
"0.55818063",
"0.55726063",
"0.556679"
]
| 0.66175485 | 0 |
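An illustration of the copy caveat in the randomClosureChoice record above (assumes the function is in scope): drawing from a copy leaves the source list untouched while the working bucket empties out:

    deck = ["a", "b", "c", "d"]
    bucket = deck.copy()                  # work on a copy, as the docstring warns
    drawn = [randomClosureChoice(bucket) for _ in range(len(deck))]
    print(sorted(drawn) == sorted(deck))  # True: each element is drawn exactly once
    print(deck)                           # ['a', 'b', 'c', 'd'] -- original intact
    print(bucket)                         # [] -- the copy has been consumed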
Return a boolean indicating whether the values are coprime. Two values are said to be coprime if they have no common prime factors. This is equivalent to their greatest common divisor (gcd) being 1. | def coprime(a: int, b: int):
return euclid(a, b) == 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def coprime(a, b):\n return gcd(a, b) == 1",
"def coprime(self,x,y):\r\n return x == 1 or y == 1 or not bool(self.cofactors(x,y))",
"def coprime(m,n):\r\n # The function uses the Euclid's algorithm for finding the greatest common divisor. The algorithm is recursive.\r\n # If the GCD is 1, when the numbers are coprime. If it is greater than 1, when the numbers aren't coprime.\r\n if n == 0 and m > 1:\r\n return False\r\n elif n == 0 and m == 1:\r\n return True\r\n return coprime(n, m - n * (m // n))",
"def coPrime(x):\n y = random.randint(4, x)\n if computeGCD(x, y) != 1:\n return coPrime(x)\n else:\n return y",
"def relPrime(a, b):\n if gcd(a, b) == 1:\n return True\n else:\n return False",
"def trialDivision(c, primes):\n\n for prime in primes: # for each prime\n if c % prime == 0: # check if c is a multiple\n return False # if it is return that it definitely isn't prime\n\n return True # else reutrn that it might be prime",
"def is_prime(value):\n\n if value < 2: raise ValueError\n\n for i in range(2, value):\n if value % i == 0:\n return False\n\n return True",
"def is_prime(value: int) -> bool:\n\n if value == 1:\n return False\n if value <= 0:\n raise ValueError(\"Value must be greater than zero\")\n\n for i in range(2, int(value**(1/2)) + 1):\n if value % i == 0:\n return False\n return True",
"def pairwise_coprime(listing: list):\n\n assert isinstance(listing, list)\n\n size = len(listing)\n\n for i in range(0, size - 1):\n for j in range(i + 1, size):\n if not coprime(listing[i], listing[j]):\n return False\n\n return True",
"def coPrime(x):\n\n n = x * 2 + 100000 # Upper limit for range of random integers\n y = random.randint(x * 2, n)\n if (fractions.gcd(x, y) != 1):\n return coPrime(x)\n else:\n return y",
"def is_prime(number: int) -> bool:\n\n if number % 2 == 0 and number > 2:\n return False\n return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))",
"def is_prime(num: int) -> bool:\n return factorial(num - 1) % num != 0",
"def is_prime(a):\n return all(a % i for i in xrange(2, a))",
"def is_prime(num):\n if is_even(num) and num != 2 or num == 1:\n return False\n\n for dd in range(3, int(mt.sqrt(num)) + 1):\n if num % dd == 0:\n return False\n\n return True",
"def comprobar_primo(num):\n primo = True\n for i in range(2, num):\n if num%i == 0:\n primo = False\n return primo",
"def is_prime(number):\n if number == 2:\n return True\n\n if number <= 1 or number % 2 == 0:\n return False\n\n # check to see if number has any odd factors\n for x in range(3, int(number ** 0.5) + 1, 2):\n if number % x == 0:\n return False\n return True",
"def is_prime(n):\n\tprime = True\n\tif n == 1:\n\t\tprime = False\n\tc = 2\n\twhile c * c <= n:\n\t\tif n % c == 0:\n\t\t\tprime = False\n\t\tc += 1\n\treturn prime",
"def prime(n: int) -> bool:\n factors = find_first(lambda i: n % i == 0, range(2, int(math.sqrt(n) + 1)))\n return len(list(factors)) == 0",
"def prime(n: int) -> bool:\n factors = find_first(lambda i: n % i == 0, range(2, int(math.sqrt(n) + 1)))\n return len(list(factors)) == 0",
"def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False",
"def get_prime_digits_for_one(a: int) -> bool:\r\n b = a\r\n c = 0\r\n c1 = 0\r\n while b > 0:\r\n c1 += 1\r\n n = b % 10\r\n if isprime(n):\r\n c += 1\r\n b = b // 10\r\n if c == c1:\r\n return True\r\n else:\r\n return False",
"def is_prime(num):\n\tif num is 1:\n\t\treturn False\n\tif num % 2 is 0:\n\t\treturn num is 2\n\n\tdivision = 3\n\twhile (division * division) <= num:\n\t\tif num % division is 0:\n\t\t\treturn False\n\t\tdivision += 2\n\treturn True",
"def is_prime(self):\n pass",
"def is_perfect_number(x):\n return sum(proper_divisors(x)) == x",
"def is_prime(x: int) -> bool:\n return not any(x % i == 0 for i in range(2, int(math.sqrt(x)+1)))",
"def chk_consq_pfac(n, primelist): \n fctlst = uprime_factors(n,primelist)\n pmin = fctlst[0]\n ind = primelist.index(pmin)\n return primelist[ind + len(fctlst) -1] == fctlst[-1]",
"def is_prime(x: int) -> bool:\n if x < 2:\n return False\n if x != 2 and x % 2 == 0:\n return False\n for i in range(3, x // 2 + 1):\n if x % i == 0:\n return False\n return True",
"def is_perfect(n):\r\n if sum_proper_divisors(n) == n:\r\n return True\r\n else:\r\n return False",
"def is_prime(n):\n if n < 2:\n return False\n if n == 2 | n == 3:\n return True\n if n % 2 == 0 | n % 3 == 0:\n return False\n for i in range(2, int(sqrt(n))+1):\n if n % i == 0:\n return False\n return True",
"def is_prime(n):\n if n < 2:\n return False\n if n == 2 | n == 3:\n return True\n if n % 2 == 0 | n % 3 == 0:\n return False\n for i in range(2, int(sqrt(n))+1):\n if n % i == 0:\n return False\n return True"
]
| [
"0.8151116",
"0.76198447",
"0.6820226",
"0.6649169",
"0.65843964",
"0.6367718",
"0.62974745",
"0.62864256",
"0.6271171",
"0.6185582",
"0.6122723",
"0.6101257",
"0.60979694",
"0.60873055",
"0.6075344",
"0.5987454",
"0.5984497",
"0.5948677",
"0.5948677",
"0.59447944",
"0.59283316",
"0.5926765",
"0.59223217",
"0.59136033",
"0.5912665",
"0.58614546",
"0.58314025",
"0.58280486",
"0.57979786",
"0.57979786"
]
| 0.7659492 | 1 |
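The coprime record above delegates to a user-defined euclid() gcd helper that is not shown; an equivalent sketch using the standard library, with a couple of spot checks:

    import math

    def coprime_std(a: int, b: int) -> bool:
        # gcd == 1 means the two numbers share no prime factor
        return math.gcd(a, b) == 1

    print(coprime_std(14, 15))   # True  (2*7 vs 3*5 -- no common prime)
    print(coprime_std(12, 18))   # False (gcd is 6)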
Check if elements of a list are pairwise coprime. | def pairwise_coprime(listing: list):
assert isinstance(listing, list)
size = len(listing)
for i in range(0, size - 1):
for j in range(i + 1, size):
if not coprime(listing[i], listing[j]):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_restraint_pairs_for_doubles(list): # Also consider that a1 and a2 can be switches\r\n for i in range(len(list) - 1):\r\n for j in range(i + 1, len(list)):\r\n if (list[i].r1 == list[j].r1 and list[i].r2 == list[j].r2) or (\r\n list[i].r1 == list[j].r2 and list[i].r2 == list[j].r1) or list[i].distance == list[j].distance:\r\n return True\r\n return False",
"def check_pairs(self, all_pr, curr):\n flag = True\n for pair_ox in all_pr:\n if (curr[0] == pair_ox or curr[1] == pair_ox):\n flag = False\n return flag",
"def chk_consq_pfac(n, primelist): \n fctlst = uprime_factors(n,primelist)\n pmin = fctlst[0]\n ind = primelist.index(pmin)\n return primelist[ind + len(fctlst) -1] == fctlst[-1]",
"def pairwise(a,b):\n return a != b",
"def toate_elementele_prime(lst):\n for x in lst:\n if nr_prim(x) is False:\n return False\n return True",
"def coprime(self,x,y):\r\n return x == 1 or y == 1 or not bool(self.cofactors(x,y))",
"def coprime(a: int, b: int):\n\n return euclid(a, b) == 1",
"def coprime(a, b):\n return gcd(a, b) == 1",
"def are_pairwise_disjoint(it, raise_error=False):\n all_elements = set()\n for (i, l) in enumerate(it):\n s = set(l)\n if all_elements.isdisjoint(s):\n all_elements = all_elements.union(s)\n else:\n if raise_error:\n raise ValueError('Set at index {} is not disjoint with'\n 'previous sets. Common entries are {}'.\n format(i, all_elements.intersection(s)))\n return False\n\n return True",
"def isIsosceles(self):\n\t\treturn self.a == self.b or self.a == self.c or self.b == self.c",
"def isosceles(sides: list) -> bool:\n\n return validate_triangle(sides) and len(set(sides)) <= 2",
"def comaIsSymmetric(self):\n\t\tfor i in range(2*self.totalBins):\n\t\t\tfor j in range(2*self.totalBins):\n\t\t\t\tif not self.coma[i,j] == self.coma[j,i]:\n\t\t\t\t\tprint i,j,self.coma[i,j],self.coma[j,i]\n\t\t\t\t\treturn False\n\t\treturn True",
"def check_for_doubles(list: t.List) -> bool:\r\n\r\n for i in range(len(list) - 1):\r\n for j in range(i + 1, len(list)):\r\n if list[i] == list[\r\n j]: # WTF?1 Seems to check for ref identity, not just value identity, even though all docs say ity checks only for value!!!!!!!\r\n return (True)\r\n return False",
"def get_pairs(self, data, linked_clusters):\n\n _linked_clusters = [_cluster.antecessor for _cluster in linked_clusters]\n pairs = [pair for pair in itertools.combinations(_linked_clusters, r=2)]\n do_not_merge = [False for pair in itertools.combinations(_linked_clusters, r=2)]\n paircount = 0\n for pair in itertools.combinations(_linked_clusters, r=2):\n cluster1 = pair[0]\n cluster2 = pair[1]\n\n if cluster1.number_of_members > cluster2.number_of_members:\n dnm = check_components(self, data, cluster2, [cluster1])\n else:\n dnm = check_components(self, data, cluster1, [cluster2])\n if np.any(dnm):\n do_not_merge[paircount] = True\n paircount += 1\n\n return pairs, do_not_merge",
"def all_consecutive(s):\n for (x, y) in pairwise(sorted(s)):\n if y - x != 1:\n return False\n return True",
"def allCrossing(forms):\n if len(forms) == 1: forms = forms[0]\n l = len(forms)\n for i in range(l):\n for j in range(i + 1, l):\n if not forms[i].crossForm(forms[j]):\n return False\n return True",
"def are_consecutive(int_list):\n\n if set(np.diff(int_list)) == set([1]):\n return True\n else:\n return False",
"def two_sum(target, ls):\n complements = set()\n for num in ls:\n if num in complements:\n return True\n complements.add(target - num)\n\n return False",
"def has_single_eligible_pair(n):\n pairs = [(a,b) for a,b in pairs_of_factors(n) if 1<a<100 and 1<b<100]\n num_eligible_pairs = sum(not is_sum_of_primes(a+b) for a,b in pairs)\n return len(pairs) > 1 and num_eligible_pairs==1",
"def is_ppc(C1, C2, i):\n c1, c2 = sorted(C1), sorted(C2)\n for k in range(len(c1)):\n if i <= c2[k]:\n # return False\n break\n if c1[k] != c2[k]:\n return False\n return True",
"def count_pairs(clusters_list, cluster_labels):\n algorithm_pairs = 0\n intersecting_pairs = 0\n for points_in_cluster in clusters_list:\n algorithm_pairs += (len(points_in_cluster)**2 - len(points_in_cluster)) / 2\n for pair in itertools.combinations(points_in_cluster, 2):\n if cluster_labels[pair[0]] == cluster_labels[pair[1]]:\n intersecting_pairs += 1\n return algorithm_pairs, intersecting_pairs",
"def anyCrossing(forms):\n if len(forms) == 1: forms = forms[0]\n l = len(forms)\n for i in range(l):\n for j in range(i + 1, l):\n if forms[i].crossForm(forms[j]):\n return True\n return False",
"def test_silly_asymmetrical_case(self):\n complex_id_list = [\"a\", \"b\", \"c\"]\n dataloader = lambda s: s\n calc_func = lambda c1,c2: c1 + c2\n \n actual,_ = complex_pairwise_calc(complex_id_list, dataloader, calc_func, callback = None, symmetry=False)\n \n expected = [(\"a\", \"b\", \"ab\"), ('a', 'c', 'ac'), ('b', 'a', 'ba'), ('b', 'c', 'bc'), ('c', 'a', 'ca'), ('c', 'b', 'cb'),\n ('a','a','aa'), ('b','b','bb'), ('c','c','cc')]\n \n self.assertEqual(actual, expected)",
"def get_total_identical_pairs(list_of_numbers):\n\n pairs = 0\n\n # Loop through the list of number\n for a in range(len(list_of_numbers)):\n\n # Start from after the first number of the possible pair\n for b in range(a + 1, len(list_of_numbers)):\n\n # Compare the first pair to next possible number pair\n if list_of_numbers[a] == list_of_numbers[b]:\n\n # The first number's index must be less than the second number's index\n if a < b:\n pairs += 1\n\n return pairs",
"def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r",
"def _distinct_primes_lemma(cls, primes):\n primes = sorted(primes)\n l = len(primes)\n for i in range(l):\n for j in range(i+1, l):\n if primes[j] % primes[i] == 1:\n return None\n return True",
"def is_onepair(holecards, flop, exclude_board=True):\n hand = tuple(chain(holecards, flop))\n\n if exclude_board:\n return hand_is_onepair(hand) and not flop_has_pair(flop)\n else:\n return hand_is_onepair(hand)",
"def is_pair_allowed(a, b):\n if a == complementary(b):\n return True\n if a == 'G' and b == 'U' or a == 'U' and b == 'G':\n return True\n return False",
"def is_consecutive(a_list):\n current_value = a_list[0]-1\n for number in a_list:\n current_value +=1\n if current_value != number:\n return False\n return True",
"def is_cc(test_cases):\r\n \r\n n_inputs=len(test_cases[0])\r\n cc=[]\r\n was_true=False\r\n was_false=False\r\n \r\n for input in range(n_inputs):\r\n was_true=False\r\n was_false=False\r\n for test_case in test_cases:\r\n if test_case[input] == True:\r\n was_true=True\r\n else:\r\n was_false=True\r\n if was_true and was_false:\r\n cc.append(True)\r\n else:\r\n cc.append(False) \r\n return all(cc)"
]
| [
"0.62395567",
"0.61482817",
"0.6120177",
"0.6115486",
"0.6090449",
"0.592758",
"0.5890369",
"0.58721393",
"0.58647364",
"0.58555436",
"0.58174556",
"0.57909375",
"0.5681002",
"0.56805116",
"0.56683934",
"0.5644282",
"0.5635076",
"0.5634428",
"0.5613893",
"0.55734193",
"0.55714256",
"0.5570844",
"0.5536176",
"0.5461925",
"0.5450453",
"0.54372424",
"0.5428347",
"0.5426327",
"0.5392468",
"0.5376386"
]
| 0.854069 | 0 |
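A usage sketch for the pairwise_coprime record above; the check performs O(n^2) gcd calls over the list, which is fine for short inputs. Assumes coprime() and pairwise_coprime() from the records above are in scope:

    print(pairwise_coprime([3, 5, 7, 11]))   # True:  every pair has gcd 1
    print(pairwise_coprime([6, 10, 15]))     # False: 6 and 10 share the factor 2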
Decomposes an integer n into prime factors and returns the corresponding set. A prime number can only be divided by 1 or itself, so it cannot be factored any further; every other whole number can be broken down into prime factors, which makes the primes the basic building blocks of all numbers. Set exponent to True to get each prime together with its exponent (p^e) instead of the plain list of factors. | def findPrimeFactors(n: int, exponent: bool = False):
s = []
# Number of 2s that divide n
while n % 2 == 0:
s.append(2)
n = n // 2
nroot = integer_sqrt(n)
# n must be odd at this point. So we can
# skip one element (Note i = i +2)
for i in range(3, nroot, 2):
# While i divides n, print i and divide n
while n % i == 0:
s.append(i)
n = n // i
# This condition is to handle the case
# when n is a prime number greater than 2
if n > 2:
s.append(n)
uniqSorted = sorted(list(set(s)))
if exponent:
# using set to get unique list
return dict(zip(uniqSorted, [s.count(e) for e in uniqSorted]))
return uniqSorted | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prime_factors_set(n):\n factors = []\n d = 2\n while n > 1:\n while n % d == 0:\n factors.append(d)\n n /= d\n d = d + 1\n if d*d > n:\n if n > 1: factors.append(n)\n break\n return list(set(factors))",
"def getallprimefactors(n):\n factors = []\n d = 2\n while n > 1:\n while n % d == 0:\n factors.append(d)\n print(n)\n n /= d\n d += 1\n return factors",
"def prime_divisors(n):\r\n\treturn list(set(factors(n)))",
"def prime_divisors(n):\n\treturn tuple(set(factors(n)))",
"def prime_factors(n):\n\n prime_set = primes(n)\n factors = []\n for prime in prime_set:\n if n % prime == 0:\n factors.append(prime)\n return factors",
"def primefactors(n):\n factors = []\n primes = prime_sieve(n)\n\n for p in primes:\n while n % p == 0:\n factors.append(p)\n n /= p\n if n == 1:\n return(factors)\n return([n])",
"def primeFactors(n):\n\n ps = primes(n)\n rest = n\n factors = {}\n for p in ps:\n if rest == 1:\n break\n\n if p ** 2 > n:\n if len(factors.keys()) > 0:\n factors[p] = 1\n else:\n factors[n] = 1\n break\n\n power = 0\n while rest % p == 0:\n power += 1\n rest = rest / p\n\n if power > 0:\n factors[p] = power\n\n return factors",
"def prime_factorization(n):\n # Code taken directly from \"Prime factorization - list\" at\n # http://stackoverflow.com/a/16996439.\n primfac = []\n d = 2\n while d*d <= n:\n while (n % d) == 0:\n primfac.append(d) # supposing you want multiple factors repeated\n n //= d\n d += 1\n if n > 1:\n primfac.append(n)\n return Multiset(primfac)",
"def factorize(n):\n fct = [] # prime factor\n b, e = 2, 0 # base, exponent\n while b * b <= n:\n while n % b == 0:\n n = n // b\n e = e + 1\n if e > 0:\n fct.append((b, e))\n b, e = b + 1, 0\n if n > 1:\n fct.append((n, 1))\n return fct",
"def get_prime_factors(n):\n factors = {}\n if n <= 1: return {}\n \n while n != 1:\n if is_prime(n):\n factors[n] = 1\n break\n \n i = 2\n while i <= n:\n j = 0\n while n % i == 0 and n != 1:\n j += 1\n n //= i\n \n if j > 0:\n factors[i] = j\n break\n i += 1\n \n return factors",
"def factorize(n):\n it = factorize._prime_iterator\n factors = []\n it.reset()\n for p in it:\n if n == 1 or n < p * p:\n break\n if n % p == 0:\n n //= p\n m = 1\n while n % p == 0 and n > 1:\n n //= p\n m += 1\n factors.append((p, m))\n if n > 1:\n factors.append((n, 1))\n return factors",
"def prime_factors(n):\r\n factors = defaultdict(int)\r\n d = 2\r\n while n > 1:\r\n while n % d == 0:\r\n factors[d]+=1\r\n n /= d\r\n d = d + 1\r\n if d*d > n:\r\n if n > 1: factors[n]+=1\r\n break\r\n return factors",
"def factors(n, primes):\n\n for p in takewhile(lambda p: p*p < n, primes):\n exponent = 0\n\n while n % p == 0:\n exponent += 1\n n /= p\n\n if exponent > 0:\n yield p, exponent\n\n if n > 1:\n yield n, 1",
"def primefactors_with_multiplicity(n):\n factors = []\n primes = prime_sieve(n)\n\n for p in primes:\n while n % p == 0:\n factors.append(p)\n n /= p\n if n == 1:\n return(factors)\n return([n])",
"def _factors(n):\n gen = ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)\n return set(sum(gen, []))",
"def _prime_factorization(n):\n factors = []\n f = 2\n # Use trial division to add factors\n while f**2 <= n:\n while (n % f) == 0:\n factors.append(f)\n n //= f\n f += 1\n\n if n > 1:\n factors.append(n)\n\n return factors",
"def prime_factors(n) -> []:\n i = 2\n factors = []\n while i * i <= n:\n if n % i:\n i += 1\n else:\n n //= i\n factors.append(i)\n if n > 1:\n factors.append(n)\n return factors",
"def primish(n):\n\n factors = set()\n for i in range(n, 1, -1):\n\n # Find the smallest divisor of i.\n smallest = 2\n while (i % smallest) != 0:\n smallest += 1\n\n # Divide by that divisor until we have 1 or something else.\n remainder = i\n while (remainder % smallest) == 0:\n remainder /= smallest\n\n # Keep it if needed.\n if remainder == 1:\n factors.add(i)\n\n return factors",
"def factors_s(n, ret=False):\n f = set()\n if n < 4:\n return f\n limit = int(n / 2 + 1)\n for i in primeList:\n if i > limit:\n break\n while n != 1:\n if n % i:\n break\n else:\n n //= i\n f.add(i)\n else:\n break\n if ret:\n return (n, f)\n return f",
"def primefactors(n):\n seq = []\n val = 2\n while val <= n:\n if VERBOSE: print \"val: %s n: %s\" % (val, n)\n if n % val == 0:\n # Found a factor, shrink n by that factor \n # ie. n = 60, val = 2\n # Next pass n = 30, val = 2\n seq.append(val)\n n /= val\n else:\n # Not (or no longer) a factor\n val += 1\n\n return seq",
"def prime_factors(n):\n\n factors = []\n lastresult = n\n c = 2\n while lastresult != 1:\n if lastresult % c == 0 and c % 2 > 0:\n factors.append(c)\n lastresult /= c\n c += 1\n else:\n c += 1\n return factors[0], factors[1]",
"def prime_factors(n):\n factors = []\n lastresult = n\n c = 2\n while lastresult != 1:\n if lastresult % c == 0 and c % 2 > 0:\n factors.append(c)\n lastresult /= c\n c += 1\n else:\n c += 1\n return factors[0], factors[1]",
"def factorize(primes, n):\n factor = []\n for prime in primes:\n ex = 0\n while n % prime == 0:\n ex += 1\n n = n // prime\n if ex != 0:\n factor.append((prime, ex))\n\n return factor if n == 1 else None",
"def factors(n):\n _factors = []\n p = 1\n\n # Loop until half of n\n while p <= n // 2:\n p += 1\n if div_by(p, _factors):\n continue\n if not n % p:\n _factors.append(p)\n\n # Number given is a prime\n if not _factors:\n _factors.append(n)\n\n return _factors",
"def factorization(n):\n pf = []\n for p in primeslist:\n if p*p > n : break\n count = 0\n while not n % p:\n n //= p\n count += 1\n if count > 0: pf.append((p, count))\n if n > 1: pf.append((n, 1))\n return pf",
"def prime_factors(n):\n if n < 2 or n - round(n) != 0:\n print('Numbers smaller than 2 and non-integers do not have prime',\n 'factors')\n L = []\n while n >= 2:\n i = low_prime(n)\n L.append(i)\n n //= i\n return L",
"def prime_factors(n: int) -> Dict[int, int]:\n if SHOW_WORKING: print(f\"prime_factors({n})\")\n original_n = n\n factors = {}\n\n while n % 2 == 0:\n print(f\"\\tChecking if {n} divides 2\")\n print(f\"\\t\\tYes--Adding 2\")\n if 2 in factors.keys():\n factors[2] += 1\n else:\n factors[2] = 1\n n //= 2\n\n checklimit: int = math.ceil(math.sqrt(n)) + 1\n for d in range(3, checklimit, 2):\n if n % d:\n print(f\"\\tChecking if {n} divides {d}\")\n print(f\"\\t\\tNo--moving on\")\n d += 1\n else:\n while n % d == 0:\n print(f\"\\tChecking if {n} divides {d}\")\n print(f\"\\t\\tYes--Adding {d}\")\n if d in factors.keys():\n factors[d] += 1\n else:\n factors[d] = 1\n n //= d\n if n > 1:\n factors[n] = 1\n\n print(f\"\\t{original_n} has prime factorisation {' * '.join([str(p) + '^' + str(e) for p, e in factors.items()])}\")\n return factors",
"def factors(n):\n factors = []\n for x in range(1, int(sqrt(n)+1)):\n if (n % x) == 0:\n factors += [x, n/x]\n \n return sorted(set(factors))",
"def list_of_divisibles(n):\n def is_prime(x, L = []):\n if x in L or x == 2:\n return True\n elif x == 1 or x % 2 == 0:\n return False\n for divisor in range(1, round(x ** .5)):\n if is_prime(divisor, L):\n if x % divisor == 0:\n return False\n return True\n \n def largest_exponent(i, n):\n \"\"\"\n Given a limit n and a base i, finds the largest exponenet x such that i ^ x <= n, and outputs i ^ x.\n\n \"\"\"\n x = 1\n while i ** x <= n:\n x += 1\n x -= 1\n print(i, x, i**x)\n return i ** x\n \n L = []\n for i in range(2, n+1):\n if i in L:\n continue\n elif is_prime(i):\n L.append(largest_exponent(i, n))\n return L",
"def divisors(n):\n divs = [1]\n for p, e in factorization(n):\n divs += [x*p**k for k in range(1,e+1) for x in divs]\n return divs"
]
| [
"0.7794402",
"0.75982153",
"0.7502622",
"0.74230844",
"0.74169475",
"0.73915225",
"0.7347283",
"0.7342058",
"0.7326059",
"0.7312405",
"0.7305889",
"0.72976536",
"0.7269161",
"0.72683316",
"0.7263194",
"0.7233486",
"0.72117335",
"0.7176523",
"0.7133461",
"0.7125444",
"0.71049845",
"0.7102853",
"0.70948744",
"0.70798004",
"0.70369196",
"0.7025249",
"0.7023387",
"0.7019528",
"0.70034313",
"0.69988996"
]
| 0.80150515 | 0 |
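The negative examples above are mostly small variations on trial division. For reference, a minimal, self-contained sketch of that pattern (illustrative only, not drawn from any single entry above):

def trial_division(n: int) -> list:
    # Divide out each candidate factor d starting from 2; integer
    # division (//) keeps n an int, unlike the entries above using /=.
    factors = []
    d = 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)  # whatever remains is itself prime
    return factors

assert trial_division(360) == [2, 2, 2, 3, 3, 5]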
Reversible mapping using Chinese Remainder Theorem into/from Zpq. | def mapperCRT(elt, p: int, q: int, action: bool = True, Verbose: bool = False):
# Mapping
if action:
a = elt % p
b = elt % q
if Verbose and q != p:
print(f"Converting {elt} in Zpq to a in Zp and b in Zq.")
print(f"With a = {a} mod {p} and b = {b} mod {q}")
return (a, b)
x = ChineseRemainder(elt, [p, q], Verbose)
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decode(self, z):\n if self.switch:\n x = self.bijecter(z, inverse=True)\n return self.decode_(x)\n else:\n return self.decode_(z)",
"def getReversePam(self):\n watson = \"ACGTYRSWKMBDHVN\"\n crick = \"TGCARYSWMKVHDBN\"\n return self.forwardPam[::-1].translate(\n self.forwardPam[::-1].maketrans(watson, crick)\n )",
"def reverseWithMap(pattern, keys):\n return \"\"",
"def greedy_decode(self, z):\r\n\r\n raise NotImplementedError",
"def reverseComplement(string):\n rMap = { \"A\":\"T\", \"T\":\"A\", \"C\":\"G\", \"G\":\"C\", \"N\":\"N\"}\n return \"\".join(rMap[i] for i in string[::-1])",
"def retranslate(self):\r\n pass",
"def retranslate(self):\r\n pass",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch",
"def ReverseComplement1(seq):\n seq_dict = {'A':'T','T':'A','G':'C','C':'G'}\n return \"\".join([seq_dict[base] for base in reversed(seq)])",
"def translate_leet(phrase):",
"def decode(self, z):\n raise NotImplementedError",
"def buildCoder(shift):\n alphabet = string.ascii_lowercase \n alphabet2 = string.ascii_uppercase \n \n \n #Create our substitution dictionary \n dic={} \n dic2={}\n for i in range(0,len(alphabet)): \n dic[alphabet[i]]=alphabet[(i+shift)%len(alphabet)]\n dic2[alphabet2[i]]=alphabet2[(i+shift)%len(alphabet2)]\n \n dic.update(dic2)\n \n return dic",
"def decoder(self, z):\n x1 = self.dec_conv(z)\n return x1",
"def buildCoder(shift):\n mapper={}\n for ch in string.ascii_lowercase:\n if (ord(ch)+shift)>ord('z'):\n mapper[ch]=chr(ord(ch)+shift-ord('z')+ord('a')-1)\n else:\n mapper[ch]=chr(ord(ch)+shift)\n for ch in string.ascii_uppercase:\n if (ord(ch)+shift)>ord('Z'):\n mapper[ch]=chr(ord(ch)+shift-ord('Z')+ord('A')-1)\n else:\n mapper[ch]=chr(ord(ch)+shift)\n return mapper",
"def reverse(self): # real signature unknown; restored from __doc__\n pass",
"def z_rotation(self):\n before = ('R', 'r', 'U', 'u', 'L', 'l', 'D', 'd', 'M', 'E', 'x', 'y')\n after = ('U', 'u', 'L', 'l', 'D', 'd', 'R', 'r', 'E', 'M\\'', \"y\", \"x'\")\n solve = self.solve_helper.maketrans(dict(zip(before, after)))\n solve_trans = self.solve_helper.translate(solve)\n solve_trans = solve_trans.replace(\"\\'\\'\", \"\")\n self.solve_helper = solve_trans",
"def translate(inp: str) -> str:\n\t# list for encdoe cirylic symbols in latinc.\n\tsymbols = (u\"абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯöÖåÅ\",\n\t\t\tu\"abvgdeejzijklmnoprstufhzcss_y_euaABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUAoOaA\")\n\t# generate dict like {\"a\":\"a\",\"б\":\"\",...}\n\ttr = {ord(a):ord(b) for a, b in zip(*symbols)}\n\t# switch all symbols\n\toutput = inp.translate(tr)\n\treturn output",
"def transbrl (arg):\r\n return n.translate(p.translate(arg))",
"def reverse_binary(df, target, new, label, label_map):\n recoded_df = df.copy()\n recoded_df[new] = 1 - recoded_df[target]\n \n label_map[new] = label\n \n return recoded_df",
"def _transform(self, original, coder):\n msg = list(original)\n for k in range(len(msg)):\n if 0x590 < ord(msg[k]) < 0xfb50:\n msg[k] = coder[msg[k]]\n return u\"\".join(msg)",
"def _algorithm(self, rut):\n suma = 0\n multi = 2\n for r in rut[::-1]:\n suma += int(r) * multi\n multi += 1\n if multi == 8:\n multi = 2\n return u'0123456789K0'[11 - suma % 11]",
"def decode(self, z):\n result = self.decoder_input(z)\n result = result.view(-1, 512, 4, 4)\n result = self.decoder(result)\n return result",
"def strQ2B(ustring):\n rstring = \"\"\n for uchar in ustring:\n inside_code = ord(uchar)\n if inside_code == 12288: \n inside_code = 32\n elif (inside_code >= 65281 and inside_code <= 65374): \n inside_code -= 65248\n rstring += chr(inside_code)\n return rstring",
"def reverseComplementer(self, ORFsequence):\r\n reverseComplement = str()\r\n reverserDict = {\"A\":\"U\",\"U\":\"A\",\"C\":\"G\",\"G\":\"C\"}\r\n reversedseq = ORFsequence[::-1]\r\n for nucleotide in reversedseq:\r\n reverseComplement+=reverserDict[nucleotide]\r\n return reverseComplement",
"def question_new_translate():",
"def reverseComplement(s):\n\tcomplement = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'N':'N'}\n\tt = ''\n\tfor base in s:\n\t\tt = complement[base] + t\n\treturn t",
"def TR_algo2(p, vd=2):\n # h will contain the Hilbert index\n h = 0\n # ve and vd contain the entry point and dimension of the current subcube\n # we choose here a main traversal direction N-2 (i.e. z for a cube) to match\n # the illustrations\n ve = 0\n for i in range(M-1, -1, -1):\n # the cell label is constructed in two steps\n # 1. extract the relevant bits from p\n l = [bit_component(px, i) for px in p]\n # 2. construct a integer whose bits are given by l\n l = sum( [lx*2**j for j, lx in enumerate(l)] )\n # transform l into the current subcube\n l = T(ve, vd, l)\n # obtain the gray code ordering from the label l\n w = inverse_gc(l)\n # compose (see [TR] lemma 2.13) the transform of ve and vd\n # with the data of the subcube\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % N\n # move the index to more significant bits and add current value\n h = (h << N) | w\n return h",
"def transform_phred_to_prob(c, offset=33):\n return 10**((ord(c) - offset) / (-10.0))",
"def translateBpn(bpnInput, translations):\n\n\t#Identify all keys to be replaced in the bpn based on the agreed regex \n\t# TODO come up with sth better than $$...$$: use different chars at beggining and end ",
"def string_reverser(our_string):\\\\\n\\\n # TODO: Write your solution here\\"
]
| [
"0.5949792",
"0.5576696",
"0.5559757",
"0.5489173",
"0.5471126",
"0.54544353",
"0.54544353",
"0.53757817",
"0.53221095",
"0.52354676",
"0.5227212",
"0.5202208",
"0.5193444",
"0.5171219",
"0.5162975",
"0.5156324",
"0.51469314",
"0.5143449",
"0.5138922",
"0.5122853",
"0.5093745",
"0.50829136",
"0.50690335",
"0.5068112",
"0.5067978",
"0.50607735",
"0.5058226",
"0.5052712",
"0.50462884",
"0.50388646"
]
| 0.6318922 | 0 |
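As a usage note for the entry above: a hedged, self-contained sketch of the same Zpq <-> (Zp, Zq) round trip that does not depend on the toolkit's ChineseRemainder helper (requires Python 3.8+ for pow(p, -1, q); sample values are illustrative):

def to_crt(x: int, p: int, q: int):
    # forward mapping: residues of x modulo the two coprime factors
    return x % p, x % q

def from_crt(a: int, b: int, p: int, q: int) -> int:
    # Garner-style reconstruction: x = a + p * ((b - a) * p^-1 mod q)
    p_inv = pow(p, -1, q)
    return (a + p * (((b - a) * p_inv) % q)) % (p * q)

p, q = 11, 13
a, b = to_crt(100, p, q)        # (1, 9)
assert from_crt(a, b, p, q) == 100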
BabyStep GiantStep solution for the discrete logarithm problem: res = g^x mod modulo, i.e. log_g(res) = x. Uses a hash table for fast lookup of the baby steps. | def bsgs(g: int, res: int, modulo: int):
assert millerRabin(modulo)
# https://en.wikipedia.org/wiki/Baby-step_giant-step
from ressources.multGroup import inv
m = integer_sqrt(modulo) + 1
hashTable = {square_and_multiply(g, j, modulo): j for j in range(m)} # Baby-Step
gm = square_and_multiply(g, m, modulo)
invGm = inv(gm, modulo)
# Initialization
y = res
# Search for an equivalence in the table - Giant-Step
for i in range(m):
if y in hashTable:
return i * m + hashTable[y]
y = (y * invGm) % modulo
return -1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def r_soft_hash(x):\n if abs(x) < 1e-9:return 0\n # round it to some number of bits\n b = ns.round(ns.log(abs(x)) / ns.log(2))\n gran = 2**(b-30)\n return ns.round(x / gran) * gran",
"def PRGA(tab):\n i = 0\n j = 0\n while True:\n i = (i + 1) % MOD\n j = (j + tab[i]) % MOD\n\n tab[i], tab[j] = tab[j], tab[i]\n K = tab[(tab[i] + tab[j]) % MOD]\n yield K",
"def g(i):\n return int(np.log2(gc(i)^gc(i+1)))",
"def hash_quadratic(d):\n # initialize table\n table = [\"-\"] * 19\n # consider each integer k in the input\n for k in d:\n # if k is already in the table this is a duplicate so move to next integer in the input\n # note this check for a duplicate is using the functionality of python rather than checking using a linear probe\n if k in table:\n continue\n # apply the hash function\n i = (6 * k + 3) % 19\n t = i\n # initialize count that checks whether linear probe has considered each bucket and is now full\n count = 0\n j = 0\n # while bucket is already filled\n\n while table[i] != '-':\n j += 1\n # move to next bucket\n i = (t + j ** 2) % 19\n # increment count\n count += 1\n\n # if table is full\n if count >= 18:\n # can return table as nothing further can be added\n break\n\n # Ensure table[i] is empty so k can be added here\n if table[i] == '-':\n table[i] = k\n\n # now each part of the input has been considered return the table\n return table",
"def bit_contribution_test(hash_function):\n\n model = hash_function()\n hash_list = []\n zero_str = '0' * 2048\n for i in range(1, 2049):\n for j in range(0, i):\n flip_str = zero_str[:j] + '1' + zero_str[j+1:i]\n hash_list.append(list(map(int, list(msg_to_bits.pad_msg(flip_str, i)))))\n if i % 200 == 0:\n print(i)\n\n hashed_dict = dict()\n collisions = 0\n i = 0\n for to_hash in hash_list:\n i += 1\n hash_val = model.hash(to_hash, False).tostring()\n if hash_val in hashed_dict:\n collisions += 1\n hashed_dict[hash_val] = True\n if i % 10000 == 0:\n print(i)\n\n return collisions",
"def G(k):\n return k^(k>>1)",
"def get_num_slots(self):\n Return the load factor for this hash table.\n\n Implement this.\n \"\"\"\n return self.elements / self.capacity\n\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 Hash, 64-bit\n\n Implement this, and/or DJB2.pyy\n \"\"\"\n\n # Your code here\n\n\n def djb2(self, key):\n \"\"\"\n DJB2 hash, 32-bit\n\n Implement this, and/or FNV-1.\n \"\"\"\n # Your code here\n\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n #return self.fnv1(key) % self.capacity\n<<<<<<< Updated upstream\n return self.djb2(key) % self.capacity\n=======\n return self.djb2(key) % len(self.storage)\n>>>>>>> Stashed changes\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n # need to account for if the key value is the same \n\n i = self.hash_index(key)\n if not self.storage[i]:\n hte = HashTableEntry(key, value)\n self.storage[i] = hte\n self.elements += 1\n hte.head = HashTableEntry(key, value)\n elif self.storage[i] and self.storage[i].key != key:\n self.storage[i].insert_at_head(HashTableEntry(key, value))\n>>>>>>> Stashed changes\n\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n i = self.hash_index(key)\n node = self.storage[i]\n prev = None\n if node.key == key:\n self.storage[i] = node.next\n return\n while node != None:\n if node.key == key:\n prev.next = node.next\n self.storage[i].next = None\n return\n prev = node\n node = node.next\n self.elements -= 1\n return\n>>>>>>> Stashed changes\n\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n # - find the index in the hash table for the key\n i = self.hash_index(key)\n # - search the list for that key\n if not self.storage[i]:\n return None\n else:\n if self.storage[i].find_key(key) == key:\n return self.storage[i].value\n>>>>>>> Stashed changes\n\n\n def resize(self, new_capacity):\n \"\"\"\n Changes the capacity of the hash table and\n rehashes all key/value pairs.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n prev_storage = self.storage\n self.capacity = new_cap\n self.storage = [None] * new_cap\n for i in range(len(prev_storage)):\n prev = prev_storage[i]\n if prev:\n while prev:\n if prev.key:\n self.put(prev.key, prev.value)\n prev = prev.next\n\n>>>>>>> Stashed changes\n\n\n\nif __name__ == \"__main__\":\n ht = HashTable(8)\n\n ht.put(\"line_1\", \"'Twas brillig, and the slithy toves\")\n ht.put(\"line_2\", \"Did gyre and gimble in the wabe:\")\n ht.put(\"line_3\", \"All mimsy were the borogoves,\")\n ht.put(\"line_4\", \"And the mome raths outgrabe.\")\n ht.put(\"line_5\", '\"Beware the Jabberwock, my son!')\n ht.put(\"line_6\", \"The jaws that bite, the claws that catch!\")\n ht.put(\"line_7\", \"Beware the Jubjub bird, and shun\")\n ht.put(\"line_8\", 'The frumious Bandersnatch!\"')\n ht.put(\"line_9\", \"He took his vorpal sword in hand;\")\n ht.put(\"line_10\", \"Long time the manxome foe he sought--\")\n ht.put(\"line_11\", \"So rested he by the Tumtum tree\")\n ht.put(\"line_12\", \"And stood 
awhile in thought.\")\n\n print(\"\")\n\n # Test storing beyond capacity\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n # Test resizing\n old_capacity = ht.get_num_slots()\n ht.resize(ht.capacity * 2)\n new_capacity = ht.get_num_slots()\n\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\n\n # Test if data intact after resizing\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n print(\"\")",
"def gulp(meal):\n #print(\"Proccessing...\")\n broth = meal\n chunks = [broth[i:i+256] for i in range(0, len(broth), 256)]\n #print(\"Number of chunks:\", len(chunks))\n #print(\" - Chunkified...\")\n digest = [0]*64\n i1 = 1\n global Ti\n Ti = 0\n for chunk in chunks:\n t1 = time.time()\n \n if i1 == 1:\n chewed = chew(chunk)\n digest = chewed\n else:\n chewed = chew(chunk)\n digest = [(int(x) + int(y))%16 for x, y in zip(digest, chewed)]\n \n t2 = time.time()\n #print(\"time taken:\", t2-t1)\n Ti += t2-t1\n #print(\"time TOTAL:\", Ti)\n #print(\" - Chewed!\")\n \n i1 += 1\n #print(\"Hash finished.\")\n return digest",
"def h_python(key, N):\n return hash(key) % N",
"def perfect_hash(num):\n return ((num+OFFSET)*(SIZE/PERIOD)) % (SIZE+1) + 1",
"def get_hash_value(table, prime, multiplier, start, length):\n y = pow(multiplier, length, prime)\n hash_value = (table[start+length] - y*table[start]) % prime\n return hash_value",
"def prime_mod_hash(self, key: T) -> int:\n return (self.a * self.encode(key)) % self.table_size",
"def PRGA_custom(tab):\n i = 0\n j = 0\n while True:\n i = (i + 1) % MOD\n j = (j + tab[i]) % MOD\n yield i+j",
"def solve():\n result = 1\n map = dict()\n for x in range(2, 20):\n temp = prime_factors(x)\n for n in range(2, 20):\n if n in temp:\n if n in map:\n map[n] = max(temp.count(n), map[n])\n else:\n map[n] = temp.count(n)\n\n for x in map:\n result *= (x ** map[x])\n\n return result",
"def parity_of_very_long(x, word_size=8):\n res = 0\n hash_map = {}\n while x!=0:\n word = x & ( (1<<word_size)-1)\n if not(word in hash_map):\n hash_map[word] = parityOf(word)\n res ^= hash_map[word]\n x >>= word_size\n print(hash_map)\n return res",
"def hash_double(d):\n # initialize table\n table = [\"-\"] * 19\n # consider each integer k in the input\n for k in d:\n # if k is already in the table this is a duplicate so move to next integer in the input\n # note this check for a duplicate is using the functionality of python rather than checking using a linear probe\n if k in table:\n continue\n # apply the hash function\n i = (6 * k + 3) % 19\n t = i\n # initialize count that checks whether linear probe has considered each bucket and is now full\n count = 0\n j = 0\n # while bucket is already filled\n s = 11 - (k % 11)\n while table[i] != '-':\n j += 1\n # move to next bucket\n i = (t + j*s) % 19\n # increment count\n count += 1\n\n # if table is full\n if count >= 18:\n # can return table as nothing further can be added\n break\n\n # Ensure table[i] is empty so k can be added here\n if table[i] == '-':\n table[i] = k\n\n # now each part of the input has been considered return the table\n return table",
"def pollard_rho(g: int, h: int, n: int, order: int = None):\n x = {0: 1}\n a = {0: 0}\n b = {0: 0}\n\n import ressources.multGroup as multGroup\n\n if order is None:\n order = multGroup.multiplicativeOrder(g, n)\n\n # from a, b and c, partitioning the field\n def step_xab(x, a, b, g, h, order, n):\n s = x % 3\n\n # S1\n if s == 1:\n x = x * h % n\n b = (b + 1) % order\n\n # S2\n if s == 0:\n x = square_and_multiply(x, 2, n)\n a = 2 * a % order\n b = 2 * b % order\n\n # S3\n if s == 2:\n x = x * g % n\n a = (a + 1) % order\n\n return x, a, b\n\n # returns x, a, b for a given i using memoization\n def get_xab(i):\n\n if i not in x:\n _x, _a, _b = get_xab(i - 1)\n\n x[i], a[i], b[i] = step_xab(_x, _a, _b, g, h, order, n)\n\n return x[i], a[i], b[i]\n\n def naturals_from(i):\n while True:\n # yield is a keyword that is used like return, except the function will return a generator.\n # https://www.google.com/search?client=firefox-b-d&q=yield+python\n yield i\n i += 1\n\n for i in naturals_from(1):\n\n x_i, a_i, b_i = get_xab(i)\n x_2i, a_2i, b_2i = get_xab(2 * i)\n\n if x_i == x_2i:\n\n r = (b_i - b_2i) % order\n\n if r == 0:\n return False\n\n return multGroup.inv(r, order) * (a_2i - a_i) % order",
"def search(self, L: int, a: int, modulus: int, n: int, nums: List[int]) -> str:\n # compute the hash of string S[:L]\n h = 0\n for i in range(L):\n h = (h * a + nums[i]) % modulus\n \n # already seen hashes of strings of length L\n seen = {h} \n # const value to be used often : a**L % modulus\n aL = pow(a, L, modulus) \n for start in range(1, n - L + 1):\n # compute rolling hash in O(1) time\n h = (h * a - nums[start - 1] * aL + nums[start + L - 1]) % modulus\n if h in seen:\n return start\n seen.add(h)\n return -1",
"def measure_gcd_success():\n for size in range(2,16):\n print(\"--------- samplesize = %d\" % size)\n d = dict()\n for _ in range(1000):\n q = findpoly(size)\n d.setdefault(q,0)\n d[q] += 1\n for k,v in sorted(d.items(), key=lambda x: x[1]):\n print(\"%5d: %8s\" % (v, k))",
"def zzx_mod_gcd(f, g, **flags):\n if not (f or g):\n return [], [], []\n elif not f:\n return g, [], [1]\n elif not g:\n return f, [1], []\n\n n = zzx_degree(f)\n m = zzx_degree(g)\n\n cf = zzx_content(f)\n cg = zzx_content(g)\n\n gcd = igcd(cf, cg)\n\n f = [ c // gcd for c in f ]\n g = [ c // gcd for c in g ]\n\n if n == 0 or m == 0:\n return [gcd], f, g\n\n A = max(zzx_abs(f) + zzx_abs(g))\n b = igcd(poly_LC(f), poly_LC(g))\n\n B = int(ceil(2**n*A*b*int(sqrt(n + 1))))\n k = int(ceil(2*b*log((n + 1)**n*A**(2*n), 2)))\n l = int(ceil(log(2*B + 1, 2)))\n\n prime_max = max(int(ceil(2*k*log(k))), 51)\n\n while True:\n while True:\n primes = set([])\n unlucky = set([])\n\n ff, gg, hh = {}, {}, {}\n\n while len(primes) < l:\n p = randprime(3, prime_max+1)\n\n if (p in primes) or (b % p == 0):\n continue\n\n F = gf_from_int_poly(f, p)\n G = gf_from_int_poly(g, p)\n\n H = gf_gcd(F, G, p)\n\n primes.add(p)\n\n ff[p] = F\n gg[p] = G\n hh[p] = H\n\n e = min([ gf_degree(h) for h in hh.itervalues() ])\n\n for p in set(primes):\n if gf_degree(hh[p]) != e:\n primes.remove(p)\n unlucky.add(p)\n\n del ff[p]\n del gg[p]\n del hh[p]\n\n if len(primes) < l // 2:\n continue\n\n while len(primes) < l:\n p = randprime(3, prime_max+1)\n\n if (p in primes) or (p in unlucky) or (b % p == 0):\n continue\n\n F = gf_from_int_poly(f, p)\n G = gf_from_int_poly(g, p)\n\n H = gf_gcd(F, G, p)\n\n if gf_degree(H) != e:\n unlucky.add(p)\n else:\n primes.add(p)\n\n ff[p] = F\n gg[p] = G\n hh[p] = H\n\n break\n\n fff, ggg = {}, {}\n\n for p in primes:\n fff[p] = gf_quo(ff[p], hh[p], p)\n ggg[p] = gf_quo(gg[p], hh[p], p)\n\n F, G, H = [], [], []\n\n crt_mm, crt_e, crt_s = crt1(primes)\n\n for i in xrange(0, e + 1):\n C = [ b * poly_nth(hh[p], i) for p in primes ]\n c = crt2(primes, C, crt_mm, crt_e, crt_s, True)\n\n H.insert(0, c)\n\n H = zzx_strip(H)\n\n for i in xrange(0, zzx_degree(f) - e + 1):\n C = [ poly_nth(fff[p], i) for p in primes ]\n c = crt2(primes, C, crt_mm, crt_e, crt_s, True)\n\n F.insert(0, c)\n\n for i in xrange(0, zzx_degree(g) - e + 1):\n C = [ poly_nth(ggg[p], i) for p in primes ]\n c = crt2(primes, C, crt_mm, crt_e, crt_s, True)\n\n G.insert(0, c)\n\n H_norm = zzx_l1_norm(H)\n\n F_norm = zzx_l1_norm(F)\n G_norm = zzx_l1_norm(G)\n\n if H_norm*F_norm <= B and H_norm*G_norm <= B:\n break\n\n return zzx_mul_const(H, gcd), F, G",
"def slowfun(x, y):\n tmp_key = str(x) + \"_\" + str(y)\n\n # Does the lookup key exist in lookup_dict?\n if tmp_key in lookup_dict:\n # Found key, return precalculated value\n return lookup_dict[tmp_key]\n\n # Generate \n v = math.pow(x, y)\n v = math.factorial(v)\n v //= (x + y)\n v %= 982451653\n \n lookup_dict[tmp_key] = v\n\n return v",
"def calHash(n, m):\n return int(m*BloomFilter.ln2/n)",
"def linear_probing(hash, A, x):\n i = 0\n s = len(A)\n while A[getRemainder(hash(x)+i, s)] is not None:\n i = i + 1\n index = getRemainder(hash(x)+i, s)\n print(f'Linear Probing index is: {index}')\n A[index] = x\n print(f'insertion of {x}: {A}')",
"def fn(k):\n seen = set()\n for i in range(len(s)-k+1): \n val = (prefix[i+k] - prefix[i]*fac[k]) % MOD \n if val in seen: return True # rolling hash (ver. Monte Carlo)\n seen.add(val)\n return False",
"def zzx_heu_gcd(f, g, **flags):\n def interpolate(h, x):\n f = []\n\n while h:\n g = h % x\n\n if g > x // 2:\n g -= x\n\n f.insert(0, g)\n h = (h-g) // x\n\n return f\n\n def finalize(h, cff, cfg, gcd):\n h = zzx_mul_const(h, gcd)\n return h, cff, cfg\n\n if not (f or g):\n return [], [], []\n elif not f:\n return g, [], [1]\n elif not g:\n return f, [1], []\n\n df = zzx_degree(f)\n dg = zzx_degree(g)\n\n cf = zzx_content(f)\n cg = zzx_content(g)\n\n gcd = igcd(cf, cg)\n\n f = [ c // gcd for c in f ]\n g = [ c // gcd for c in g ]\n\n if df == 0 or dg == 0:\n return [gcd], f, g\n\n f_norm = zzx_max_norm(f)\n g_norm = zzx_max_norm(g)\n\n B = 2*min(f_norm, g_norm) + 29\n\n x = max(min(B, 99*INT_TYPE(isqrt(B))),\n 2*min(f_norm // abs(poly_LC(f)),\n g_norm // abs(poly_LC(g))) + 2)\n\n for i in xrange(0, 6):\n ff = zzx_eval(f, x)\n gg = zzx_eval(g, x)\n\n if ff and gg:\n h = igcd(ff, gg)\n\n cff = ff // h\n cfg = gg // h\n\n h = interpolate(h, x)\n h = zzx_primitive(h)[1]\n\n cff_, r = zzx_div(f, h)\n\n if not r:\n cfg_, r = zzx_div(g, h)\n\n if not r:\n return finalize(h, cff_, cfg_, gcd)\n\n cff = interpolate(cff, x)\n\n h, r = zzx_div(f, cff)\n\n if not r:\n cfg_, r = zzx_div(g, h)\n\n if not r:\n return finalize(h, cff, cfg_, gcd)\n\n cfg = interpolate(cfg, x)\n\n h, r = zzx_div(g, cfg)\n\n if not r:\n cff_, r = zzx_div(f, h)\n\n if not r:\n return finalize(h, cff_, cfg, gcd)\n\n x = INT_TYPE(2.7319*x*isqrt(isqrt(x)))\n\n raise HeuristicGCDFailed('no luck')",
"def modulo_power(x, b):\n \n r = x % b\n ct = 0\n pows = {}\n while r not in pows:\n pows[r] = ct\n ct += 1\n r = x * r % b\n return ct - pows[r]",
"def getrandbits(k: int) -> int:\n ...",
"def idcg(k):\n res = sum([1.0 / math.log(i + 2, 2) for i in range(k)])\n if not res:\n return 1.0\n else:\n return res",
"def factorPR(n):\r\n\tfor slow in [2,3,4,6]:\r\n\t\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n))); fast=slow; i=1\r\n\t\twhile i<numsteps:\r\n\t\t\tslow = (slow*slow + 1) % n\r\n\t\t\ti = i + 1\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tg = gcd(fast-slow,n)\r\n\t\t\tif (g != 1):\r\n\t\t\t\tif (g == n):\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn g\r\n\treturn 1",
"def gregory_leibniz():\n\n print(\"Gregory-Leibniz\\n===============\")\n\n iterations = 400000\n denominator = 1.0\n multiplier = 1.0\n pi = (4.0 / denominator)\n\n for i in range(2, iterations + 1):\n denominator += 2.0\n multiplier *= -1.0\n pi += ( (4.0 / denominator) * multiplier )\n\n print_as_text(pi)"
]
| [
"0.63904697",
"0.63661796",
"0.62169975",
"0.6154469",
"0.61477077",
"0.6140015",
"0.60482365",
"0.6001942",
"0.59961516",
"0.59816265",
"0.5975602",
"0.5973159",
"0.59528214",
"0.59457916",
"0.5908312",
"0.5883713",
"0.5881319",
"0.5855098",
"0.5848327",
"0.5817985",
"0.580949",
"0.57924396",
"0.576794",
"0.57566833",
"0.5755476",
"0.5741295",
"0.5735294",
"0.573323",
"0.5718051",
"0.57070065"
]
| 0.76539564 | 0 |
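A hedged, self-contained sketch of the same baby-step/giant-step idea as the entry above, using Python's built-in pow for the modular inverse so it runs without the toolkit (Python 3.8+; names and sample values are illustrative):

from math import isqrt

def bsgs_demo(g: int, h: int, n: int) -> int:
    m = isqrt(n) + 1
    baby = {pow(g, j, n): j for j in range(m)}   # baby steps: g^j -> j
    inv_gm = pow(pow(g, m, n), -1, n)            # (g^m)^-1 mod n
    y = h
    for i in range(m):                           # giant steps
        if y in baby:
            return i * m + baby[y]
        y = (y * inv_gm) % n
    return -1

x = bsgs_demo(3, 66, 101)
assert pow(3, x, 101) == 66   # x == 7 here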
Get a project-scoped token suitable for instance creation. | def get_project_token() -> str:
body = app.config["TOKEN_BODY"].copy()
if app.config.get("ADMIN_PROJECT_ID") is None:
app.config["ADMIN_PROJECT_ID"] = get_admin_project_id()
body["auth"]["scope"] = {"project": {"id": app.config["ADMIN_PROJECT_ID"]}}
token_rq = request(method="POST", url=app.config["TOKEN_REF"], json=body,)
if not token_rq.ok:
raise HTTPError(token_rq.status_code)
return token_rq.headers["X-Subject-Token"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def token(self):\n return self._generate_jwt_token()",
"def token(self):\n return self._generate_jwt_token()",
"def token(self):\n return self._generate_jwt_token()",
"def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')",
"def token(self):\n if not self._token:\n self._token = self.authenicate().token\n\n return self._token",
"def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()",
"def current_token() -> object:\n return get_async_backend().current_token()",
"def token():\n return os.environ.get('TOKEN', None)",
"def get_new_token(self):\n # Save result of this API call into self instance __token\n self.__token = apidnac.ApiDNAC.api_get_token()\n # Save result to the defined parameter (\"token\") in file cache_config\n self.save_param('token', self.__token)\n # Return self instance __token\n return self.__token",
"def get_token(self):\r\n token = {'id': self.catalog['access']['token']['id'],\r\n 'expires': self.catalog['access']['token']['expires'], }\r\n try:\r\n token['user_id'] = self.catalog['access']['user']['id']\r\n token['tenant_id'] = (\r\n self.catalog['access']['token']['tenant']['id'])\r\n except Exception:\r\n # just leave the tenant and user out if it doesn't exist\r\n pass\r\n return token",
"def get_token(self):\n self.token = self._session.fetch_token(\n token_url=CLOUD_URLS[\"get_token\"][1],\n client_id=self._client_id,\n client_secret=self._client_secret\n )",
"def generate(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenGenerate)['token']",
"def get_token(self):\n token = self._session.token\n return token",
"def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})",
"def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})",
"def token(self) -> str:\n return pulumi.get(self, \"token\")",
"def token(self) -> str:\n return pulumi.get(self, \"token\")",
"def token(self) -> str:\n return pulumi.get(self, \"token\")",
"def token(self):\n if not self._token or self._expires <= datetime.now():\n self._request_token()\n return self._token",
"def _get_token(self):\n return user.get_token()",
"def _get_auth_token(self):\n\n __logger__.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']",
"def get_project_access_token(*args, **kwargs):\n return get_project_access_token_async(*args, **kwargs).get_result()",
"def get_scoped_token(adminUser, adminPassword, contract, projectid, region):\n identityURL = 'https://identity.' + region + \\\n '.cloud.global.fujitsu.com/v3/auth/tokens'\n\n try:\n response = requests.post(identityURL,\n headers={'Content-Type': 'application/json',\n 'Accept': 'application/json'},\n json={\"auth\":\n {\"identity\":\n {\"methods\": [\"password\"], \"password\":\n {\"user\":\n {\"domain\":\n {\"name\": contract},\n \"name\": adminUser,\n \"password\": adminPassword\n }}},\n \"scope\":\n {\"project\":\n {\"id\": projectid\n }}}})\n\n return response\n except:\n return 'Regional Project Token Scoping Failure'",
"def token(self):\n\n if not self.requests:\n return None\n return self.requests[0].token",
"def token(self):\n token = jwt.encode(\n {\n \"id\": self.pk,\n \"username\": self.get_full_name,\n \"email\": self.email,\n \"iat\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(minutes=int(os.getenv('TIME_DELTA')))\n },\n settings.SECRET_KEY, algorithm='HS256').decode()\n return token",
"def get_token(self):\n\n return self._token",
"def get_token(self):\n return self.__token",
"def get_token(self):\n return self.__token",
"def get_mgnt_token():\n # Get the Environment variables from App Container.\n app_auth_token = os.getenv('APP_AUTHENTICATION_TOKEN')\n app_endpoint_ip = os.getenv('APPS_API_ENDPOINT_IP')\n app_endpoint_port = os.getenv('APPS_API_ENDPOINT_PORT')\n\n\n # Initialize the client.\n app_cli = AppClient(app_auth_token, app_endpoint_ip, app_endpoint_port)\n app_cli.config.disable_logging()\n\n # Get the management access token.\n token = app_cli.token_management\n mgmt_auth_token = token.create_management_access_token()\n return mgmt_auth_token",
"def get_client_token(**_):\n return str(uuid.uuid4())"
]
| [
"0.6736661",
"0.6736661",
"0.6736661",
"0.6723815",
"0.66672206",
"0.6584221",
"0.65270853",
"0.64946264",
"0.6408879",
"0.6393113",
"0.6387492",
"0.63707364",
"0.63696975",
"0.634789",
"0.634789",
"0.6322874",
"0.6322874",
"0.6322874",
"0.6304796",
"0.62858045",
"0.62819093",
"0.625656",
"0.6241003",
"0.6239239",
"0.6233833",
"0.6212306",
"0.61718893",
"0.61718893",
"0.61534745",
"0.61384887"
]
| 0.7176986 | 0 |
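For context on the entry above: the helper wraps Keystone v3 password authentication with a project scope. A hedged sketch of roughly the request it ends up sending (auth_url, credentials, and project_id are placeholders; the real helper reuses a preconfigured token body from app.config):

import requests

def get_scoped_token_demo(auth_url, user, password, domain, project_id):
    body = {
        "auth": {
            "identity": {
                "methods": ["password"],
                "password": {"user": {"name": user,
                                      "domain": {"name": domain},
                                      "password": password}},
            },
            "scope": {"project": {"id": project_id}},
        }
    }
    resp = requests.post(f"{auth_url}/v3/auth/tokens", json=body)
    resp.raise_for_status()
    # Keystone returns the token in the X-Subject-Token response header.
    return resp.headers["X-Subject-Token"]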
Find a network id by its name. | def get_network_id_by_name(name: str) -> str:
networks_info = get_networks()
for network in networks_info["networks"]:
if network["name"] == name:
return network["id"]
raise AttributeError(f"No network named {name}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def network_id(tenant_id, auth_token, network_name):\r\n content = common_utils.do_request(\r\n tenant_id, auth_token,\r\n method='GET',\r\n body='', service=\"network\",\r\n path='networks.json')\r\n for network in range(len(content[\"networks\"])):\r\n if content[\"networks\"][network][\"name\"] == network_name:\r\n network_id = content[\"networks\"][network][\"id\"]\r\n return network_id",
"def get_net_id(self, net_name):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n result = self.request(\"GET\", _url, _headers, _body)\n\n if result is None:\n LOG_OBJ.error(\n \"No response from Server while trying to\"\n \" get networks of tenant: %s\" %\n self.project_info[\"project_id\"])\n return result\n\n if result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network Failed with status %s \" % result.status)\n return result.status\n\n output = json.loads(result.data)\n LOG_OBJ.debug(\"Networks: %s\" % output['networks'])\n\n for nets in output['networks']:\n if nets['name'].lower() == net_name.lower() and \\\n net_name == config.extnet_name:\n LOG_OBJ.debug(\"Net ID : %s \" % nets['id'])\n return nets['id']\n if nets['name'].lower() == net_name.lower() and \\\n nets['tenant_id'] == self.project_info[\"project_id\"]:\n LOG_OBJ.debug(\"Net ID : %s \" % nets['id'])\n return nets['id']\n\n LOG_OBJ.debug(\"Net:%s Not Found\" % net_name)\n return",
"def get_network_id(options, network):\n service_instance = get_vc_content(options)\n datacenter = get_datacenter(options)\n for item in datacenter.networkFolder.childEntity:\n if (item.name == network):\n return item._GetMoId()",
"def get_network_with_name(self, name):\n for network in self.networks:\n if network.name == name:\n return network\n return None",
"def network_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_id\")",
"def get_network_id(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkId', self.handle)",
"def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")",
"def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")",
"def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")",
"def get_id(cls, name):\n assert name, 'name is empty'\n if name in cls._ids:\n return cls._ids[name]\n sql = \"SELECT id FROM hive_communities WHERE name = :name\"\n cid = DB.query_one(sql, name=name)\n if cid:\n cls._ids[name] = cid\n cls._names[cid] = name\n return cid",
"def get_interface_by_net_name(cls, node_id, netname):\n iface = db().query(models.NodeNICInterface).join(\n (models.NetworkGroup,\n models.NodeNICInterface.assigned_networks_list)\n ).filter(\n models.NetworkGroup.name == netname\n ).filter(\n models.NodeNICInterface.node_id == node_id\n ).first()\n if iface:\n return iface\n\n return db().query(models.NodeBondInterface).join(\n (models.NetworkGroup,\n models.NodeBondInterface.assigned_networks_list)\n ).filter(\n models.NetworkGroup.name == netname\n ).filter(\n models.NodeBondInterface.node_id == node_id\n ).first()",
"def _name2idx(name):\n match = re.search(r\"eth(\\d+)\", name, re.I)\n if not match:\n raise exception.CloudbaseInitException(\n \"invalid NetworkDetails name {!r}\"\n .format(name)\n )\n return int(match.group(1))",
"def _get_network_id(self):\n pubnet = self.conn.network.find_network('public')\n net = self.conn.network.find_network(self.net_conf['net_name'])\n subnet = self.conn.network.find_subnet(self.net_conf['subnet_name'])\n # TODO: Add support for security group\n\n self.network_id = {\n 'public': pubnet.id,\n 'net': net.id,\n 'subnet': subnet.id\n }",
"def retrieve_node_id(self, wg, node_name):\n\n result = self.retrieve(\"\"\"SELECT id FROM nodes\n WHERE wg = %s AND lower(name) = %s\"\"\",\n (utils.wg_as_int(wg), node_name.strip().lower(), ))\n\n if not result:\n raise UnknownNodeError(\"node does not exist\")\n else:\n return int(result[0][0])",
"def _get_network(name):\n\n if name not in _NAME_TO_NETS:\n raise ValueError('Network name [%s] not recognized.' % name)\n return _NAME_TO_NETS[name].model",
"def lookup_netid(self, netid):\n self.setQuery(\"\"\"Select ?uid where {\n ?who <http://vivo.dartmouth.edu/ontology/netId> \"%s\" .\n ?who <http://vivo.dartmouth.edu/ontology/geiselId> ?uid .\n }\"\"\" % (netid))\n\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n return g['results']['bindings'][0]['uid']['value']\n except:\n return None",
"def GetCurrentNetworkID(self, iwconfig=None):\n currentESSID = self.GetCurrentNetwork(iwconfig)\n for x in xrange(0, len(self.LastScan)):\n if self.LastScan[x]['essid'] == currentESSID:\n return x\n if self.debug_mode:\n print 'GetCurrentNetworkID: Returning -1, current network not found'\n return -1",
"def get_network_by_name(self, name: str) -> Network:\n for network in self._networks:\n if network.name == name:\n return network\n raise errors.NotFoundError(f\"there exists no network named {name!r}\")",
"def layer_by_name(net, name):\n for l in net.named_modules():\n if l[0] == name:\n return l[1]",
"def network_id_to_name(network_id: int) -> str:\n return {\n 1: \"main\",\n 3: \"ropsten\",\n 4: \"rinkeby\",\n 5: \"goerli\",\n 42: \"kovan\",\n 100: \"xdai\",\n 56: \"bsc-main\",\n }[network_id]",
"def _get_vm_id_by_name(self, vm_name):\n vm_info = self.connection.compute.find_server(vm_name)\n return (vm_info.id if vm_info else None)",
"def get_dnid_by_dnname(self, dnname):\r\n for dn in self.dns:\r\n if dn.name == dnname:\r\n return dn.id\r\n return None",
"def get_device_type_by_name(name):\n\n type_id = None\n # try:\n for link in NetworkAdapter.LINKS:\n if link['name'] == name:\n type_id = link['id']\n break\n # except IndexError:\n # raise Exception('Parent category does not exist')\n if not type_id:\n raise Exception('Wrong type network adapter')\n\n return type_id",
"def get_model_id(model_name, workspace, header, user):\n uri = \"https://api.anaplan.com/1/3/workspaces/{}/models/\".format(workspace)\n response = requests.get(uri, headers = header)\n response_json = json.loads(models.text.encode(\"utf-8\"))\n for model in response_json:\n if model[u\"name\"] == unicode(model_name):\n return model[u\"id\"]",
"def output_name_to_id(self, name):\n for i, o in list(r.outputs.items()):\n if o.name == name:\n return i",
"def external_network_id(self) -> str:\n return pulumi.get(self, \"external_network_id\")",
"def get_vmid_by_name(cls, container, datacenter, name):\n\n obj = Query.get_obj(container, datacenter)\n\n # recurse through datacenter object attributes looking for vm that\n # matches hostname.\n if hasattr(obj, 'vmFolder'):\n for virtmachine in obj.vmFolder.childEntity:\n if hasattr(virtmachine, 'childEntity'):\n for virt in virtmachine.childEntity:\n if virt.name == name:\n return virt._moId\n else:\n if virt.name == name:\n return virt._moId\n return None",
"def GetNetworkID(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _get_network_by_identity(self, identity):\n for network in self.mobile_networks:\n identities = network.get(\"Operator Identities\")\n if identities is not None:\n if identity in identities:\n return network\n return None",
"def get_transport_id_by_name(transport_name: str) -> int:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select id from transport where name = '{}';\".format(transport_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]"
]
| [
"0.7981729",
"0.74194354",
"0.6850195",
"0.6796734",
"0.6718263",
"0.671195",
"0.665859",
"0.665859",
"0.665859",
"0.65889955",
"0.658055",
"0.65149903",
"0.6505313",
"0.6490205",
"0.6374185",
"0.6365825",
"0.63096875",
"0.6294096",
"0.62506366",
"0.62314224",
"0.6166681",
"0.6151831",
"0.6134551",
"0.6042212",
"0.60419494",
"0.60080624",
"0.60070366",
"0.6002635",
"0.6002208",
"0.6000571"
]
| 0.83324456 | 0 |
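A minimal sketch of the same name -> id lookup as the entry above, run against a stubbed payload shaped like a Neutron GET /v2.0/networks response (the sample ids are made up):

sample = {"networks": [{"id": "3f0a-...", "name": "private"},
                       {"id": "9c1b-...", "name": "public"}]}

def network_id_by_name(payload: dict, name: str) -> str:
    for net in payload["networks"]:
        if net["name"] == name:
            return net["id"]
    raise AttributeError(f"No network named {name}")

assert network_id_by_name(sample, "public") == "9c1b-..."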
Get the admin project identifier. | def get_admin_project_id() -> str:
token_rq = request(
method="POST", url=app.config["TOKEN_REF"], json=app.config["TOKEN_BODY"],
)
if not token_rq.ok:
raise HTTPError(token_rq.status_code)
projects_rq = request(
method="GET",
url=app.config["PROJECTS_REF"],
headers=build_header(token_rq.headers["X-Subject-Token"]),
)
if not projects_rq.ok:
raise HTTPError(projects_rq.status_code)
admin_prj_id = None
for project in projects_rq.json()["projects"]:
if project["name"] == "admin":
admin_prj_id = project["id"]
break
else:
raise ValueError("Admin project id not found")
return admin_prj_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_project_admin_instance_name(projectname):\n\n return \"{}admin\".format(projectname.lower())",
"def get_project_admin_instance_name(self):\n\n return get_project_admin_instance_name(self.short_name)",
"def get_project(self):\n if self.api_version == 2:\n return self.creds.get('tenant_id') or self.creds.get('tenant_name')\n else:\n return self.creds.get('project_id') or self.creds.get('project_name')",
"def get_projectname(project_admin_instance_name):\n if not \"admin\" in project_admin_instance_name:\n raise ValueError(\"expected an admin site instance name ending in 'admin',\"\n \" but did not find this in value '{}'\".format(project_admin_instance_name))\n return project_admin_instance_name[:-5]",
"def getDbAdminUser():\n if \"DB_ADMIN\" in controller.CONF.keys():\n return controller.CONF[\"DB_ADMIN\"]\n\n return basedefs.DB_ADMIN",
"def tenant_project_id(self) -> str:\n return pulumi.get(self, \"tenant_project_id\")",
"def getId(self):\n return self.getUserName()",
"def getId(self):\n return self.getUserName()",
"def get_id(self):\r\n return self.username",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def admin_username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"admin_username\")",
"def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")",
"def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")",
"def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")",
"def get_project_id():\n return os.environ.get('project')",
"def admin_user(self) -> pulumi.Input['LabVirtualMachineAdminUserArgs']:\n return pulumi.get(self, \"admin_user\")",
"def user(self):\n return self._project.user",
"def managed_by_tenant_id(self) -> str:\n return pulumi.get(self, \"managed_by_tenant_id\")",
"def __getNewAdminID(self):\n return db_main.getHandle().seqNextVal(\"admins_id_seq\")",
"def project_id(self) -> str:\n return pulumi.get(self, \"project_id\")",
"def project_id(self) -> str:\n return pulumi.get(self, \"project_id\")",
"def project_id(self) -> str:\n return pulumi.get(self, \"project_id\")",
"def project_id(self) -> str:\n return pulumi.get(self, \"project_id\")",
"def _current_login_user(self):\n return self.env.uid",
"def get_project_id(self):\n return self._project_id",
"def administrators(self):\n store = self['__store']\n admin_group = store.get(self.get('admin_group_id', None))\n if admin_group:\n return admin_group.name\n return 'nothing'",
"def managee_tenant_id(self) -> str:\n return pulumi.get(self, \"managee_tenant_id\")",
"def _get_project_id():\n\n extras = BaseHook.get_connection('google_cloud_default').extra_dejson\n key = 'extra__google_cloud_platform__project'\n if key in extras:\n project_id = extras[key]\n else:\n raise ('Must configure project_id in google_cloud_default '\n 'connection from Airflow Console')\n return project_id"
]
| [
"0.7387236",
"0.69870627",
"0.69535977",
"0.68997014",
"0.6872954",
"0.6857882",
"0.6620361",
"0.6620361",
"0.6566122",
"0.65610564",
"0.65610564",
"0.65610564",
"0.6554312",
"0.65290195",
"0.65290195",
"0.65290195",
"0.6526947",
"0.6487364",
"0.6470634",
"0.6441271",
"0.63720155",
"0.6350278",
"0.6350278",
"0.6350278",
"0.6350278",
"0.6322736",
"0.63219786",
"0.6287324",
"0.62465096",
"0.620265"
]
| 0.7840168 | 0 |
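A small sketch of the "find the admin project" step from the entry above, against a stubbed Keystone GET /v3/projects payload (sample data is illustrative only):

projects = {"projects": [{"id": "111", "name": "demo"},
                         {"id": "222", "name": "admin"}]}

admin_id = next((p["id"] for p in projects["projects"] if p["name"] == "admin"), None)
if admin_id is None:
    raise ValueError("Admin project id not found")
assert admin_id == "222"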
Make a request to get active instances from devstack. | def get_instances() -> dict:
url = f"{app.config['COMPUTE_SERVERS_REF']}/detail"
instances_rq = request(
method="GET", url=url, headers=build_header(), params={"vm_state": "active"},
)
if not instances_rq.ok:
        raise HTTPError(instances_rq.status_code)
answer = {"servers": list()}
for instance in instances_rq.json()["servers"]:
instance_info = dict(name=instance["name"])
instance_info["ip_addresses"] = list()
for network, info in instance["addresses"].items():
instance_info["ip_addresses"].extend(entry["addr"] for entry in info)
answer["servers"].append(instance_info)
return answer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")",
"def _wait_active(client, request_id, max_num):\n logging.info('Waiting for instances to be active')\n while True:\n res = client.describe_spot_fleet_instances(SpotFleetRequestId=request_id)\n if len(res['ActiveInstances']) == max_num:\n logging.info('Instances are active now.')\n instance_id_list = [ i['InstanceId'] for i in res['ActiveInstances'] ]\n return instance_id_list\n time.sleep(10)",
"def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body",
"def access_spot_instance_state() -> Response:\n retry_session = requests_retry_session()\n response = retry_session.get(INSTANCE_ACTION_URL)\n return response",
"def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()",
"def instances(self, alwaysIncludeEmail=None, maxAttendees=None,\r\n maxResults=None, originalStart=None, pageToken=None,\r\n showDeleted=None, timeZone=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/instances'.format(self.get_url())\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json",
"def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances",
"def instances(request): #pylint: disable=unused-argument\n response = list()\n # Retrieve filtered parameter on GET. It's used to display all instances or just user instances\n # In all cases, if user is not superuser, only user instances are displayed\n if 'filtered' in request.GET:\n filtered = ast.literal_eval(request.GET['filtered'])\n else:\n # By default, we filter based on user_id\n filtered = True\n # By default, we just list user instances, not all instances\n if not request.user.is_superuser:\n # If user is superuser and user are requesting admin view of instances\n # We ask a full list of instances\n filtered = True\n\n # We retrieve data from backend\n data_instances = BACKEND.instances(request, filtered)\n data_users = BACKEND.users()\n data_projects = BACKEND.projects()\n\n # We merge user and project information w/ instances\n for data_instance in data_instances:\n try:\n project = \"{name}\".format(\n **data_projects[data_instances[data_instance]['project_id']])\n except KeyError:\n project = data_instances[data_instance]['project_id']\n\n try:\n user = \"{name}\".format(\n **data_users[data_instances[data_instance]['user_id']])\n except KeyError:\n user = data_instances[data_instance]['user_id']\n response.append({\n 'id': data_instances[data_instance]['id'],\n 'name': data_instances[data_instance]['name'],\n 'created_at': data_instances[data_instance]['created_at'],\n 'lease_end': data_instances[data_instance]['lease_end'],\n 'project': project,\n 'user': user\n })\n return JsonResponse(response, safe=False)",
"def show_instances():\n return get_instances()",
"def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def pass_instances(self, active=None):\n\t\tif active is None:\n\t\t\tpath = f'{self.BIKE_ENDPOINT}user/current/pass_instance?{self.secret_key}'\n\t\t\tresponse = requests.get(path, headers=self.AUTH_HEADER).json()\n\t\telif active:\n\t\t\tpath = f'{self.BIKE_ENDPOINT}user/current/pass_instance?active=true&{self.secret_key}'\n\t\t\tresponse = requests.get(path, headers=self.AUTH_HEADER).json()\n\t\telif not active:\n\t\t\tpath = f'{self.BIKE_ENDPOINT}user/current/pass_instance?active=false&{self.secret_key}'\n\t\t\tresponse = requests.get(path, headers=self.AUTH_HEADER).json()\n\n\t\treturn response",
"def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})",
"def list_instances_xhr():\n if request.method == \"GET\":\n instances = list_instances_request()\n user_groups_list = list_user_groups(session)\n user_groups = []\n for groups in user_groups_list:\n user_groups.append(groups[\"metadata\"][\"name\"])\n return jsonify(instances, user_groups)",
"def list_instance_name():\n\n if request.method == \"GET\":\n with lock:\n names = list(instances.keys())\n return jsonify(names)\n return Response(status=200)",
"def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()",
"def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"",
"def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)",
"def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances",
"def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))",
"def get(self, controller, data, *args, **kwargs): \n task_manager = controller.get_task_manager()\n res = task_manager.get_all_tasks(details=True)\n resp = {\n u'task-instances':res,\n u'count':len(res)\n } \n return resp",
"def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]",
"def instances(self):\n return self.get('instances')",
"def index(self, req):\n LOG.info(\"List all the nova-compute hosts in the system\")\n ctxt = req.environ['nova.context']\n authorize(ctxt)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n services = dbapi.service_get_all_compute_sorted(ctxt)\n # services looks like (Service(object), Decimal('0'))\n # must convert from Decimal('0') to int() because no JSON repr\n hosts = [{'name':srv[0].host,\n 'instanceCount':int(srv[1])}\n for srv in services]\n return {'hosts': hosts}",
"def list_instances(self):\n # list instances\n self._list_instances()",
"def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)",
"def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)",
"def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return",
"def request_endpoints(self):\n\n endpoints_url = self.std[\"api\"]\n endpoints_paramd = {\n \"access_token\": self.std[\"access_token\"]\n }\n\n endpoints_response = requests.get(url=endpoints_url, params=endpoints_paramd)\n print endpoints_response\n self.endpointd = endpoints_response.json()[0]",
"def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceStatusArgs']]]]:\n return pulumi.get(self, \"instances\")",
"def api(self):\n res = self.client.call('/', 'GET', data='')\n self.logger.debug('Get openstack identity api versions: %s' % truncate(res))\n return res[0]"
]
| [
"0.62801725",
"0.6206286",
"0.6043972",
"0.60251874",
"0.601637",
"0.6010178",
"0.600573",
"0.59135395",
"0.59109515",
"0.59018254",
"0.580947",
"0.57854295",
"0.5749351",
"0.5730138",
"0.56535864",
"0.5635016",
"0.5552074",
"0.55247796",
"0.5519387",
"0.5506227",
"0.5499615",
"0.54896504",
"0.54835904",
"0.5436819",
"0.5420648",
"0.54187006",
"0.5402021",
"0.5382925",
"0.53671753",
"0.536619"
]
| 0.63484895 | 0 |
Find flavor id by its name. | def find_flavor_id(flavor_name: str):
for flavor in get_flavors()["flavors"]:
if flavor_name == flavor["name"]:
return flavor["id"]
raise AttributeError(f"No flavor '{flavor_name}' found") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_flavor_id(self, flavor_name):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" +\\\n self.cloud_admin_info[\"project_id\"] + \\\n \"/flavors/detail\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from server while getting flavors.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get flavor ID Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n\n for flavors in output['flavors']:\n if flavors['name'].lower() == flavor_name.lower():\n LOG_OBJ.debug(\"Flavor Name: %s, ID: %s\" % (flavor_name,\n flavors['id']))\n return flavors['id']\n\n LOG_OBJ.error(\"Flavor:%s is NOT found\" % flavor_name)",
"def get_flavor(name):\r\n return nova.flavors.find(name=name)",
"def flavor(self, name=None):\n return self.find(self.flavors(), name=name)",
"def _get_flavor_name(self, flavor_id):\n for name, f_id in FLAVOR_ID.items():\n if f_id == flavor_id:\n return name",
"def find_flavor(self, name_or_id, ignore_missing=False):\n return self._find(_flavor.Flavor, name_or_id,\n ignore_missing=ignore_missing)",
"def find_flavor(self, name_or_id, ignore_missing=True):\n return self._find(\n _flavor.Flavor, name_or_id, ignore_missing=ignore_missing\n )",
"def flavor_id(self):\n return self._flavor_id",
"def flavor_id(self):\n return self._flavor_id",
"def _generate_flavorid(self):\n nonexistent_flavor = 2700\n flavor_ids = [value[\"id\"] for key, value in\n instance_types.get_all_types().iteritems()]\n while nonexistent_flavor in flavor_ids:\n nonexistent_flavor += 1\n else:\n return nonexistent_flavor",
"def get_flavor(self, flavor_id):\n return self._flavor_manager.get(flavor_id)",
"def get_flavor_by_uuid(cls, flavor_uuid):\n return cls.dbdriver.get_flavor_by_uuid(flavor_uuid)",
"def get_flavor(self, flavor_id):\n url = '%s/flavors/%s' % (self.catalog['compute'], flavor_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavor']\n else:\n LOG.error('Get flavor failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def flavor(self, name=None):\n raise NotImplementedError",
"def get_flavor(self, flavor):\n return self._get(_flavor.Flavor, flavor)",
"def get_flavor(self, flavor):\n return self._get(_flavor.Flavor, flavor)",
"def fregion_id_by_name(name=None):\n f_region_types = FilteredElementCollector(doc).OfClass(FilledRegionType)\n for fregion_type in f_region_types:\n fregion_name = Element.Name.GetValue(fregion_type)\n if not name or name.lower() == fregion_name.lower():\n return fregion_type.Id\n # Loops through all, not found: use last\n else:\n print('Color not specified or not found.')\n return fregion_type.Id",
"def get_flavor(flavor_id, include_deleted=False):\n\n try:\n flavor_id = int(flavor_id)\n if include_deleted:\n return Flavor.objects.get(id=flavor_id)\n else:\n return Flavor.objects.get(id=flavor_id, deleted=include_deleted)\n except (ValueError, TypeError):\n raise faults.BadRequest(\"Invalid flavor ID '%s'\" % flavor_id)\n except Flavor.DoesNotExist:\n raise faults.ItemNotFound('Flavor not found.')",
"def get_flavors() -> dict:\n flavor_rq = request(\n method=\"GET\", url=app.config[\"FLAVORS_REF\"], headers=build_header(),\n )\n\n if not flavor_rq.ok:\n HTTPError(f\"Can not get flavor id for virtual machine: {flavor_rq.status_code}\")\n\n return flavor_rq.json()",
"def get_flavor(self, request, tenant_id, flavor_id):\n response_data = get_flavor(flavor_id)\n request.setResponseCode(response_data[1])\n return json.dumps(response_data[0])",
"def FlavorName(flavor):\n if isinstance(flavor, tuple):\n return flavor[0]\n else:\n return flavor",
"def _existing_flavor(self):\n return instance_types.get_all_types().keys()[0]",
"def get_preset_by_id(preset_id: Text):\n presets = get_presets()[\"presets\"]\n for preset in presets:\n if preset_id == preset[\"id\"]:\n return preset",
"def get_preset_by_id(preset_id: Text):\n presets = get_presets()[\"presets\"]\n for preset in presets:\n if preset_id == preset[\"id\"]:\n return preset",
"def varray_query(self, name):\n if common.is_uri(name):\n return name\n\n uris = self.varray_list()\n\n for uri in uris:\n varray = self.varray_show(uri)\n if varray and varray['name'] == name:\n return varray['id']\n\n raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR,\n (_(\"varray %s: not found\") % name))",
"def _get_flavor_ref(self, flavor):\n flavor_obj = None\n if isinstance(flavor, CloudDatabaseFlavor):\n flavor_obj = flavor\n elif isinstance(flavor, int):\n # They passed an ID or a size\n try:\n flavor_obj = self.get_flavor(flavor)\n except exc.NotFound:\n # Must be either a size or bad ID, which will\n # be handled below\n pass\n if flavor_obj is None:\n # Try flavor name\n flavors = self.list_flavors()\n try:\n flavor_obj = [flav for flav in flavors\n if flav.name == flavor][0]\n except IndexError:\n # No such name; try matching RAM\n try:\n flavor_obj = [flav for flav in flavors\n if flav.ram == flavor][0]\n except IndexError:\n raise exc.FlavorNotFound(\"Could not determine flavor from \"\n \"'%s'.\" % flavor)\n # OK, we have a Flavor object. Get the href\n href = [link[\"href\"] for link in flavor_obj.links\n if link[\"rel\"] == \"self\"][0]\n return href",
"def get_id_from_name(item_name):\n try:\n return next(item for item in mapping if item[\"name\"].lower() == item_name.lower())[\"id\"]\n except StopIteration:\n return None",
"def find_resource_by_name_or_id(self, resource_name, value):\n try:\n entity = getattr(self.client(), resource_name)\n return entity.get(value).id\n except sahara_base.APIException:\n return self.find_resource_by_name(resource_name, value)",
"def FindName(*args, **kwargs):\n return _gdi_.ColourDatabase_FindName(*args, **kwargs)",
"def GetFlavor(params):\n flavors = {\n 'cygwin': 'win',\n 'win32': 'win',\n 'darwin': 'mac',\n }\n if 'flavor' in params:\n return params['flavor']\n if sys.platform in flavors:\n return flavors[sys.platform]\n if sys.platform.startswith('sunos'):\n return 'solaris'\n if sys.platform.startswith('freebsd'):\n return 'freebsd'\n if sys.platform.startswith('openbsd'):\n return 'openbsd'\n if sys.platform.startswith('netbsd'):\n return 'netbsd'\n if sys.platform.startswith('aix'):\n return 'aix'\n if sys.platform.startswith('zos'):\n return 'zos'\n if sys.platform.startswith('os390'):\n return 'zos'\n return 'linux'",
"def test_get_flavor(self):\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n self.assertEqual(self.flavor_ref, flavor.id)"
]
| [
"0.8154346",
"0.752519",
"0.7301478",
"0.72637576",
"0.7042125",
"0.70403224",
"0.6932511",
"0.6932511",
"0.6680423",
"0.66455835",
"0.6394901",
"0.63073426",
"0.626384",
"0.61803275",
"0.61803275",
"0.6032037",
"0.59394425",
"0.5854552",
"0.5762916",
"0.5756223",
"0.57414234",
"0.56874174",
"0.56874174",
"0.56362194",
"0.56339043",
"0.56308615",
"0.5603898",
"0.5564887",
"0.5550396",
"0.5513715"
]
| 0.8740929 | 0 |
Prepares the json files for the Voicebank dataset. Expects the data folder to be the same format as the output of ``download_vctk()`` below. Arguments | def prepare_voicebank(
data_folder, save_folder, valid_speaker_count=2, skip_prep=False
):
if skip_prep:
return
    # Setting output files
save_json_train = os.path.join(save_folder, TRAIN_JSON)
save_json_valid = os.path.join(save_folder, VALID_JSON)
save_json_test = os.path.join(save_folder, TEST_JSON)
# Check if this phase is already done (if so, skip it)
if skip(save_json_train, save_json_test, save_json_valid):
logger.info("Preparation completed in previous run, skipping.")
return
train_clean_folder = os.path.join(
data_folder, "clean_trainset_28spk_wav_16k"
)
train_noisy_folder = os.path.join(
data_folder, "noisy_trainset_28spk_wav_16k"
)
train_txts = os.path.join(data_folder, "trainset_28spk_txt")
test_clean_folder = os.path.join(data_folder, "clean_testset_wav_16k")
test_noisy_folder = os.path.join(data_folder, "noisy_testset_wav_16k")
test_txts = os.path.join(data_folder, "testset_txt")
# Setting the save folder
if not os.path.exists(save_folder):
os.makedirs(save_folder)
# Additional checks to make sure the data folder contains Voicebank
check_voicebank_folders(
train_clean_folder,
train_noisy_folder,
train_txts,
test_clean_folder,
test_noisy_folder,
test_txts,
)
logger.debug("Creating lexicon...")
lexicon = create_lexicon(os.path.join(data_folder, "lexicon.txt"))
logger.info("Creating json files for noisy VoiceBank...")
logger.debug("Collecting files...")
extension = [".wav"]
valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count]
wav_lst_train = get_all_files(
train_noisy_folder, match_and=extension, exclude_or=valid_speakers,
)
wav_lst_valid = get_all_files(
train_noisy_folder, match_and=extension, match_or=valid_speakers,
)
wav_lst_test = get_all_files(test_noisy_folder, match_and=extension)
logger.debug("Creating json files for noisy VoiceBank...")
create_json(
wav_lst_train, save_json_train, train_clean_folder, train_txts, lexicon
)
create_json(
wav_lst_valid, save_json_valid, train_clean_folder, train_txts, lexicon
)
create_json(
wav_lst_test, save_json_test, test_clean_folder, test_txts, lexicon
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file):\n if not gfile.Exists(data_dir):\n gfile.MkDir(data_dir)\n\n # Get wmt data to the specified directory.\n train_path = get_qa_set(data_dir, train_data_file)\n dev_path = get_qa_set(data_dir, dev_data_file)\n\n # Create vocabularies of the appropriate sizes.\n vocab_path = os.path.join(data_dir, \"vocab.txt\")\n create_vocabulary(vocab_path, vocab_file)\n\n # Create token ids for the training data.\n src_train_ids_path = train_path + \".src.ids\"\n targ_train_ids_path = train_path + \".targ.ids\"\n data_to_token_ids(train_path + \".src\", src_train_ids_path, vocab_path)\n data_to_token_ids(train_path + \".targ\", targ_train_ids_path, vocab_path)\n\n # Create token ids for the development data.\n src_dev_ids_path = dev_path + \".src.ids\"\n targ_dev_ids_path = dev_path + \".targ.ids\"\n data_to_token_ids(dev_path + \".src\", src_dev_ids_path, vocab_path)\n data_to_token_ids(dev_path + \".targ\", targ_dev_ids_path, vocab_path)\n\n return (src_train_ids_path, targ_train_ids_path,\n src_dev_ids_path, targ_dev_ids_path,\n vocab_path)",
"def make_json(prefix, input_dir):\n # get list of files\n file_list = os.listdir(input_dir)\n # set reference sequence\n tracklist = {'formatVersion': 1,\n 'refSeqs': '%s.ref.fa.fai' % prefix,\n 'tracks': []}\n # add reference sequence track to tracklist.json\n tracklist['tracks'].append({\"category\": \"Reference sequence\",\n \"key\": \"Reference sequence\",\n \"label\": \"Reference sequence\",\n \"type\": \"SequenceTrack\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/IndexedFasta\",\n \"urlTemplate\": \"%s.ref.fa\" % prefix,\n \"refSeqOrder\": \"False\"})\n tracklist['tracks'].append({\"category\": \"Consensus sequence\",\n \"key\": \"Consensus sequence\",\n \"label\": \"Consensus sequence\",\n \"type\": \"SequenceTrack\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/IndexedFasta\",\n \"urlTemplate\": \"%s.cons.fa\" % prefix,\n \"refSeqOrder\": \"False\"})\n # add bigwig track to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Coverage\",\n \"label\": \"Coverage\",\n \"type\": \"JBrowse/View/Track/Wiggle/XYPlot\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BigWig\",\n \"autoscale\": \"local\",\n \"urlTemplate\": \"%s.sorted.bw\" % prefix\n })\n # add BAM Sequence Coverage to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Sequence reads (SNPs/Coverage)\",\n \"label\": \"Sequence reads (SNPs/Coverage)\",\n \"type\": \"JBrowse/View/Track/SNPCoverage\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BAM\",\n \"urlTemplate\": \"%s.sorted.capped.bam\" % prefix,\n \"cacheMismatches\": \"True\",\n \"chunkSizeLimit\": \"5000000\"\n })\n # add BAM Sequence Alignments to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Sequence reads (Alignment)\",\n \"label\": \"Sequence reads (Alignment)\",\n \"type\": \"JBrowse/View/Track/Alignments2\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BAM\",\n \"urlTemplate\": \"%s.sorted.capped.bam\" % prefix,\n # add bigwig histogram option\n \"cacheMismatches\": \"True\",\n \"chunkSizeLimit\": \"5000000\"\n })\n # add GFF3 file to trackList.json\n tracklist['tracks'].append({\"category\": \"Annotation\",\n \"key\": \"Genbank annotation\",\n \"label\": \"Genbank annotation\",\n \"type\": \"JBrowse/View/Track/CanvasFeatures\",\n \"urlTemplate\": \"%s.gff3.gz\" % prefix,\n \"style\": {\n \"_defaultHistScale\": 4,\n \"_defaultLabelScale\": 30,\n \"_defaultDescriptionScale\": 120,\n # Comma-separated list of case-insensitive feature tags to use\n # for showing the feature's label.\n # The first one found will be used. Default 'name,id'.\n \"label\": \"product,id\",\n # style→description\tComma-separated list of case-insensitive\n # feature tags to check for the feature's long description.\n # The first one found will be used. Default 'note,description'.\n # If blank no description is used.\n \"description\": \"note, description\"\n },\n })\n\n json_path = os.path.join(input_dir, 'trackList.json')\n with open(json_path, 'wt') as output_handle:\n json_raw_str = json.dumps(tracklist, indent=4)\n output_handle.write(json_raw_str)\n return 'trackList.json'",
"def prepare_conll_data(brat_dirname,\n json_filename,\n dest_dirname):\n brat2json_dir(brat_dirname, json_filename)\n data = read_json(json_filename)\n # split data into training, validation and test\n training_data, validation_data, test_data = split_data(data)\n # output training data into CoNLL format\n output_conll(entity2label_batch(training_data), dest_dirname + 'training.conll')\n # output all data into json file\n write_json(dest_dirname + 'training.json', training_data)\n write_json(dest_dirname + 'validation.json', validation_data)\n write_json(dest_dirname + 'test.json', test_data)",
"def setup():\n for dir_path in [train_dir, output_dir]:\n Path(dir_path).mkdir(exist_ok=True)\n\n # create the training and test data files that we will use\n create_jsonlines_feature_files(train_dir)",
"def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')",
"async def data(normalizers):\n harvester_path = DATA_DIR / \"civic_harvester.json\"\n c = CIViCTransform(data_dir=DATA_DIR, harvester_path=harvester_path,\n normalizers=normalizers)\n await c.transform()\n c.create_json(transform_dir=DATA_DIR, filename=FILENAME)\n with open(DATA_DIR / FILENAME, \"r\") as f:\n data = json.load(f)\n return data",
"def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")",
"def setup_data( self ):\n with open( f'{project_dir}/data/05_source_key_data.json', 'r', encoding='utf-8' ) as f:\n dct = json.loads( f.read() )\n with open( f'{project_dir}/data/04_snapshot_open_textbook.json', 'r', encoding='utf-8' ) as f:\n lst = json.loads( f.read() )\n return ( dct, lst )",
"def build():\n for root, dirs, files in os.walk(IN_PATH):\n for filename in files:\n if filename.endswith('.csv'):\n with open(os.path.join(IN_PATH, filename), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n path = row[0].split('.')\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = row[1]\n else:\n d = d[path[i]]\n with open (os.path.join(OUT_PATH, filename.replace('.csv', '.json')), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open (os.path.join(WEB_PATH, filename.replace('.csv', '.js')), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))\n\n with open(os.path.join(IN_PATH, 'en_US.csv'), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n path = row[0].split('.')\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = zz_string(row[1], row[0])\n else:\n d = d[path[i]]\n with open(os.path.join(OUT_PATH, 'zz_ZZ.json'), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open(os.path.join(WEB_PATH, 'zz_ZZ.js'), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))",
"def _prepare_samples(args):\n if args.galaxy:\n system_config = args.galaxy\n else:\n system_config = os.path.join(_get_data_dir(), \"galaxy\", \"bcbio_system.yaml\")\n config = yaml.load(open(system_config))\n config['algorithm'] = {}\n data = []\n vcf_files = [fn for fn in args.files if fn.endswith('vcf')]\n bam_files = [fn for fn in args.files if fn.endswith('bam')]\n fastq_files = [fn for fn in args.files if is_fastq(fn)]\n if not fastq_files:\n fastq_files = vcf_files\n for sample in fastq_files:\n dt = {}\n dt['name'] = splitext_plus(op.basename(sample))[0]\n dt['config'] = config\n dt['fastq'] = op.abspath(sample)\n if bam_files:\n dt['bam'] = _find_bam(bam_files, sample)\n data.append([dt])\n return data",
"def test_files(self, location):\n for filename in os.listdir(location):\n with open(location + '/' + filename) as json_file:\n data = json.load(json_file)\n self.test_data(data)",
"def prepare_data(self) -> None:\n if (self.root).is_dir():\n logger.info(\"Found the dataset.\")\n else:\n download_and_extract(self.root, DOWNLOAD_INFO)",
"def fix_jsons_in(bids_dir: Path):\n\n print(\"Finalizing task json files.\")\n\n for path in bids_dir.rglob(\"func/*_task-*.json\"):\n append_to_json_file(key=\"TaskName\", value=task_name_of(path), path_to_json=path)\n\n print(\"Appending echo times to phase difference json files.\")\n\n for path in bids_dir.rglob(\"fmap/*_phasediff.json\"):\n magnitude1_path = the_path_that_matches(pattern=\"sub-*_magnitude1.json\", in_directory=path.parent)\n magnitude2_path = the_path_that_matches(pattern=\"sub-*_magnitude2.json\", in_directory=path.parent)\n echo_time1 = value_of_key_in_json_file(\"EchoTime\", magnitude1_path)\n echo_time2 = value_of_key_in_json_file(\"EchoTime\", magnitude2_path)\n append_to_json_file(key=\"EchoTime1\", value=echo_time1, path_to_json=path)\n append_to_json_file(key=\"EchoTime2\", value=echo_time2, path_to_json=path)\n\n print(\"Setting targets of fieldmap json files.\")\n\n for path in bids_dir.rglob(\"fmap/*.json\"):\n func_dir = path.parent.parent / \"func\"\n trimmed_func_paths = [\"func/\" + func_path.name for func_path in func_dir.glob(\"*.nii\")]\n append_to_json_file(key=\"IntendedFor\", value=trimmed_func_paths, path_to_json=path)",
"def main():\n os.makedirs(PATH)\n fetch_data()\n convert_to_json(model_list, 'models.json', is_model=True)\n convert_to_json(backend_list, 'backends.json')\n convert_to_json(type_list, 'types.json')\n convert_to_json(featurizer_list, 'featurizers.json')",
"def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))",
"def prepare_data(vocabulary_size):\n print(\"Downloading data from \" + _DATA_DIR_ +\"..\")\n getData(_DATA_DIR_)\n print(\"Creating Vocabulary..\")\n create_vocabulary( _VOCAB_DIR_, _RAW_SENTENCES_DIR_, vocabulary_size )\n print(\"Converting sentences to sequences of ids..\")\n data_to_token_ids( _RAW_SENTENCES_DIR_ , _SENTENCES_DIR, _VOCAB_DIR_ )",
"def extract_json_to_files(input_dir,output_dir):\n files={}\n files['train']='train-v1.1.json'\n files['dev']='dev-v1.1.json'\n\n for file in files:\n filename=os.path.join(input_dir,files[file])\n with open(filename,'r',encoding='utf-8') as data_file:\n examples = []\n dataset=json.load(data_file)\n count_total=total_exs(dataset)\n count_mapping_problem=0\n count_token_problem=0\n count_ansspan_problem=0\n count_examples=0\n for article_id in tqdm(range(len(dataset['data'])), desc=\"Preprocessing {}\".format(file)):\n article_paragraph=dataset['data'][article_id]['paragraphs']\n for paragraph_id in range(len(article_paragraph)):\n context=article_paragraph[paragraph_id]['context']\n context=context.replace(\"''\",'\"').replace(\"``\",'\"')\n context = context.replace('\\u3000', ' ').replace('\\u202f',' ').replace('\\u2009', ' ')#.replace(\"'\",\"'\")\n context=context.replace('\\-',' ')\n context_tokens=tokenize_sequence(context)\n context=context.lower()\n qas=article_paragraph[paragraph_id]['qas']\n charloc2wordloc=get_char_word_loc_mapping(context, context_tokens)\n if charloc2wordloc is None:\n count_mapping_problem+=len(qas)\n continue\n for qa in qas:\n question=qa['question'].lower()\n question_tokens=tokenize_sequence(question)\n\n ans_text=qa['answers'][0]['text'].lower()\n ans_text=ans_text.replace('\\u3000', ' ').replace('\\u202f', ' ').replace('\\u2009', ' ')\n ans_start_loc=qa['answers'][0]['answer_start']\n if qa['id'] in ['5706baed2eaba6190074aca5','57269c73708984140094cbb5','57269c73708984140094cbb7','572a11661d04691400779721','572a11661d04691400779722','572a11661d04691400779723','572a11661d04691400779724','572a11661d04691400779725','572a2cfc1d0469140077981b','572a3a453f37b319004787e9','572a84d3f75d5e190021fb3c']:\n ans_start_loc+=1\n if qa['id'] in ['572a5df77a1753140016aedf','572a5df77a1753140016aee0','572a84d3f75d5e190021fb38','572a84d3f75d5e190021fb39','572a84d3f75d5e190021fb3a','572a84d3f75d5e190021fb3b','572a85df111d821400f38bad','572a85df111d821400f38bae','572a85df111d821400f38baf','572a85df111d821400f38bb0']:\n ans_start_loc+=2\n if qa['id'] in ['572a5df77a1753140016aee1','572a5df77a1753140016aee2']:\n ans_start_loc+=3\n if qa['id'] in ['57286bf84b864d19001649d6','57286bf84b864d19001649d5']:\n ans_start_loc-=1\n if qa['id'] in ['5726bee5f1498d1400e8e9f3','5726bee5f1498d1400e8e9f4']:\n ans_start_loc-=2\n ans_end_loc=ans_start_loc+len(ans_text)\n\n if context[ans_start_loc:ans_end_loc]!=ans_text:\n count_ansspan_problem+=1\n continue\n ans_start_wordloc = charloc2wordloc[ans_start_loc][1] # answer start word loc\n ans_end_wordloc = charloc2wordloc[ans_end_loc-1][1] # answer end word loc\n assert ans_start_wordloc <= ans_end_wordloc\n\n ans_tokens = context_tokens[ans_start_wordloc:ans_end_wordloc + 1]\n if \"\".join(ans_tokens) != \"\".join(ans_text.split()):\n count_token_problem += 1\n #print(ans_text)\n #print(ans_tokens)\n continue # skip this question/answer pair\n examples.append((' '.join(context_tokens),' '.join(question_tokens),' '.join(ans_tokens),' '.join([str(ans_start_wordloc),str(ans_end_wordloc)])))\n print(\"Number of (context, question, answer) triples discarded due to char -> token mapping problems: \", count_mapping_problem)\n print(\"Number of (context, question, answer) triples discarded because character-based answer span is unaligned with tokenization: \",count_token_problem)\n print(\"Number of (context, question, answer) triples discarded due character span alignment problems (usually Unicode problems): \",count_ansspan_problem)\n 
print(\"Processed %i examples of total %i\\n\" % (len(examples), len(examples)+count_mapping_problem+count_token_problem+count_ansspan_problem))\n indices = list(range(len(examples)))\n np.random.shuffle(indices)\n with open(os.path.join(output_dir,file+'.context'),'w',encoding='utf-8') as context_file, \\\n open(os.path.join(output_dir,file+'.question'),'w',encoding='utf-8') as question_file, \\\n open(os.path.join(output_dir,file+'.answer'),'w',encoding='utf-8') as answer_file, \\\n open(os.path.join(output_dir,file+'.span'),'w',encoding='utf-8') as span_file:\n for i in indices:\n (context,question,answer,span)=examples[i]\n context_file.write(context+'\\n')\n question_file.write(question+'\\n')\n answer_file.write(answer+'\\n')\n span_file.write(span+'\\n')",
"def parse_json_data(settings, dataset):\n for directory in dataset: # for directory in list of directories\n directory[\"data\"] = []\n for record in directory[\"rawdata\"]: # each record is the raw JSON data of a file in a directory\n jsonrootpath = get_json_root_path(record)\n globaloptions = get_json_global_options(record)\n #for item in record[\"client_stats\"]:\n # if \"job options\" in item.keys():\n # print(item[\"job options\"][\"iodepth\"])\n process_json_record(settings, directory, record, jsonrootpath, globaloptions)\n #print(\"================================\")\n #print(directory[\"data\"])\n #for directory in dataset:\n # for item in directory[\"data\"]:\n # print(item[\"iodepth\"])\n directory[\"data\"] = sort_list_of_dictionaries(directory[\"data\"])\n return dataset",
"def create_data_folders() -> None:\n if not os.path.exists(\"data/save\"):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/save\")\n if not os.path.exists(\"data/critics\"):\n os.mkdir(\"./data/critics\")\n if not os.path.exists('data/policies/'):\n os.mkdir('data/policies/')\n if not os.path.exists('data/results/'):\n os.mkdir('data/results/')",
"def file_loader(self):\n\n for folder in self.config[\"data_folders\"]:\n f = os.path.join(folder, self.data_file)\n yield jsonlist.load_file(f)",
"def prepare_training_data(self, data_folder_path):\n\n #get the directories (one directory for each subject) in data folder\n dirs = os.listdir(data_folder_path)\n\n #list to hold all subject faces\n faces = []\n #list to hold labels for all subjects\n labels = []\n #List to hold subject names\n subjects = []\n\n label = -1;\n #let's go through each directory and read images within it\n for dir_name in dirs:\n\n #ignore system files like .DS_Store\n if dir_name.startswith(\".\"):\n continue;\n\n label += 1\n subjects.append(dir_name)\n logger.info(\"label=%d subject=%s\" %(label, dir_name))\n\n #build path of directory containing images for current subject subject\n #sample subject_dir_path = \"training-data/Bruce\"\n subject_dir_path = data_folder_path + \"/\" + dir_name\n\n #get the images names that are inside the given subject directory\n subject_images_names = os.listdir(subject_dir_path)\n\n #go through each image name, read image,\n #detect face and add face to list of faces\n for image_name in subject_images_names:\n\n #ignore system files like .DS_Store\n if image_name.startswith(\".\"):\n continue;\n\n #sample image path = training-data/Bruce/face1.png\n image_path = subject_dir_path + \"/\" + image_name\n image = cv2.imread(image_path)\n logger.info(\"file size: %d. numpy image size: %d\" %(os.path.getsize(image_path), len(image)))\n face, rect = self.detect_face(image)\n\n #we will ignore faces that are not detected\n if face is not None:\n #add face to list of faces\n faces.append(face)\n #add label for this face\n labels.append(label)\n\n return faces, labels, subjects",
"def load_video_data(self):\n self.file_videos = [\n Video.from_file(path, self)\n for path in self.video_dir.glob('*.json')\n ]",
"def load_data(self) -> None:\n self.paths: List[str] = []\n self.durations: List[float] = []\n self.transcriptions: List[str] = []\n\n def raise_(err):\n \"\"\"raises error if problem during os.walk\"\"\"\n raise err\n\n for subset in self.subsets:\n subset_path = os.path.join(self.root, self.base_dir, subset)\n for root, dirs, files in os.walk(subset_path, onerror=raise_):\n if not files:\n continue\n matches = fnmatch.filter(files, \"*.trans.txt\")\n assert len(matches) == 1, \"> 1 transcription file found\"\n self._parse_transcription_file(root, matches[0])\n\n self._sort_by_duration()",
"def loadFiles(root=\"data/TAIWAN_RAW_DATA/ADHD\"):\n\tdata_rt = [] # realtime.csv\n\tdata_trial = [] # trialdata.csv\n\tdata_id = [] # caseid/subjectid\n\tRealTime = \"A2RealTime_\"\n\tTrialData = \"A2TrialData_\"\n\tfolder_list = os.listdir(root) # list of subfolders in the root\n\tfor folders in folder_list:\n\t\tfolders_path = os.path.join(root,folders)\n\t\tif folders.find(\"pass\") != -1:\n\t\t\tcontinue\n\t\t\t\n\t\ttry:\n\t\t\tdata_rt.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t RealTime+folders[3:]+\".csv\")))\n\t\t\tdata_trial.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t\t TrialData+folders[3:]+\".csv\")))\n\t\t\tdata_id.append(int(folders.split('_')[1]))\n\t\texcept:\n\t\t\tprint(os.path.join(folders_path,TrialData+folders[3:]+\".csv\"))\n\t\t\t\n\treturn data_rt,data_trial,data_id,folder_list",
"def read_data(train_data_dir, test_data_dir):\n clients = []\n train_num_samples = []\n test_num_samples = []\n train_data = {}\n test_data = {}\n\n train_files = os.listdir(train_data_dir)\n train_files = [f for f in train_files if f.endswith(\".json\")]\n # print(train_files)\n for f in train_files:\n file_path = os.path.join(train_data_dir, f)\n with open(file_path, \"r\") as inf:\n cdata = json.load(inf)\n clients.extend(cdata[\"users\"])\n train_num_samples.extend(cdata[\"num_samples\"])\n train_data.update(cdata[\"user_data\"])\n # print(cdata['user_data'])\n test_files = os.listdir(test_data_dir)\n test_files = [f for f in test_files if f.endswith(\".json\")]\n for f in test_files:\n file_path = os.path.join(test_data_dir, f)\n with open(file_path, \"r\") as inf:\n cdata = json.load(inf)\n test_num_samples.extend(cdata[\"num_samples\"])\n test_data.update(cdata[\"user_data\"])\n\n # parse python script input parameters\n parser = argparse.ArgumentParser()\n main_args = add_args(parser)\n\n class Args:\n def __init__(self, client_id, client_num_per_round, comm_round):\n self.client_num_per_round = client_num_per_round\n self.comm_round = comm_round\n self.client_id = client_id\n self.client_sample_list = []\n\n client_list = []\n for client_number in range(main_args.client_num_per_round):\n client_list.append(\n Args(client_number, main_args.client_num_per_round, main_args.comm_round)\n )\n return (\n clients,\n train_num_samples,\n test_num_samples,\n train_data,\n test_data,\n client_list,\n )",
"def prepare_data():\n user_name = os.environ.get('USER')\n traintest_corpus = ResumeCorpus('/Users/' + user_name + '/Documents/Data')\n random.shuffle(traintest_corpus.resumes)\n\n for resume in traintest_corpus.resumes:\n try:\n review_text = pre_processing(resume[0])\n review_text = \" \".join(review_text)\n data_dict['data'].append(review_text)\n data_dict['label'].append(resume[1])\n except:\n pass",
"def create_data_base():\n\n\tscript_files = []\n\tjson_files = []\n\t\n\t# get script files list\n\tfor file in os.listdir(\"learned_objects_scripts/\"):\n\t\tif file.endswith(\".script\"):\n\t\t\tscript_files.append(file)\n\n\t# get json files list\n\tfor file in os.listdir(\"object_models/\"):\n\t\tif file.endswith(\".json\"):\n\t\t\tjson_files.append(file)\n\t\n\t# create json file for new objects\n\tmodel_created = False\n\tfor file in script_files:\n\t\tif \"{}.json\".format(file[:-7]) not in json_files:\n\t\t\twith open(\"object_models/{}.json\".format(file[:-7]), 'w') as outfile:\n\t\t\t\tobj_model = object_script_to_model(\"learned_objects_scripts/\" + file)\n\t\t\t\tjson.dump(obj_model, outfile)\n\t\t\t\tmodel_created = True\n\t\t\t\tprint(\"model created for\", file)\n\tif not model_created:\n\t\tprint(\"data base is already up to date\")",
"def build(self):\n # open json, len 161,260\n at_json = open_json(self.json_names[0])\n link_json = open_json(self.json_names[1])\n # if need preprocessing, do it\n if self.args.img_preprocessing:\n print(\"resize imgs\")\n for i in tqdm(range(len(link_json))):\n image_url = \"image/\" + link_json[i][\"image_url_4x\"].split('/')[-1]\n img = Image.open(image_url)\n img = img.resize((224, 224))\n img.save(image_url)\n\n # create dataset\n itemlen = 0\n previd = 0\n for i in tqdm(range(len(link_json))):\n image_url = link_json[i][\"image_url_4x\"].split('/')[-1]\n uid = image_url.split('-')[0]\n if previd != uid:\n self.label.append(list(at_json[i].values())[2:])\n if i != 0:\n self.itemlen.append(itemlen)\n itemlen = 0\n self.input.append(f\"{self.frontpath}dataset/image/\" + image_url)\n previd = uid\n itemlen += 1\n self.itemlen.append(itemlen)\n self.separate()\n self.dataset = {\n 'train': self.train,\n 'validation': self.val,\n 'test': self.test\n }\n\n print('finished dataset')",
"def main(json_data_filepath='dataset.json',\n dataset_path='.',\n filename_base='dataset',\n drop_irrelevant_tweets=True,\n add_company_datasets=False,\n logging_level=log.INFO,\n ):\n log.basicConfig(level=logging_level, format='%(message)s')\n log.info(f'building the dataset')\n\n if not os.path.isfile(json_data_filepath):\n log.fatal(f'\\tfilepath doesn\\'t exist: {json_data_filepath}')\n exit(-1)\n\n full_dataset_filepath = Path(dataset_path) / f'{filename_base}.csv'\n remove_filepath_if_exists(full_dataset_filepath)\n\n create_dataset(Path(json_data_filepath), full_dataset_filepath, drop_irrelevant_tweets)\n\n if add_company_datasets:\n create_separate_company_datasets(full_dataset_filepath,\n Path(dataset_path),\n filename_base)",
"def data():\n print (\"&\")\n res = {}\n\t\n # Load Data\n with open(DATA_PATH_TRAIN, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t \t\n with open(DATA_PATH_TEST, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t\t\t\n with open('tasks/env/data/data.json', 'w') as outfile:\n json.dump(res, outfile)"
]
| [
"0.62081885",
"0.6122723",
"0.6101541",
"0.6090445",
"0.5949171",
"0.5927241",
"0.59200364",
"0.58897924",
"0.58572596",
"0.5853755",
"0.58109206",
"0.57762074",
"0.57556504",
"0.5751836",
"0.5726079",
"0.57242584",
"0.56926864",
"0.56814444",
"0.5679649",
"0.5651899",
"0.5651511",
"0.56424725",
"0.5635004",
"0.5631469",
"0.56198305",
"0.55945086",
"0.555876",
"0.55492306",
"0.55302113",
"0.5529492"
]
| 0.65272224 | 0 |
Creates the lexicon object, downloading if it hasn't been done yet. Arguments | def create_lexicon(lexicon_save_filepath):
if not os.path.isfile(lexicon_save_filepath):
download_file(LEXICON_URL, lexicon_save_filepath)
# Iterate lexicon file and add the first pronunciation in the file for
# each word to our lexicon dictionary
lexicon = MISSING_LEXICON
delayed_words = {}
for line in open(lexicon_save_filepath):
line = line.split()
phns = " ".join(p.strip("012") for p in line[1:])
# Don't add words with punctuation until we can be sure they won't
# overwrite words without punctuation.
clean_word = remove_punctuation(line[0])
if clean_word != line[0] and clean_word not in delayed_words:
delayed_words[clean_word] = phns
elif clean_word == line[0] and clean_word not in lexicon:
lexicon[clean_word] = phns
# Add words with punctuation if they won't overwrite non-punctuated words
for word, phns in delayed_words.items():
if word not in lexicon:
lexicon[word] = phns
return lexicon | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_lexicon(self):\n self.logger.info(\"Preparing lexicon...\")\n current_dir = Path()\n dir_path = current_dir / \"data\" / \"break_data\" / \"lexicon_by_logical\"\n file_name = \"lexicon\"\n\n if self.domain_split:\n file_name += \"_domain_split\"\n elif self.length_split:\n file_name += \"_length_split\"\n file_name += \".pkl\"\n\n if not (dir_path / file_name).is_file():\n self.create_matching_lexicon(dir_path, file_name)\n data = load_obj(dir_path, file_name)\n\n # TODO delete this?\n # for type in data:\n # for ex in data[type]:\n # data[type][ex] = ast.literal_eval(data[type][ex])\n self.logger.info(\"Lexicon ready.\")\n return data",
"def __init__(self, lexicon):\n self._lexicon = set(lexicon)",
"def init_downloader(self) -> None:\n raise NotImplementedError",
"def lexicon() -> Lexicon:\n return Lexicon()",
"def __init__(self, domain: str = 'minecraft', resource_dir: Sequence[str] = ('src', 'main', 'resources'), indent: int = 2, ensure_ascii: bool = False, default_language: str = 'en_us', on_error: Callable[[str, Exception], Any] = None):\n self.resource_dir = utils.str_path(resource_dir)\n self.domain = domain\n self.indent = indent\n self.ensure_ascii = ensure_ascii\n self.default_language = default_language\n self.on_error = on_error\n\n if self.on_error is None:\n self.on_error = lambda file, err: None # Ignore errors\n\n # Internal buffers, used for tags and lang entries, which are all written at the same time\n self.lang_buffer: Dict[str, Dict[str, str]] = defaultdict(dict) # Keys are (language, translation key)\n self.tags_buffer: Dict[str, Dict[ResourceLocation, Tag]] = defaultdict(dict) # Keys are (tag type, tag name)\n\n # Statistics\n self.new_files: int = 0\n self.modified_files: int = 0\n self.unchanged_files: int = 0\n self.error_files: int = 0",
"def create_matching_lexicon(self, dir_path, file_name):\n # There are more examples in the lexicon dataset than in the logical examples\n # This function creates one-to-one mapping between them and stores lexicon dict in a file\n self.logger.info('Creating lexicon...')\n dataset_qdmr_lexicon = self.load_dataset(dir_path, 'QDMR-lexicon', self.logger)\n\n lexicon_lists = {'train': [], 'validation': [], 'test': []}\n for data_split in self.dataset_logical:\n lex_idx = 0\n lexicon_split = dataset_qdmr_lexicon[data_split]\n for i, example in enumerate(self.dataset_logical[data_split]):\n question = example['question_text']\n lexcion_found = False\n for j in range(lex_idx, len(lexicon_split)):\n lexicon_example = lexicon_split[j]\n if lexicon_example['source'] == question:\n str_lex = lexicon_example['allowed_tokens']\n lexicon_lists[data_split].append(str_lex)\n lex_idx = j + 1\n lexcion_found = True\n break\n # if it got here, no matching lexicon found in lexicon file\n if not lexcion_found:\n raise EOFError\n save_obj(dir_path, lexicon_lists, file_name)\n self.logger.info('Done creating lexicon.')",
"def create(self):\n\t\tlipsBaseFile.imp()",
"def __init__(self, filename=None):\n self.content = dict()\n if filename and os.path.exists(filename):\n self.parse(filename)\n elif filename:\n self.new(filename)",
"def create(self):\n\n if len(self.filenames) != len(self.download_links):\n print(\"Must have the same amount off file names than download links\", file=sys.stderr)\n return None\n\n resources = []\n\n #Creating the resource dict\n for i in range(len(self.filenames)):\n resources.append(\n {\n \"id\": self.ids[i],\n \"description\":\"\",\n \"filename\":self.filenames[i],\n \"download_link\":self.download_links[i]\n }\n )\n\n\n #The JSON\n data = {\n \"dataset\":{\n \"project\":self.project,\n \"version\":self.version,\n \"description\":self.description,\n \"project_link\":self.project_link,\n \"data_path\": self.data_path,\n \"metadata\": self.metadata,\n \"files_type\":self.file_type,\n \"protocole\":self.protocole,\n \"resources\":resources,\n \"data_representation\":self.data_representation\n }\n }\n with open(self.dataset_path, \"w\") as json_file:\n json_file.write(json.dumps(data))",
"def initialize() -> fetcher.Fetcher:\n options = fetcher.Input(\n command=\"some_cmd\", config_file=\"looker.ini\", section=\"Looker\"\n )\n return fetcher.Fetcher(options)",
"def __init__(self, import_io, username=None):\r\n self.file_handle = import_io\r\n self.username = username\r\n\r\n # we need to get our list of hashes to make sure we check for dupes\r\n self.hash_list = set([b[0] for b in\r\n BmarkMgr.hash_list(username=username)])",
"def __init__(self, downloader=None):\n super(YoumakerIE, self).__init__(downloader=downloader)\n self._protocol = \"https\"\n self._category_map = None\n self._cache = {}",
"def _get_lexicon_update(self, lexicon):\n\n def merge(lists):\n \"\"\"\n Merge the lists so lists with overlap are joined together\n (i.e. [[1,2], [3,4], [2,5]] --> [[1,2,5], [3,4]])\n from: http://stackoverflow.com/a/9400562\n \"\"\"\n newsets, sets = [set(lst) for lst in lists if lst], []\n while len(sets) != len(newsets):\n sets, newsets = newsets, []\n for aset in sets:\n for eachset in newsets:\n if not aset.isdisjoint(eachset):\n eachset.update(aset)\n break\n else:\n newsets.append(aset)\n return newsets\n\n def get_coreferences(coreferences):\n \"\"\"Decode the SAF coreferences as (node: coreferencing_nodes) pairs\"\"\"\n coref_groups = []\n for a, b in coreferences:\n # take only the heads of each coref group\n coref_groups.append([a[0], b[0]])\n for nodes in merge(coref_groups):\n for node in nodes:\n yield node, nodes\n\n coreferences = dict(get_coreferences(self.saf_article.get('coreferences', [])))\n\n classes = defaultdict(set) # token -> classes\n uris = {}\n for uri, token in self.get_tokens().iteritems():\n if 'pos' not in token: continue # not a word\n uris[int(token['id'])] = uri\n pos = token['pos']\n lemma = token['lemma']\n for lex in lexicon:\n if \"pos\" in lex and lex['pos'] != pos:\n continue\n lemmata = lex['lemma']\n lexclass = lex['lexclass']\n if not isinstance(lemmata, list):\n lemmata = [lemmata]\n for target in lemmata:\n if (target == lemma or target == lemma.lower()\n or (target.endswith(\"*\") and lemma.lower().startswith(target[:-1]))):\n id = int(token['id'])\n for coref in coreferences.get(id, [id]):\n classes[coref].add(lexclass)\n inserts = []\n for id, lexclasses in classes.iteritems():\n if id not in uris:\n continue # coref to different sentence\n uri = str(uris[id]).replace(AMCAT, \":\")\n for lexclass in lexclasses:\n inserts.append('{uri} :lexclass \"{lexclass}\"'.format(**locals()))\n return {\"insert\": \".\\n\".join(inserts)}",
"def __init__(self, url_download, dependency, source, cwd):\n self.url_download = url_download\n self.dependency = dependency\n self.source = source\n self.cwd = cwd",
"def __init__(self) -> None:\n\n self.__author__ = \"GodSaveTheDoge\"\n self.selector = \"#mw-content-text li , p\"\n self.url = \"https://{}.wikipedia.org/wiki/{}\"\n self.apiurl = (\n \"https://en.wikipedia.org/w/api.php?action=query&titles={}&format=json\"\n )",
"def __init__(self, name=''):\r\n if name:\r\n self.file_name = name + '.md'\r\n self.file = open(self.file_name, 'w+', encoding='UTF-8')\r\n self.file.close()",
"def make_lexicon_txt(self):\n raise NotImplementedError",
"def create(self):\n\t\tif self.isInitialized():\n\t\t\tself.Loaded = self.loader.create()",
"def __init__(self, lexicon, source, max_sentence_length=10000, limit=None):\n self.source = source\n self.max_sentence_length = max_sentence_length\n self.limit = limit\n self.lexicon = lexicon\n self.total = 0",
"def __init__(self):\n self.site = pywikibot.Site(u'commons', u'commons')\n self.generator = self.getGenerator()",
"def open(self):\n\n self._key_generator = KeyGenerator()\n\n # A map from LOD to LODHistory instance for all LODs that have\n # been referenced so far:\n self._lod_histories = {}\n\n # This corresponds to the 'nodes' table in a Subversion fs. (We\n # don't need a 'representations' or 'strings' table because we\n # only track file existence, not file contents.)\n self._node_db = _NodeDatabase()\n\n # Start at revision 0 without a root node.\n self._youngest = 0",
"def __init__(self, *args):\n this = _libsbml.new_XMLOwningOutputFileStream(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, url, token):\n super().__init__(url, token)\n self.componentGeneratorForPypi = ComponentGenerator.generator_for_ecosystem('pypi')\n self._dump_json_responses = False",
"def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise",
"def __init__(self, *args, **kwargs):\r\n \r\n self.current_result = dict()\r\n\r\n self.owner = kwargs['owner']\r\n self.resources = kwargs.get('resources', \r\n settings.GITHUB_SETTINGS['GITHUB_SUPPORTED_RESOURCES']\r\n )\r\n\r\n self.page = kwargs.get('page', settings.DEFAULT_PAGE)\r\n self.per_page = kwargs.get('per_page', settings.DEFAULT_PER_PAGE) \r\n \r\n self.repo_list = self._get_repo_list(**kwargs)\r\n\r\n self.repo_done = []\r\n self.current_repo = None",
"def __init__(self, domain=None, language=None, translation_path=DEFAULT_TRANSLATION_PATH,\n update_on_missing=False, cache_time=datetime.timedelta(hours=1)):\n\n self.default_domain = domain\n self.default_language = language\n self.translation_path = translation_path\n self.update_on_missing = update_on_missing\n self.cache_time = cache_time\n\n self.language = self.init_language()\n self.domain_cache = {}\n\n if not os.path.exists(self.translation_path):\n os.mkdir(self.translation_path)\n\n\tif socket.getfqdn().endswith('.wmflabs'):\n\t self.download_url = Intuition.DOWNLOAD_URL_LABS\n\telse:\n\t self.download_url = Intuition.DOWNLOAD_URL_WWW",
"def render_lily(self, lily):\n shasum = \"%s.png\" % sha(lily.encode('utf-8')).hexdigest()\n relfn = posixpath.join(self.builder.imgpath, 'lily', shasum)\n outfn = path.join(self.builder.outdir, '_images', 'lily', shasum)\n if path.isfile(outfn):\n return relfn\n\n if hasattr(self.builder, '_lilypng_warned'):\n return None, None\n\n music = DOC_HEAD + self.builder.config.pnglily_preamble + lily\n if isinstance(music, unicode):\n music = music.encode('utf-8')\n\n # use only one tempdir per build -- the use of a directory is cleaner\n # than using temporary files, since we can clean up everything at once\n # just removing the whole directory (see cleanup_tempdir_lily)\n if not hasattr(self.builder, '_lilypng_tempdir'):\n tempdir = self.builder._lilypng_tempdir = tempfile.mkdtemp()\n else:\n tempdir = self.builder._lilypng_tempdir\n\n tf = open(path.join(tempdir, 'music.ly'), 'w')\n tf.write(music)\n tf.close()\n\n ensuredir(path.dirname(outfn))\n # use some standard lilypond arguments\n lilypond_args = [self.builder.config.pnglily_lilypond]\n #lilypond_args += ['-o', tempdir, '--png']\n lilypond_args += ['-dbackend=eps', '-dno-gs-load-fonts', '-dinclude-eps-fonts',\n '-o', tempdir, '--png']\n # add custom ones from config value\n lilypond_args.extend(self.builder.config.pnglily_lilypond_args)\n\n # last, the input file name\n lilypond_args.append(path.join(tempdir, 'music.ly'))\n try:\n p = Popen(lilypond_args, stdout=PIPE, stderr=PIPE)\n except OSError, err:\n if err.errno != 2: # No such file or directory\n raise\n self.builder.warn('lilypond command %r cannot be run (needed for music '\n 'display), check the pnglily_lilypond setting' %\n self.builder.config.pnglily_lilypond)\n self.builder._lilypng_warned = True\n return None, None\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n raise LilyExtError(u'lilypond exited with error:\\n[stderr]\\n%s\\n'\n '[stdout]\\n%s' % (stderr.decode('utf-8'), stdout.decode('utf-8')))\n\n shutil.copyfile(path.join(tempdir, 'music.png'), outfn)\n #Popen(['mogrify', '-trim', outfn], stdout=PIPE, stderr=PIPE)\n\n return relfn",
"def files_constructor(cmd_args, file_type):\n\n if file_type == 'learning':\n learning_files_list = [LearningFile(address, cmd_args.primary_selex_sequence) for address in cmd_args.learning_file_list]\n [learning_files_list[i].cycle_matrix(i, len(learning_files_list)) for i in range(len(learning_files_list))]\n return learning_files_list\n\n elif file_type == 'prediction':\n if cmd_args.prediction_file:\n return PredictionFile(cmd_args.prediction_file)\n else:\n return None\n\n else:\n 'the user can insert here some code for suppeltementary files'",
"def __init__(self):\n\t\tself.label = \"Endpoint Downloader\"\n\t\tself.description = \"This tool downloads geometry from queryable ArcGis Server endpoint.\"\n\t\tself.canRunInBackground = False",
"def download():\n raise NotImplementedError"
]
| [
"0.59617376",
"0.5480266",
"0.53487563",
"0.5315897",
"0.50601566",
"0.48838323",
"0.48664385",
"0.48546046",
"0.4842769",
"0.47873044",
"0.47842023",
"0.4728167",
"0.47178552",
"0.47145215",
"0.4710691",
"0.4710036",
"0.47037318",
"0.4697695",
"0.4661034",
"0.46569374",
"0.46421185",
"0.46376216",
"0.4633962",
"0.46265808",
"0.46152008",
"0.4610375",
"0.46061757",
"0.46007842",
"0.46003142",
"0.45571384"
]
| 0.5611766 | 1 |
Creates the json file given a list of wav files. Arguments | def create_json(wav_lst, json_file, clean_folder, txt_folder, lexicon):
logger.debug(f"Creating json lists in {json_file}")
# Processing all the wav files in the list
json_dict = {}
for wav_file in wav_lst: # ex:p203_122.wav
# Example wav_file: p232_001.wav
noisy_path, filename = os.path.split(wav_file)
_, noisy_dir = os.path.split(noisy_path)
_, clean_dir = os.path.split(clean_folder)
noisy_rel_path = os.path.join("{data_root}", noisy_dir, filename)
clean_rel_path = os.path.join("{data_root}", clean_dir, filename)
# Reading the signal (to retrieve duration in seconds)
signal = read_audio(wav_file)
duration = signal.shape[0] / SAMPLERATE
# Read text
snt_id = filename.replace(".wav", "")
with open(os.path.join(txt_folder, snt_id + ".txt")) as f:
word_string = f.read()
word_string = remove_punctuation(word_string).strip().upper()
phones = [
phn for word in word_string.split() for phn in lexicon[word].split()
]
# Remove duplicate phones
phones = [i for i, j in zip(phones, phones[1:] + [None]) if i != j]
phone_string = " ".join(phones)
json_dict[snt_id] = {
"noisy_wav": noisy_rel_path,
"clean_wav": clean_rel_path,
"length": duration,
"words": word_string,
"phones": phone_string,
}
# Writing the json lines
with open(json_file, mode="w") as json_f:
json.dump(json_dict, json_f, indent=2)
logger.info(f"{json_file} successfully created!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def create_upload_files(files: List[UploadFile] = File(...)):\n\n if len(files) > 3:\n return {\" \": {\"mode\": \"File Limit Exceeded\"}}\n \n filename = \"_temp_files_one/myfilem.wav\"\n res_json = {}\n file_counter = 0\n for upload_file in files:\n \n with open(filename, \"wb\") as file_object:\n \n file_object.write(upload_file.file.read())\n \n res_json[upload_file.filename + str(file_counter)] = predict_many(filename)\n \n os.remove(filename)\n \n return res_json",
"def make_json(prefix, input_dir):\n # get list of files\n file_list = os.listdir(input_dir)\n # set reference sequence\n tracklist = {'formatVersion': 1,\n 'refSeqs': '%s.ref.fa.fai' % prefix,\n 'tracks': []}\n # add reference sequence track to tracklist.json\n tracklist['tracks'].append({\"category\": \"Reference sequence\",\n \"key\": \"Reference sequence\",\n \"label\": \"Reference sequence\",\n \"type\": \"SequenceTrack\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/IndexedFasta\",\n \"urlTemplate\": \"%s.ref.fa\" % prefix,\n \"refSeqOrder\": \"False\"})\n tracklist['tracks'].append({\"category\": \"Consensus sequence\",\n \"key\": \"Consensus sequence\",\n \"label\": \"Consensus sequence\",\n \"type\": \"SequenceTrack\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/IndexedFasta\",\n \"urlTemplate\": \"%s.cons.fa\" % prefix,\n \"refSeqOrder\": \"False\"})\n # add bigwig track to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Coverage\",\n \"label\": \"Coverage\",\n \"type\": \"JBrowse/View/Track/Wiggle/XYPlot\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BigWig\",\n \"autoscale\": \"local\",\n \"urlTemplate\": \"%s.sorted.bw\" % prefix\n })\n # add BAM Sequence Coverage to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Sequence reads (SNPs/Coverage)\",\n \"label\": \"Sequence reads (SNPs/Coverage)\",\n \"type\": \"JBrowse/View/Track/SNPCoverage\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BAM\",\n \"urlTemplate\": \"%s.sorted.capped.bam\" % prefix,\n \"cacheMismatches\": \"True\",\n \"chunkSizeLimit\": \"5000000\"\n })\n # add BAM Sequence Alignments to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Sequence reads (Alignment)\",\n \"label\": \"Sequence reads (Alignment)\",\n \"type\": \"JBrowse/View/Track/Alignments2\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BAM\",\n \"urlTemplate\": \"%s.sorted.capped.bam\" % prefix,\n # add bigwig histogram option\n \"cacheMismatches\": \"True\",\n \"chunkSizeLimit\": \"5000000\"\n })\n # add GFF3 file to trackList.json\n tracklist['tracks'].append({\"category\": \"Annotation\",\n \"key\": \"Genbank annotation\",\n \"label\": \"Genbank annotation\",\n \"type\": \"JBrowse/View/Track/CanvasFeatures\",\n \"urlTemplate\": \"%s.gff3.gz\" % prefix,\n \"style\": {\n \"_defaultHistScale\": 4,\n \"_defaultLabelScale\": 30,\n \"_defaultDescriptionScale\": 120,\n # Comma-separated list of case-insensitive feature tags to use\n # for showing the feature's label.\n # The first one found will be used. Default 'name,id'.\n \"label\": \"product,id\",\n # style→description\tComma-separated list of case-insensitive\n # feature tags to check for the feature's long description.\n # The first one found will be used. Default 'note,description'.\n # If blank no description is used.\n \"description\": \"note, description\"\n },\n })\n\n json_path = os.path.join(input_dir, 'trackList.json')\n with open(json_path, 'wt') as output_handle:\n json_raw_str = json.dumps(tracklist, indent=4)\n output_handle.write(json_raw_str)\n return 'trackList.json'",
"def generate_code_files(code_list: List[str], base: str) -> None:\n for code in code_list:\n parts = code.split(' ')\n status = parts[0]\n name = \" \".join(parts[1:])\n path = os.path.join('codes', base, f'{status[0]}XX', f'{status}.json')\n data = {\n 'code': int(status),\n 'name': name,\n 'messages': []\n }\n with open(path, 'w') as jsonfile:\n json.dump(data, jsonfile, indent=4)",
"def make_waves(wave_array, filename: str, num_cycle=1):\n sampleRate = 44100.0 # hertz\n duration = 1.0 # seconds\n frequency = 440.0 # hertz\n obj = wave.open(filename, 'w')\n obj.setnchannels(1) # mono\n obj.setsampwidth(2)\n obj.setframerate(sampleRate)\n waves = list(wave_array)\n for w in range(num_cycle):\n for i in waves:\n value = i\n data = struct.pack('<h', int(value))\n obj.writeframesraw(data)\n obj.close()",
"def save_all_chunks_with_labels(audio_dir, json_dir, csv_dir):\n for file in os.listdir(json_dir):\n file_path = os.path.join(json_dir, file)\n audio_file_path = os.path.join(audio_dir, file)[:-4] + \"wav\"\n with open(file_path) as f:\n data = json.load(f)\n save_arrays_with_labels(audio_file_path, data, csv_dir)",
"def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')",
"def save_wavetables(self, path: str, filename_prefix: str = '') -> None:\n for i in range(len(self.wavetables)):\n if not os.path.exists(path):\n os.mkdir(path)\n location = os.path.join(path, filename_prefix + f'{i:02d}.wav')\n wav_file = WavFile(location)\n wav_file.write_samples(self.wavetables[i])",
"def _init_wave_files(self, files, directory):\n\n # 2048 triggers bug in https://github.com/adafruit/circuitpython/issues/3030\n self._file_buf = bytearray(512) # DO NOT CHANGE size til #3030 is fixed\n\n missing = []\n fhs = {}\n for file in files:\n wav_file = None\n filename = directory + \"/\" + file + \".wav\"\n try:\n wav_file = open(filename, \"rb\")\n fhs[file] = WaveFile(wav_file, self._file_buf)\n except OSError:\n # OSError: [Errno 2] No such file/directory: 'filename.ext'\n missing.append(filename)\n\n # Raises an exception at the end to allow it to report ALL\n # of the missing files in one go to help out the user\n if missing:\n raise SampleJukeboxError(missing)\n self._wave_files = fhs",
"def parse_files(files):\r\n return json.dumps(files.split())",
"def merge_waves(self):\n dirname = self.dirname\n name = self.get_name()\n videocluster = os.path.join(dirname, name)\n if sys.platform == 'win32':\n videocluster = dirname + '/' + name\n listwaves = os.listdir(videocluster)\n listwaves.sort()\n listw = [os.path.join(videocluster, fil) for fil in listwaves]\n #file_basename = os.path.join(dirname, name)\n if sys.platform == 'win32':\n listw = [videocluster + '/' + fil for fil in listwaves] \n # file_basename = dirname + '/' + name\n self.wave = os.path.join(dirname, name + \".wav\")\n if sys.platform == 'win32':\n self.wave = dirname + '/' + name + \".wav\"\n fm.merge_waves(listw, self.wave)",
"def generate_files(self, output_dir: str) -> None:\n full_filename = os.path.join(output_dir, self.json_file)\n with open(full_filename, 'w', encoding='utf-8') as output_file:\n json.dump(self.zidb, output_file, indent=2)\n print(file=output_file) # add terminating newline\n logging.info(\"Created %s\", full_filename)",
"def read_and_save_osu_file_using_json_wavdata(path, json_path, filename=\"saved\", divisor=4):\n osu_dict, wav_file = read_osu_file(path, convert=True)\n data, flow_data = get_map_notes(osu_dict, divisor=divisor)\n with open(json_path) as wav_json:\n wav_data = json.load(wav_json)\n # in order to match first dimension\n # wav_data = np.swapaxes(wav_data, 0, 1);\n\n # change the representation of note_type\n # a bit of copypaste code because I changed the data structure many times here\n transformed_data = get_transformed_lst_data(data)\n\n # read hitsounds from circles for taiko mode\n hs_data = get_circle_hitsounds(osu_dict, divisor=divisor)\n\n np.savez_compressed(filename, lst=transformed_data,\n wav=wav_data, flow=flow_data, hs=hs_data)",
"def write_sound(file, snds: Property, pack_list, snd_prefix='*'):\n if snds.has_children():\n file.write('\"rndwave\"\\n\\t{\\n')\n for snd in snds:\n file.write(\n '\\t\"wave\" \"{sndchar}{file}\"\\n'.format(\n file=snd.value.lstrip(SOUND_CHARS),\n sndchar=snd_prefix,\n )\n )\n pack_list.add('sound/' + snd.value.casefold())\n file.write('\\t}\\n')\n else:\n file.write(\n '\"wave\" \"{sndchar}{file}\"\\n'.format(\n file=snds.value.lstrip(SOUND_CHARS),\n sndchar=snd_prefix,\n )\n )\n pack_list.add('sound/' + snds.value.casefold())",
"def create_files(filename_list, encoding):\n for filename in filename_list:\n codecs.open(filename, 'w', encoding).close()",
"def createMelody(song, outputSongFileName, timing=4):\n wavInput = (())\n wavInput1 = (())\n wavInput2 = (())\n wavInput3 = (())\n\n # Remove the beginning and end portions of the canvas that are blank\n while song[0] == ['R','R','R','R']:\n del song[0]\n while song[-1] == ['R','R','R','R']:\n del song[-1]\n\n for notesList in song:\n\n remove_dup(notesList)\n\n notesNum = []\n for i in range(len(notesList)):\n if (notesList[i].upper() == 'R'):\n notesNum.append('')\n elif (notesList[i].upper() == 'A' or notesList[i].upper() == 'B'):\n notesNum.append('3')\n else:\n notesNum.append('4')\n\n wavInput = ((notesList[0].lower() + str(notesNum[0]), timing),) + wavInput\n wavInput1 = ((notesList[1].lower() + str(notesNum[1]), timing),) + wavInput1\n wavInput2 = ((notesList[2].lower() + str(notesNum[2]), timing),) + wavInput2\n wavInput3 = ((notesList[3].lower() + str(notesNum[3]), timing),) + wavInput3\n\n\n wavInput = wavInput[::-1]\n wavInput1 = wavInput1[::-1]\n wavInput2 = wavInput2[::-1]\n wavInput3 = wavInput3[::-1]\n\n wavNames = [\".wav1.wav\",\".wav2.wav\",\".wav3.wav\",\".wav4.wav\"]\n wavInputs = [wavInput,wavInput1,wavInput2,wavInput3]\n\n validWavInputs = []\n\n for i in range(len(wavInputs)):\n if isAllRests(wavInputs[i]) == False:\n validWavInputs.append(wavInputs[i])\n\n validWavNames = wavNames[:len(validWavInputs)]\n\n call(['python','GenerateWavFiles.py',str(validWavNames) + \"@\" + str(validWavInputs)])\n\n sounds = []\n for i in range(len(validWavNames)):\n sounds.append(AudioSegment.from_wav(validWavNames[i]))\n\n combined = sounds[0]\n for i in range(1, len(sounds)):\n combined = combined.overlay(sounds[i])\n\n combined.export(outputSongFileName, format='wav')",
"def process_audio(fname, output_dir, poller):\n result = []\n try:\n if poller.params.candidate_transcripts is not None:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".json\")\n else:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".txt\")\n audio, audio_length = load_audio(fname, poller.params.model.sampleRate())\n pred = transcribe_audio(poller.params.model, audio, candidate_transcripts=poller.params.candidate_transcripts)\n with open(out_path, \"w\") as fp:\n fp.write(pred)\n result.append(out_path)\n except KeyboardInterrupt:\n poller.keyboard_interrupt()\n except:\n poller.error(\"Failed to process audio file: %s\\n%s\" % (fname, traceback.format_exc()))\n return result",
"def get_noise_dict(psrlist,noisefiles):\n\n params = {}\n json_files = sorted(glob.glob(noisefiles + '*.json'))\n for ff in json_files:\n if any([pp in ff for pp in psrlist]):\n with open(ff, 'r') as fin:\n params.update(json.load(fin))\n return params",
"def list_to_json(input_list, file_name):\n with open(file_name, 'w') as outfile:\n json.dump(input_list, outfile)",
"def get_wavs_dict_list(test_dir):\n # Find all clean files and make an {id: filepath} dictionary\n clean_wavs = glob.glob(os.path.join(test_dir, \"clean/*.wav\"))\n clean_dic = make_wav_id_dict(clean_wavs)\n # Same for noisy files\n noisy_wavs = glob.glob(os.path.join(test_dir, \"noisy/*.wav\"))\n noisy_dic = make_wav_id_dict(noisy_wavs)\n assert clean_dic.keys() == noisy_dic.keys()\n # Combine both dictionaries\n dict_list = [dict(clean=clean_dic[k], noisy=noisy_dic[k], id=k) for k in clean_dic.keys()]\n return dict_list",
"def cat_json(output_filename, input_filenames):\n\twith open(output_filename, \"w\") as outfile:\n\t\tfirst = True\n\t\tcounter = -1\n\t\tfor infile_name in input_filenames:\n\t\t\twith open(infile_name) as infile:\n\t\t\t\tif first:\n\t\t\t\t\toutfile.write('{')\n\t\t\t\t\tfirst = False\n\t\t\t\telse:\n\t\t\t\t\toutfile.write(',')\n\t\t\t\toutfile.write(mangle(infile.read(), counter))\n\t\t\t\tcounter -= 1\n\t\toutfile.write('}')",
"def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))",
"def process_wav_files(wav_dir, id_list, out_dir, calc_mvn):\n file_ids = utils.get_file_ids(wav_dir, id_list)\n\n os.makedirs(os.path.join(out_dir, 'f0'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'lf0'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'vuv'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'sp'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'ap'), exist_ok=True)\n\n @utils.multithread\n def save_wav_to_files(file_id):\n wav_path = os.path.join(wav_dir, '{}.wav'.format(file_id))\n wav = wav_features.Wav(wav_path)\n\n f0, vuv, sp, ap = wav.extract_features()\n\n file_io.save_bin(f0, os.path.join(out_dir, 'f0', '{}.f0'.format(file_id)))\n file_io.save_bin(np.log(f0), os.path.join(out_dir, 'lf0', '{}.lf0'.format(file_id)))\n file_io.save_bin(vuv, os.path.join(out_dir, 'vuv', '{}.vuv'.format(file_id)))\n file_io.save_bin(sp, os.path.join(out_dir, 'sp', '{}.sp'.format(file_id)))\n file_io.save_bin(ap, os.path.join(out_dir, 'ap', '{}.ap'.format(file_id)))\n\n save_wav_to_files(file_ids)\n\n if calc_mvn:\n calclate_mvn_parameters(out_dir, 'f0', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'lf0', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'vuv', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'sp', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'ap', id_list=id_list, dtype=np.float32)",
"def directory_to_json(self, path, list_in):\n directory_json = {\"base_path\": path, \"files\": list_in}\n return directory_json",
"def export_sounds(names, path, base_label='Sound_'):\n\tfor filename, output in dump_sounds(names, base_label):\n\t\twith open(os.path.join(path, filename), 'w') as out:\n\t\t\tout.write(output)",
"def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()",
"def save_as_json(spectrums: List[Spectrum], filename: str):\n if not isinstance(spectrums, list):\n # Assume that input was single Spectrum\n spectrums = [spectrums]\n\n # Write to json file\n with open(filename, 'w') as fout:\n json.dump(spectrums, fout, cls=SpectrumJSONEncoder)",
"def create_json_file(self,file_name):\n with open('saves/' + file_name + '.json', 'w') as fp:\n json.dump(self.options, fp, indent=4)",
"def save_to_file(cls, list_objs):\n with open(cls.__name__ + \".json\", 'w') as my_file:\n if list_objs is None:\n json.dump([], my_file)\n\n else:\n list_of_dict = []\n for dictionary in list_objs:\n list_of_dict.append(dictionary.to_dictionary())\n j_list_objs = Base.to_json_string(list_of_dict)\n my_file.write(j_list_objs)\n return (my_file)",
"def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)",
"def load_data(self):\r\n if not os.path.exists(self.origin_dir):\r\n raise ValueError(f\"Folder {self.origin_dir} not exists!\")\r\n\r\n # loop folders\r\n listglobs = glob.glob(os.path.join(self.origin_dir)+r\"[0-9]*\")\r\n count = 0\r\n temp = []\r\n for x in listglobs:\r\n\r\n # step1, get speaker id md5\r\n user_id = x.rsplit(\"\\\\\")[-1]\r\n speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n print(\"1=>\", x)\r\n\r\n for k in [\"你好小顺\", \"小顺小顺\"]:\r\n paths = os.path.join(x, k)\r\n print(\"2=>\", paths)\r\n # step2, parse speaker info\r\n with open(os.path.join(paths, \"spearker_info.txt\"), 'r', encoding=\"utf-8\") as f:\r\n line = f.readline()\r\n arrs = line.strip().split(\"\\\\t\")\r\n if len(arrs) != 3:\r\n raise ValueError(\"Required three field in speaker_info<id>\\t<gender>\\t<age>\")\r\n self.wav_desc[\"gender\"] = arrs[1].strip(\"<\").rstrip(\">\")\r\n self.wav_desc[\"age\"] = arrs[-1].strip(\"<\").rstrip(\">\")\r\n\r\n # step3, parse wav detailed information\r\n # key: wav_id, value: info_list, [keyword, noise_type, distance, speed,user_id, equipment]\r\n wav_infos_dict = {}\r\n with open(os.path.join(paths, \"wav_desc.txt\"), \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n arrs = line.strip().split(\"\\\\t\")\r\n wav_infos_dict[arrs[0].strip(\"<\").rstrip(\">\")] = [x.strip(\"<\").rstrip(\">\") for\r\n x in arrs[1:]]\r\n\r\n print(f\"Parse wav info finished find {len(wav_infos_dict)} infos.\")\r\n\r\n # Step4, audio with background noise and without nose, which was back_wav and wav_data folder\r\n for wav_folder in [\"back_wav\", \"wav_data\"]:\r\n audio_lists = glob.glob(os.path.join(paths + f\"\\\\{wav_folder}\", \"*.wav\"))\r\n for xa in audio_lists:\r\n # copy data to\r\n wav_id, user_id = get_wav_name(xa)\r\n # print(wav_id, user_id)\r\n # create md5 id\r\n utt_id = hashlib.md5(xa.encode(\"utf-8\")).hexdigest()\r\n # speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n # print(utt_id, speaker_id)\r\n # collect all info for an audio\r\n self.wav_desc[\"utt_id\"] = utt_id\r\n infos = wav_infos_dict[wav_id]\r\n if len(infos) != 6:\r\n print(\"==>\", infos)\r\n self.wav_desc[\"keyword_id\"] = self.keywords_dict[infos[0]]\r\n self.wav_desc[\"noise_type\"] = infos[1]\r\n self.wav_desc[\"distance\"] = infos[2]\r\n self.wav_desc[\"record_speed\"] = infos[3]\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n self.wav_desc[\"record_equipment\"] = infos[5]\r\n\r\n # record wav information\r\n t_infos = copy.deepcopy(self.wav_desc)\r\n self.all_wavs.append(t_infos)\r\n count += 1\r\n temp.append(utt_id)\r\n\r\n # copy data to resource folder\r\n dest = shutil.copy2(xa, os.path.join(self.dest_dir, f\"audios/{utt_id}.wav\"))\r\n set_index = which_set(dest, 20, 30)\r\n self.data_index[set_index].append(t_infos)\r\n\r\n # write wav information into json file\r\n with open(os.path.join(self.dest_dir, \"resources/wav_desc.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.all_wavs, f, ensure_ascii=False, indent=True)\r\n print(f\"total wavs:{count}, total ids:{len(temp)}\")\r\n for set_index in self.data_index.keys():\r\n with open(os.path.join(self.dest_dir, f\"resources/p_{set_index}.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.data_index[set_index], f, ensure_ascii=False, indent=True)\r\n print(f\"Collect {set_index} data total {len(self.data_index[set_index])} samples.\")"
]
| [
"0.6976213",
"0.6822995",
"0.6490898",
"0.61406285",
"0.6051366",
"0.6036552",
"0.59901935",
"0.59424925",
"0.5882625",
"0.58272076",
"0.5790114",
"0.57872635",
"0.57773167",
"0.5763445",
"0.5733302",
"0.5714365",
"0.57103807",
"0.57057214",
"0.56364584",
"0.563437",
"0.5581596",
"0.5576743",
"0.5542576",
"0.55350333",
"0.5533205",
"0.5527276",
"0.5512034",
"0.5493403",
"0.5491249",
"0.5490415"
]
| 0.795731 | 0 |
Mean attentive vectors. Calculate mean attentive vector for the entire sentence by weighted summing all the contextual embeddings of the entire sentence Arguments | def _mean_attentive_vectors(self, x2, cosine_matrix):
# (batch_size, x1_timesteps, x2_timesteps, 1)
expanded_cosine_matrix = K.expand_dims(cosine_matrix, axis=-1)
# (batch_size, 1, x2_timesteps, embedding_size)
x2 = K.expand_dims(x2, axis=1)
# (batch_size, x1_timesteps, embedding_size)
weighted_sum = K.sum(expanded_cosine_matrix * x2, axis=2)
# (batch_size, x1_timesteps, 1)
sum_cosine = K.expand_dims(K.sum(cosine_matrix, axis=-1) + self.epsilon, axis=-1)
# (batch_size, x1_timesteps, embedding_size)
attentive_vector = weighted_sum / sum_cosine
return attentive_vector | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def word_average(self, sent):\n\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word) *\n self.word_idf_weight[word]) # idf weighted\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean",
"def avg_sentence_vector(words, model, num_features, index2word_set):\n featureVec = np.zeros((num_features,), dtype=\"float32\")\n nwords = 0\n\n for word in words:\n if word in index2word_set:\n nwords = nwords+1\n featureVec = np.add(featureVec, model.wv[word])\n # featureVec = np.add(featureVec, model.wv.__getitem__(word))\n\n if nwords>0:\n featureVec = np.divide(featureVec, nwords)\n return featureVec",
"def word_average(self, sent):\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word))\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean",
"def word_averaging(wv, words):\n all_words, mean = set(), []\n \n for word in words:\n if isinstance(word, np.ndarray):\n mean.append(word)\n elif word in wv.vocab:\n mean.append(wv.vectors_norm[wv.vocab[word].index])\n all_words.add(wv.vocab[word].index)\n\n if not mean:\n logging.warning(\"Cannot compute similarity with no input: %s\", words)\n # Remove these examples in pre-processing...\n return np.zeros(50,)\n\n mean = gensim.matutils.unitvec(np.array(mean).mean(axis=0)).astype(np.float32)\n \n return mean",
"def get_sentence_average_w2v(sent, word_to_vec, embedding_dim):\n sum_vec = np.zeros((embedding_dim,))\n known_tokens = 0\n for token in sent.text:\n if (token in word_to_vec.dict):\n known_tokens += 1\n sum_vec += word_to_vec[token]\n if (known_tokens != 0):\n return sum_vec / known_tokens\n else:\n return sum_vec",
"def call(self, inputs, mask=None):\n weights = K.expand_dims(inputs[0])\n vectors = inputs[1]\n wtd_vectors = weights * vectors\n wtd_avg = K.sum(wtd_vectors, axis=-2)\n return wtd_avg",
"def get_sentence_mean_vec(sentence):\n sentence_vecs = numpy.array([])\n \n sent1 = nltk.word_tokenize(sentence)\n for w in sent1: \n w = w.strip(\"'?.,- \")\n if not w in stop_words and w.lower() in glove_model:\n word_vec = numpy.array([glove_model[w.lower()]])\n if sentence_vecs.shape[0] == 0: # Initialize sentence vectors\n sentence_vecs = word_vec\n else:\n sentence_vecs = numpy.vstack((sentence_vecs, word_vec))\n # print(sentence_vecs.shape)\n if sentence_vecs.shape[0] == 0:\n return None\n elif sentence_vecs.shape == (300,):\n return numpy.expand_dims(sentence_vecs, axis=0)\n return numpy.mean(sentence_vecs, axis=0)",
"def _get_mean_embedding(self, words):\n\n # ensure the size still matches if it's loaded from pretrained word vectors\n size = self.size\n if self.w2v is not None:\n size = next(iter(self.w2v_.values())).size\n\n zero = np.zeros(size)\n if self.tfidf:\n embedding = np.mean([self.w2v_[w] * self.w2idf_[w]\n if w in self.w2v_ else zero for w in words], axis = 0)\n else:\n embedding = np.mean([self.w2v_.get(w, zero) for w in words], axis = 0)\n\n return embedding",
"def _mean_vec(self, node) -> Tuple[np.array, int]:\n tokens = [t for t in chain(node.token, (\"RoleId_%d\" % role for role in node.roles))\n if t in self.emb]\n if not tokens:\n return None, 0\n return np.mean([self.emb[t] for t in tokens], axis=0), len(tokens)",
"def compute_user_input_embedding(txt, model):\r\n embeddings = []\r\n tokens = txt.split(\" \")\r\n for word in tokens:\r\n embeddings.append(model.wv[word])\r\n sentence_embedding = compute_average(embeddings)\r\n return sentence_embedding",
"def compute_avg_w2v_vector(w2v_dict, text_nlp_proc):\n SIZE = 50 # size of the w2v dimension\n list_of_word_vectors = [w2v_dict[w] for w in text_nlp_proc if w in w2v_dict.vocab.keys()]\n if len(list_of_word_vectors) == 0:\n result = [0.0]*SIZE\n else:\n result = np.sum(list_of_word_vectors, axis=0) / len(list_of_word_vectors)\n return result",
"def average_one_hots(sent, word_to_ind):\n known_words = 0\n size = len(word_to_ind.keys())\n sum_vec = np.zeros((size,))\n for token in sent.text: #going over all tokens and summing their embeddings\n if (token in word_to_ind):\n sum_vec += get_one_hot(size, word_to_ind[token])\n known_words += 1\n if (known_words != 0):\n return sum_vec / known_words\n else:\n return sum_vec",
"def sentence_to_avg(sentence, word_to_vec_map):\n # Get a valid word contained in the word_to_vec_map. \n any_word = list(word_to_vec_map.keys())[0]\n \n ### START CODE HERE ###\n # Step 1: Split sentence into list of lower case words (≈ 1 line)\n words = sentence.lower().split()\n\n # Initialize the average word vector, should have the same shape as your word vectors.\n avg = np.zeros(word_to_vec_map[any_word].shape)\n \n # Initialize count to 0\n count = 0\n \n # Step 2: average the word vectors. You can loop over the words in the list \"words\".\n for w in words:\n # Check that word exists in word_to_vec_map\n if w in word_to_vec_map:\n avg += word_to_vec_map[w]\n # Increment count\n count +=1\n \n if count > 0:\n # Get the average. But only if count > 0\n avg = avg / count\n \n ### END CODE HERE ###\n \n return avg",
"def get_mean_emb(self, text):\n return np.mean([self.emb.get(w.lower(), self.emb.get(\"_UNK\")) for w in text.split()], axis=0)",
"def generate_avg_vector(self, data):\r\n doc=nlp(data)\r\n data_vector = [token.vector for token in doc]\r\n mean_vector = np.mean(data_vector, axis=0)\r\n return mean_vector",
"def aggregate_embeddings(list_of_embeddings):\n\n return np.mean(list_of_embeddings, axis=0)",
"def mean(self, words: [str]) -> numpy.ndarray:\n vecs = numpy.array([self[word] for word in words])\n return numpy.mean(vecs, axis=0)",
"def test_mean_results(self, concept: Concept):\n concept_args = dict(concept=concept, model_stump=None)\n for desc, (embs, (m_w, m_b)) in self.INTUITIVE_MEAN_EXAMPLES.items():\n m_w: np.ndarray = np.array(m_w)\n embeddings = [ConceptEmbedding(normal_vec=w, support_factor=b,\n **concept_args)\n for w, b in embs]\n # Actual routine\n m_emb: ConceptEmbedding = ConceptEmbedding.mean(embeddings)\n context_info = ((\"context:\\n mean embedding: ({}, {}, 1.)\"\n \"\\n in embeddings ({}) as (normal vec, support, \"\n \"scaling):\\n {}\")\n .format(m_emb.normal_vec, m_emb.support_factor,\n desc,\n [(e.normal_vec, e.support_factor,\n e.scaling_factor)\n for e in embeddings]))\n\n # Format checks\n assert m_emb.normal_vec.shape == embeddings[0].normal_vec.shape\n assert np.array(m_emb.support_factor).shape == np.array(\n embeddings[0].support_factor).shape\n\n # Value checks\n # Scaling\n expected_scaling: float = float(np.mean(\n [e.scaling_factor for e in\n [e.normalize() for e in embeddings]]))\n assert float(m_emb.scaling_factor) == expected_scaling, \\\n (\"Mean scaling wrong: expected {}., but was {}; {}\"\n .format(expected_scaling, m_emb.scaling_factor, context_info))\n # Normal vector\n assert np.allclose(m_emb.normal_vec, m_w), \\\n (\"Wrong mean normal vector: expected {}, but was {}; {}\"\n .format(m_w, m_emb.normal_vec, context_info))\n # Support\n assert np.allclose(m_emb.support_factor, m_b), \\\n (\"Wrong mean support factor: expected {}, but was {}; {}\"\n .format(m_b, m_emb.support_factor, context_info))",
"def _WordSimAveVec(self,df,a):\r\n #Obtain the course description for the course provided and convert the string into a list of individual words.\r\n Description = df['description'][a].split()\r\n #Create a placeholder zero vector of the same size as the vector embedding.\r\n Vector = np.zeros(self.WordVecModel.layer1_size)\r\n wordCount = 0\r\n #Iterate over each word in the description.\r\n for word in Description:\r\n #If the word is in the trained vocabulary, obtain the word vector. \r\n #Continue to add the word vectors to the placeholder vector to get the running sum.\r\n if word in self.WordVecModel.wv.vocab:\r\n vector = self.WordVecModel.wv.get_vector(word)\r\n Vector +=vector\r\n #Keep track of how many word vectors (which were included in the vocabulary) were added.\r\n wordCount +=1\r\n #Calculate the mean by dividing the sum by the number of vectors.\r\n return Vector/wordCount",
"def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1 / n, vector_sum(vectors))",
"def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1 / n, vector_sum(vectors))",
"def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))",
"def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))",
"def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))",
"def compute_mean(self):\n # load_in_all_parameters(self.save_directory, self.auto_encoder)\n for i, data_row in enumerate(self.X_train_naive):\n input_nn = data_row\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n prediction_embedding = self.model.forward_only_encoding(input_nn)\n print(prediction_embedding)\n if i == 0:\n self.embedding_np = prediction_embedding.data.clone().cpu().numpy()[0]\n else:\n self.embedding_np = np.vstack((self.embedding_np, prediction_embedding.data.clone().cpu().numpy()[0]))\n self.mean_embedding = np.average(self.embedding_np, axis=0)\n print('mean embedding is ', self.mean_embedding)",
"def embed_token(self, token):\r\n embs, words = [], token.split()\r\n for word in words:\r\n emb_list=[]\r\n for element in word.split('_'):\r\n # If we have a trailing _ we don't want to embed an empty string\r\n if element:\r\n emb,_ = self(element, mean_sequence=True)\r\n emb_list.append(emb)\r\n embs.append(torch.mean(torch.stack(emb_list), dim=0))\r\n\r\n return torch.mean(torch.stack(embs), dim=0)",
"def test_distance_mean_results(self, concept: Concept):\n\n concept_args = dict(concept=concept, model_stump=None)\n for desc, (embs, (m_w, m_b)) in self.DISTANCE_MEAN_EXAMPLES.items():\n m_w: np.ndarray = np.array(m_w)\n embeddings = [ConceptEmbedding(normal_vec=w, support_factor=b,\n **concept_args)\n for w, b in embs]\n # Actual routine\n m_emb: ConceptEmbedding = \\\n ConceptEmbedding.mean_by_distance(embeddings)\n context_info = ((\"context:\\n mean embedding: ({}, {}, 1.)\"\n \"\\n in embeddings ({}) as (normal vec, support, \"\n \"scaling):\\n {}\")\n .format(m_emb.normal_vec, m_emb.support_factor,\n desc,\n [(e.normal_vec, e.support_factor,\n e.scaling_factor) for e in embeddings]))\n\n # Format checks\n assert m_emb.normal_vec.shape == embeddings[0].normal_vec.shape\n assert np.array(m_emb.support_factor).shape == np.array(\n embeddings[0].support_factor).shape\n\n # Value checks\n # the embedding should be scaled\n assert float(m_emb.scaling_factor) == 1., \\\n (\"Mean embedding not scaled: expected 1., but was {}; {}\"\n .format(m_emb.scaling_factor, context_info))\n assert np.allclose(m_emb.normal_vec, m_w), \\\n (\"Wrong mean normal vector: expected {}, but was {}; {}\"\n .format(m_w, m_emb.normal_vec, context_info))\n # For all given ground truths of support factors, check them:\n if m_b is not None:\n assert np.allclose(m_emb.support_factor, m_b), \\\n (\"Wrong mean support factor: expected {}, but was {}; {}\"\n .format(m_b, m_emb.support_factor, context_info))",
"def get_embeddings(vectors, text, generate_missing=False, k=300):\r\n embeddings = text.apply(lambda x: get_average_vec(x, vectors, generate_missing=generate_missing, k=k))\r\n return list(embeddings)",
"def _aggregate_vectors(movies, feedback_list=[]):\n movie_vec = []\n for i in movies:\n try:\n m_vec = clf[i] # get the vector for each movie\n if ratings_dict:\n try:\n r = ratings_dict[i] # get user_rating for each movie\n # Use a polynomial to weight the movie by rating.\n # This equation is totally arbitrary. I just fit a polynomial\n # to some weights that look good. The effect is to raise\n # the importance of 1, 9, and 10 star ratings.\n w = ((r**3)*-0.00143) + ((r**2)*0.0533) + (r*-0.4695) + 2.1867\n m_vec = m_vec * w\n except KeyError:\n continue\n movie_vec.append(m_vec)\n except KeyError:\n continue\n if feedback_list:\n for i in feedback_list:\n try:\n f_vec = clf[i]\n movie_vec.append(f_vec*1.8) # weight feedback by adding multiplier here\n except KeyError:\n continue\n return np.mean(movie_vec, axis=0)",
"def _get_emb_wavg(g, lang, a=0.001):\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += a / (a + word_freqs[lang][w]) * models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0"
]
| [
"0.7076769",
"0.6930274",
"0.68626755",
"0.6553769",
"0.6473644",
"0.64046896",
"0.63692224",
"0.63644546",
"0.6316079",
"0.62446725",
"0.6241801",
"0.6147459",
"0.61322665",
"0.61276716",
"0.6124915",
"0.61032724",
"0.60569805",
"0.6038638",
"0.59962136",
"0.59507346",
"0.59507346",
"0.5897708",
"0.5897708",
"0.5897708",
"0.58624965",
"0.58533883",
"0.5835492",
"0.57906884",
"0.57467884",
"0.57340705"
]
| 0.7121484 | 0 |
Max attentive vectors. Calculate max attentive vector for the entire sentence by picking the contextual embedding with the highest cosine similarity as the attentive vector. Arguments | def _max_attentive_vectors(self, x2, cosine_matrix):
# (batch_size, x1_timesteps)
max_x2_step = K.argmax(cosine_matrix, axis=-1)
embedding_size = K.int_shape(x2)[-1]
timesteps = K.int_shape(max_x2_step)[-1]
if timesteps is None:
timesteps = K.shape(max_x2_step)[-1]
# collapse time dimension and batch dimension together
# collapse x2 to (batch_size * x2_timestep, embedding_size)
x2 = K.reshape(x2, (-1, embedding_size))
# collapse max_x2_step to (batch_size * h1_timesteps)
max_x2_step = K.reshape(max_x2_step, (-1,))
# (batch_size * x1_timesteps, embedding_size)
max_x2 = K.gather(x2, max_x2_step)
# reshape max_x2, (batch_size, x1_timesteps, embedding_size)
attentive_vector = K.reshape(max_x2, K.stack([-1, timesteps, embedding_size]))
return attentive_vector | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def argmax(self, evidence={}):\n if len(evidence)==0:\n return self.v.ind2sub(self.t.argmax())\n ax = tuple([ evidence[v] if v in evidence else slice(None) for v in self.v ])\n return self.v.ind2sub( self.t[ax].argmax() )",
"def _max_attentive_matching(self, h1, h2, cosine_matrix, w):\n # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n h1 = self._time_distributed_multiply(h1, w)\n # max attentive vector (batch_size, h1_timesteps, embedding_szie)\n max_attentive_vec = self._max_attentive_vectors(h2, cosine_matrix)\n # max_attentive_vec * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n max_attentive_vec = self._time_distributed_multiply(max_attentive_vec, w)\n # matching vector, (batch_size, h1_timesteps, mp_dim)\n matching = self._cosine_similarity(h1, max_attentive_vec)\n return matching",
"def maxmarginal(self, target, out=None):\n return self.__opReduce2(self.v - target,np.max, out=out)",
"def get_v_max(self) -> int:\n return len(self.vocabulary)",
"def MaxMarginalize(self,V):\r\n var = scipy.setdiff1d(self.var,V)\r\n map1 = [scipy.where(self.var==i)[0][0] for i in var]\r\n card = self.card[map1]\r\n \r\n assignments = I2A(range(len(self.val)), self.card)\r\n indx = A2I(assignments[:, map1], card)\r\n val = scipy.ndimage.maximum(self.val,indx,index = range( card.prod() ))\r\n \r\n return factor(var,card,val)",
"def argmax(vec):\n _, idx = torch.max(vec, -1)\n return to_scalar(idx)",
"def max_word_value(words):\n return max(words, key=calc_word_value)",
"def max_wupa(context_sentence, ambiguous_word):\r\n\r\n result = {}\r\n for i in wn.synsets(ambiguous_word):\r\n result[i] = sum(max([i.wup_similarity(k) for k in wn.synsets(j)]+[0]) \\\r\n for j in word_tokenize(context_sentence))\r\n result = sorted([(v,k) for k,v in result.items()],reverse=True)\r\n return result",
"def _maxAlien(self):\n maxA = 0\n for r in self._aliens:\n for y in r:\n if(y != None):\n maxA = max(maxA,y.x)\n return maxA",
"def max_decode(M):\r\n return scipy.array([ f.val.argmax() for f in M])",
"def most_influential_words(model, vectorizer, genre_index=0, num_words=10):\n features = vectorizer.get_feature_names()\n max_coef = sorted(enumerate(model.coef_[genre_index]), key=lambda x:x[1], reverse=True)\n return [[features[x[0]], x[1] ] for x in max_coef[:num_words]]",
"def most_similar(prediction):\r\n sTime = time()\r\n max_prediction = np.array([[0]])\r\n for key, value in dict_words_n_vectors.items():\r\n sim = cosine_similarity(prediction.reshape(1, -1), value.reshape(1, -1))\r\n if sim[0] > max_prediction[0]:\r\n max_prediction = sim\r\n word, vector = key, value\r\n m, s = divmod(calculate_time(sTime), 60)\r\n print(f\"--- done checking most similar word in {int(m):02d}:{int(s):02d} minutes --- \")\r\n return word, np.expand_dims(np.asarray(vector), axis=0)",
"def select_max(td, vocab, A, K):\n V, M = A.shape\n\n d = {}\n\n for m in range(M):\n k = 1\n # best features which are not selected yet\n best_feat = [a for a in A[:,m].argsort()[::-1] if not a in d]\n d.update(dict((a,1) for a in best_feat[:int(K/M)]))\n\n best_feat = np.array(d.keys())\n varr = vocab_array(vocab)\n\n return td[best_feat, :], vocab_dict(varr[best_feat])",
"def most_parallel_vector(v, vectors, tolerance_dot=0.0):\n\n best_dot = tolerance_dot\n best_w = None\n for w in vectors:\n d = abs_dot(v, w)\n if d > best_dot:\n best_dot = d\n best_w = w\n return best_w",
"def get_detected_intent(prediction):\n return max(prediction, key=lambda x: prediction[x])",
"def max(self):\n if 0 in type(self).flatten_shape(self.shape):\n raise ValueError(\"zero-size array has no maximum\")\n if self.isscalar():\n return self.defval\n # If not all blocks are set, then the tensor has an element of defval\n # somewhere.\n m = -np.inf if self.is_full() else self.defval\n for v in self.sects.values():\n try:\n m = max(m, np.max(v))\n except ValueError:\n # This block was zero-size, and has no elements.\n pass\n return m",
"def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec",
"def find_max_tag(self, word):\n count = []\n for tag in self.pos_tags:\n count.append(self.tag_word_data.count((tag, word)))\n max_index = np.argmax(np.asarray(count))\n return self.pos_tags[max_index]",
"def find_largest_freq():\n words_list = {word for line in lines for word in line} # all words possible\n word_freqs = [(find_freq(word), word) for word in words_list] # list of tuples of words and their frequencies\n max_freq = max(word_freqs)\n return max_freq[0], max_freq[1]",
"def norm_max(vector):\n return la.norm(vector, np.inf)",
"def longest(self):\n longest = None\n max_len = 0\n for vector in self._vectors:\n len = vector.length2\n if len > max_len:\n longest = vector\n max_len = len\n return longest",
"def worst_atom(self, g_u, g_v, active_set):\n\n max_w = None\n max_m_w = None\n max_n_w = None\n max_score = -float('inf')\n\n for w in active_set:\n m_w, n_w = self.polytope.vertex(w)\n score_w = np.sum(g_u * m_w) + np.sum(g_v * n_w)\n\n if score_w > max_score:\n max_w = w\n max_m_w = m_w\n max_n_w = n_w\n max_score = score_w\n\n return max_w, max_m_w, max_n_w",
"def predict_max(self, x):\n y_ = self.predict(x)\n amax = torch.argmax(y_, dim=1).detach()\n t = torch.zeros_like(y_)\n t[torch.arange(y_.shape[0]),amax] = 1\n return t",
"def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec",
"def max_evidence(self):\n self.A = np.linalg.inv(self.Sn)\n A_eigval = np.linalg.eigvals(self.A)\n gamma = 0\n for i in range(len(A_eigval)):\n gamma += A_eigval[i]/(self.alpha + A_eigval[i])\n new_alpha = gamma/([email protected])\n\n sum = 0\n for i in range(self.n):\n sum +=(self.t[i][email protected]_matrix[i])**2\n new_beta = 1/((1/(self.n-gamma))*sum)\n\n return new_alpha, new_beta",
"def argmax2(self, cvars=None, ctuple=None):\n if (cvars is None):\n return self.v.ind2sub(self.t.argmax())\n ax = tuple(map(lambda x:ctuple[cvars.index(x)] if x in cvars else slice(None) ,self.v))\n return self.v.ind2sub(self.t[ax].argmax())",
"def argmax(table):\n return max((v,k) for k,v in table.iteritems())[1]",
"def w2v_aggregation_letters(X, length_vector=100):\n global w2v_model_3gram\n if w2v_model_3gram == None:\n w2v_model_3gram = gensim.models.KeyedVectors.load_word2vec_format(os.path.join(os.environ['NOBULL_PATH'], 'w2v_char.vec'))\n X_raw = []\n for x in X:\n x_letter = cleanText_letters(x)\n X_raw.append(x_letter)\n\n\n num_row = len(X_raw)\n\n max_matrix = np.zeros(shape=(num_row, length_vector))\n\n average_matrix = np.zeros(shape=(num_row, length_vector))\n\n for row in range(num_row):\n \n temp_text = X_raw[row] \n temp_vector = temp_text.split()\n \n unique_vector = list(set(temp_vector))\n num_index = len(unique_vector)\n \n temp_matrix = np.zeros(shape=(num_index, length_vector))\n \n j = 0\n for word in unique_vector:\n \n temp_matrix[j] = get_vector(word, w2v_model_3gram, 100)\n j += 1\n\n max_matrix[row] = np.maximum.reduce(temp_matrix)\n average_matrix[row] = np.mean(temp_matrix, axis=0)\n \n result = np.concatenate((average_matrix, max_matrix), axis=1)\n result = sparse.csr_matrix(result)\n \n header = []\n \n for i in range(length_vector):\n temp_string = \"neww2v_average_\" + str(i) + \"-th\"\n header.append(temp_string)\n \n for i in range(length_vector):\n temp_string = \"neww2v_maximum_\" + str(i) + \"-th\"\n header.append(temp_string)\n\n return result, header",
"def find_max(centroids):\n \n max_sim = 0.0\n max_i = 0\n max_j = 0\n length = len(centroids)\n\n for i in xrange(0, length):\n for j in xrange(i + 1, length):\n curr_sim = similarity(centroids[i], centroids[j])\n if curr_sim > max_sim:\n max_sim = curr_sim\n max_i = i\n max_j = j\n\n return (max_i, max_j, max_sim)",
"def getMaxAlignmentScore(self):\n # get max of each row\n # max_scores = [max(i) for i in self.matrix]\n\n # return the max of the max vaules\n return numpy.max(self.matrix)"
]
| [
"0.66659194",
"0.6598962",
"0.6142109",
"0.61344445",
"0.60383004",
"0.57514024",
"0.57171726",
"0.5634056",
"0.56250244",
"0.552597",
"0.54989344",
"0.5482426",
"0.5477064",
"0.5463241",
"0.5459621",
"0.54456",
"0.5424129",
"0.54171747",
"0.5408878",
"0.54040873",
"0.5402832",
"0.5397905",
"0.5391357",
"0.5387444",
"0.5341275",
"0.534087",
"0.5339293",
"0.5310665",
"0.5305433",
"0.5301282"
]
| 0.7494539 | 0 |
Attentive matching operation. Arguments | def _attentive_matching(self, h1, h2, cosine_matrix, w):
# h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
h1 = self._time_distributed_multiply(h1, w)
        # attentive vector (batch_size, h1_timesteps, embedding_size)
attentive_vec = self._mean_attentive_vectors(h2, cosine_matrix)
# attentive_vec * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
attentive_vec = self._time_distributed_multiply(attentive_vec, w)
# matching vector, (batch_size, h1_timesteps, mp_dim)
matching = self._cosine_similarity(h1, attentive_vec)
return matching | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def matches(self, accession):\n pass",
"def matches(self, feature):\n pass",
"def matches(self):\n pass",
"def match(self, *args):\n return _ida_hexrays.udc_filter_t_match(self, *args)",
"def match(self, other):",
"def match(self, *args):\n return _ida_hexrays.microcode_filter_t_match(self, *args)",
"def match(self) -> \"MatchResult\":\n raise NotImplementedError",
"def handleMatch(self, m):\r\n pass",
"def additionalMatch(handIn, indx):",
"def match(self, item):",
"def match(self, product):\n\n raise NotImplementedError, 'need impletent match method'",
"def match(self) -> bool:",
"def matches(self, target):\n raise NotImplementedError()",
"def match(self, ctx):\n pass",
"def match(self, dc):\n raise NotImplemented",
"def findMatchesBetweenImages(image_1, image_2):\n # matches - type: list of cv2.DMath\n matches = None\n # image_1_kp - type: list of cv2.KeyPoint items.\n image_1_kp = None\n # image_1_desc - type: numpy.ndarray of numpy.uint8 values.\n image_1_desc = None\n # image_2_kp - type: list of cv2.KeyPoint items.\n image_2_kp = None\n # image_2_desc - type: numpy.ndarray of numpy.uint8 values.\n image_2_desc = None\n # WRITE YOUR CODE HERE.\n\n sift = cv2.ORB_create()\n image_1_kp, image_1_desc = sift.detectAndCompute(image_1, None)\n image_2_kp, image_2_desc = sift.detectAndCompute(image_2, None)\n\n # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # matches = bf.match(image_1_desc, image_2_desc)\n # matches = sorted(matches, key = lambda x:x.distance)\n # matches = matches[:10]\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(image_1_desc, image_2_desc, k=2)\n\n # Apply ratio test\n good = []\n for m, n in matches:\n print(m.distance, n.distance, m.distance < .75*n.distance)\n if m.distance < (0.75 * n.distance):\n good.append([m])\n\n # We coded the return statement for you. You are free to modify it -- just\n # make sure the tests pass.\n print(len(good), good)\n return image_1_kp, image_2_kp, matches\n # END OF FUNCTION.",
"def _max_attentive_matching(self, h1, h2, cosine_matrix, w):\n # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n h1 = self._time_distributed_multiply(h1, w)\n # max attentive vector (batch_size, h1_timesteps, embedding_szie)\n max_attentive_vec = self._max_attentive_vectors(h2, cosine_matrix)\n # max_attentive_vec * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n max_attentive_vec = self._time_distributed_multiply(max_attentive_vec, w)\n # matching vector, (batch_size, h1_timesteps, mp_dim)\n matching = self._cosine_similarity(h1, max_attentive_vec)\n return matching",
"def matches(self, actual: Any) -> MatchResult:\n raise NotImplementedError()",
"def matching_function(self):\n return self.matching",
"def match(self, inp):\n return 0",
"def syntax_matching(pair_attributes, distances, threshold):\n n = len(pair_attributes)\n entailments = [0 for foo in range(n+1)]\n results = [0 for foo in range(n+1)]\n # Calculates entailments and accuracy\n for i in range(n):\n t,h,id_num,e,ta = pair_attributes[i]\n id_num = int(id_num)\n entails = distances[i] < threshold\n entailments[id_num] = \"YES\" if entails else \"NO\"\n results[id_num] = 1 if entailments[id_num] == e else 0\n lexical.output_rte(entailments)\n print \"Threshold: \" + \"%.3f\"%threshold + \" Accuracy: \" + str(float(sum(results)) / float(n))",
"def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)",
"def _match(self) -> None:\n self.matched = [i for i in self.data if self.match(i)]\n self.unmatched = [i for i in self.data if not self.match(i)]",
"def _match(self):\n if self.algo == 'MLSTM':\n match_layer = MatchLSTMLayer(self.hidden_size)\n elif self.algo == 'BIDAF':\n match_layer = AttentionFlowMatchLayer(self.hidden_size)\n else:\n raise NotImplementedError('The algorithm {} is not implemented.'.format(self.algo))\n self.match_p_encodes, _ = match_layer.match(self.sep_p_encodes, self.sep_q_encodes,\n self.p_length, self.q_length)\n if self.use_dropout:\n self.match_p_encodes = tf.nn.dropout(self.match_p_encodes, self.dropout_keep_prob)",
"def test_actor_matches_activity(self):",
"def rpc_match():",
"def match(desc1,desc2):\n desc1 = array([d/linalg.norm(d) for d in desc1])\n desc2 = array([d/linalg.norm(d) for d in desc2])\n dist_ratio = 0.6\n desc1_size = desc1.shape\n matchscores = zeros((desc1_size[0],1),'int')\n desc2t = desc2.T # precompute matrix transpose\n for i in range(desc1_size[0]):\n dotprods = dot(desc1[i, :], desc2t) # vector of dot products\n dotprods *= 0.9999\n # inverse cosine and sort, return index for features in second image\n indx = argsort(arccos(dotprods))\n # check if nearest neighbor has angle less than dist_ratio times 2nd\n if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n matchscores[i] = int(indx[0])\n return matchscores",
"def match(self, input_reader):\n pass",
"def infer_data(self):\n ibs = self.ibs\n # The two matching aids\n self.aid_pair = (self.aid1, self.aid2)\n (aid1, aid2) = self.aid_pair\n self.match_text = ibs.get_match_text(self.aid1, self.aid2)\n # The names of the matching annotations\n self.nid1, self.nid2 = ibs.get_annot_name_rowids((aid1, aid2))\n self.name1, self.name2 = ibs.get_annot_names((aid1, aid2))\n self.other_valid_nids = []\n # The other annotations that belong to these two names\n self.gts_list = ibs.get_annot_groundtruth((aid1, aid2))\n self.gt1, self.gt2 = self.gts_list\n # A flat list of all the aids we are looking at\n self.is_split_case = self.nid1 == self.nid2\n self.all_aid_list = ut.unique_ordered([aid1, aid2] + self.gt1 + self.gt2)\n self.all_nid_list_orig = ibs.get_annot_name_rowids(self.all_aid_list)\n self.other_aids = list(set(self.all_aid_list) - {self.aid1, self.aid2})\n\n if self.is_split_case:\n # Split case\n self.nCols = max(2, len(self.other_aids))\n self.nRows = 2 if len(self.other_aids) > 0 else 1\n else:\n # Merge/New Match case\n self.nCols = max(len(self.gt1) + 1, len(self.gt2) + 1)\n self.nRows = 2\n self.nCols = min(self.max_cols, self.nCols)\n\n # Grab not just the exemplars\n\n if ut.VERBOSE or ut.is_developer():\n logger.info(\n '[matchver] __init__ nid1={!r}, nid2={!r} '.format(self.nid1, self.nid2)\n )\n logger.info('[matchver] __init__ self.gts_list=%r ' % (self.gts_list))\n\n if ut.VERBOSE or ut.is_developer():\n logger.info(\n '[matchver] __init__ nid1={!r}, nid2={!r} '.format(self.nid1, self.nid2)\n )\n logger.info('[matchver] __init__ self.gts_list=%r ' % (self.gts_list))",
"def match_intent(self, utterances, _=None, __=None):\n best_intent = {}\n\n def take_best(intent, utt):\n nonlocal best_intent\n best = best_intent.get('confidence', 0.0) if best_intent else 0.0\n conf = intent.get('confidence', 0.0)\n if conf > best:\n best_intent = intent\n # TODO - Shouldn't Adapt do this?\n best_intent['utterance'] = utt\n\n for utt_tup in utterances:\n for utt in utt_tup:\n try:\n intents = [i for i in self.engine.determine_intent(\n utt, 100,\n include_tags=True,\n context_manager=self.context_manager)]\n if intents:\n utt_best = max(\n intents, key=lambda x: x.get('confidence', 0.0)\n )\n take_best(utt_best, utt_tup[0])\n\n except Exception as err:\n LOG.exception(err)\n\n if best_intent:\n self.update_context(best_intent)\n skill_id = best_intent['intent_type'].split(\":\")[0]\n ret = IntentMatch(\n 'Adapt', best_intent['intent_type'], best_intent, skill_id\n )\n else:\n ret = None\n return ret"
]
| [
"0.66484064",
"0.66251063",
"0.658383",
"0.65670645",
"0.65059847",
"0.6425929",
"0.638338",
"0.62863594",
"0.62749875",
"0.6251956",
"0.6106831",
"0.61030066",
"0.60764915",
"0.6075009",
"0.60259986",
"0.596115",
"0.5895979",
"0.58370674",
"0.5822391",
"0.5820696",
"0.5720142",
"0.5703429",
"0.5673038",
"0.56636614",
"0.56616855",
"0.5652849",
"0.5651055",
"0.5646551",
"0.5645332",
"0.56270933"
]
| 0.6913306 | 0 |
Max attentive matching operation. Arguments | def _max_attentive_matching(self, h1, h2, cosine_matrix, w):
# h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
h1 = self._time_distributed_multiply(h1, w)
        # max attentive vector (batch_size, h1_timesteps, embedding_size)
max_attentive_vec = self._max_attentive_vectors(h2, cosine_matrix)
# max_attentive_vec * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
max_attentive_vec = self._time_distributed_multiply(max_attentive_vec, w)
# matching vector, (batch_size, h1_timesteps, mp_dim)
matching = self._cosine_similarity(h1, max_attentive_vec)
return matching | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_for_maximum(self):\n return self.maximise_aquisition(self.expected_improvement)",
"def get_max(im, class_name, dets, thresh=0.5):\n inds = np.where(dets[:, -1] >= thresh)[0]\n max_inds = 0\n max_score = 0.0\n if len(inds) == 0:\n # print('Warning: no target detected!')\n return\n elif len(inds) > 1:\n # print('Warning: ' + str(len(inds)) + ' targets detected! Choose the highest one')\n for i in inds:\n if(dets[i, -1] > max_score):\n max_inds = i\n max_score = dets[i, -1]\n bbox = dets[max_inds, :4]\n score = dets[max_inds, -1]\n return [max_inds,score]",
"def argmax(self, evidence={}):\n if len(evidence)==0:\n return self.v.ind2sub(self.t.argmax())\n ax = tuple([ evidence[v] if v in evidence else slice(None) for v in self.v ])\n return self.v.ind2sub( self.t[ax].argmax() )",
"def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]",
"def max_(*args, **kwargs):\n ...",
"def argmax(fn,over):\n return max([(arg,fn(arg)) for arg in over],key=lambda v: v[1])[0]",
"def findMaximal(freqSet):",
"def argmax(table):\n return max((v,k) for k,v in table.iteritems())[1]",
"def _maxAlien(self):\n maxA = 0\n for r in self._aliens:\n for y in r:\n if(y != None):\n maxA = max(maxA,y.x)\n return maxA",
"def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]",
"def argMax(self):\n if len(list(self.keys())) == 0:\n return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]",
"def max_apply(x): \n if len(x) == 1:\n return x[0]\n else:\n return x[1]",
"def argmax(d):\n return max(d.iteritems(), key=operator.itemgetter(1))",
"def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]",
"def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]",
"def selector(self, dataset, attributes, target_attr):\n\n best_gain = 0.0\n best_attr = None\n \n for attr in attributes:\n gain = self.splitmetric(dataset, attr, target_attr)\n if (gain >= best_gain and attr != target_attr):\n best_gain = gain\n best_attr = attr\n \n return best_attr",
"def maxQ(self,feat):\r\n \r\n maxQ = float('-inf')\r\n maxA = 0\r\n for a in self.actions:\r\n q = self.Q(feat,a)\r\n print(q,a)\r\n if q > maxQ:\r\n maxQ = q\r\n maxA = a\r\n return(maxQ,maxA)",
"def get_detected_intent(prediction):\n return max(prediction, key=lambda x: prediction[x])",
"def f1max_score(pred, mode, namespace, ftype=1):\n results = run(pred, mode, namespace, ftype)\n return max(results[:,1])",
"def __argmax(l: list, key):\n max = float('-inf')\n max_i = -1\n for i in range(len(l)):\n if key(l[i]) > max:\n max = key(l[i])\n max_i = i\n return max_i",
"def _metric_max_over_ground_truths(metric_fn: Callable[[str, str], Tensor], prediction: str, ground_truths: List[str]) ->Tensor:\n return max(metric_fn(prediction, truth) for truth in ground_truths)",
"def produce_max(self, *args, **kwargs):\n raise NotImplementedError('This interaction has no produce_max method yet!')",
"def structured_maximum(x, y):\r\n # see decorator for function body\r",
"def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )",
"def max_op(*substrate_index_arrays):\n result = numpy.max(\n numpy.stack([x.flatten() for x in substrate_index_arrays]) *\n self.species_substrate_suitability_index_array, axis=0)\n result = result.reshape(substrate_index_arrays[0].shape)\n result[substrate_index_arrays[0] == _INDEX_NODATA] = _INDEX_NODATA\n return result",
"def mrv_max1(f, g, exps, x):\n u, b = f.union(g, exps)\n return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps),\n u, b, x)",
"def _max_pooling_matching(self, h1, h2, w):\n # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n h1 = self._time_distributed_multiply(h1, w)\n # h2 * weights, (batch_size, h2_timesteps, mp_dim, embedding_size)\n h2 = self._time_distributed_multiply(h2, w)\n # reshape v1 to (batch_size, h1_timesteps, 1, mp_dim, embedding_size)\n h1 = K.expand_dims(h1, axis=2)\n # reshape v1 to (batch_size, 1, h2_timesteps, mp_dim, embedding_size)\n h2 = K.expand_dims(h2, axis=1)\n # cosine similarity, (batch_size, h1_timesteps, h2_timesteps, mp_dim)\n cos = self._cosine_similarity(h1, h2)\n # (batch_size, h1_timesteps, mp_dim)\n matching = K.max(cos, axis=2)\n return matching",
"def ml_result(self, var, e):\n\t\tdist = self.enumerate_ask(var, e)\n\t\treturn max(dist.items(), key=lambda x:x[1])[0]",
"def worst_atom(self, g_u, g_v, active_set):\n\n max_w = None\n max_m_w = None\n max_n_w = None\n max_score = -float('inf')\n\n for w in active_set:\n m_w, n_w = self.polytope.vertex(w)\n score_w = np.sum(g_u * m_w) + np.sum(g_v * n_w)\n\n if score_w > max_score:\n max_w = w\n max_m_w = m_w\n max_n_w = n_w\n max_score = score_w\n\n return max_w, max_m_w, max_n_w",
"def maxfit(self, *args, **kwargs):\n return _image.image_maxfit(self, *args, **kwargs)"
]
| [
"0.64489824",
"0.6357069",
"0.6196171",
"0.61691546",
"0.6102832",
"0.6094383",
"0.60835683",
"0.60593593",
"0.605532",
"0.5972025",
"0.59391737",
"0.5898913",
"0.5883085",
"0.5875223",
"0.5875223",
"0.58261126",
"0.5818731",
"0.5808261",
"0.58059007",
"0.57965726",
"0.5792894",
"0.57903045",
"0.5778306",
"0.5760478",
"0.57577276",
"0.57489437",
"0.57169944",
"0.5712522",
"0.57099754",
"0.5705926"
]
| 0.6604338 | 0 |
Create a wrapped, monitored SubprocVecEnv for Atari. | def make_atari_env(env_id, num_env, seed, wrapper_kwargs=None, start_index=0):
if wrapper_kwargs is None: wrapper_kwargs = {}
def make_env(rank): # pylint: disable=C0111
def _thunk():
env = make_atari(env_id)
env.seed(seed + rank)
env = Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
return wrap_deepmind(env, **wrapper_kwargs)
return _thunk
set_global_seeds(seed)
return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_custom_env(env_id, num_env, seed, wrapper_kwargs=None, start_index=0):\n if wrapper_kwargs is None: wrapper_kwargs = {}\n def make_env(rank): # pylint: disable=C0111\n def _thunk():\n env = gym.make(env_id)\n env.seed(seed + rank)\n env = Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))\n return env\n return _thunk\n #set_global_seeds(seed)\n return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])",
"def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_index=0, reward_scale=1.0, gamestate=None):\n if wrapper_kwargs is None: wrapper_kwargs = {}\n mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0\n seed = seed + 10000 * mpi_rank if seed is not None else None\n def make_thunk(rank):\n return lambda: make_env(\n env_id=env_id,\n env_type=env_type,\n subrank = rank,\n seed=seed,\n reward_scale=reward_scale,\n gamestate=gamestate,\n wrapper_kwargs=wrapper_kwargs\n )\n\n set_global_seeds(seed)\n if num_env > 1:\n return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])\n else:\n return DummyVecEnv([make_thunk(start_index)])",
"def make_envs(env_gen: Callable, n_envs: int = 8) -> SubprocVecEnv:\n envs = [env_gen(i) for i in range(n_envs)]\n subproc_env = SubprocVecEnv(envs)\n return subproc_env",
"def make_vec_env(self, env_id, seed, logger_dir=None, reward_scale=1.0, num_env=1, force_dummy=False, info_keywords=()):\n mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0\n seed = seed + 10000 * mpi_rank if seed is not None else None\n def make_thunk(rank, initializer=None):\n return lambda: self.make_env(\n env_id,\n seed,\n logger_dir=logger_dir,\n reward_scale=reward_scale,\n mpi_rank=mpi_rank,\n subrank=rank,\n info_keywords=info_keywords,\n )\n set_global_seeds(seed)\n\n if not force_dummy and num_env > 1:\n return SubprocVecEnv([make_thunk(i) for i in range(num_env)])\n else:\n return DummyVecEnv([make_thunk(i) for i in range(num_env)])",
"def create(self):\n os.environ.update(self.env)\n return self.core(*self.args)",
"def register_env_creator(self):\n raise NotImplementedError(\"Subclasses should implement this to call ray.tune.registry.register_env\")",
"def make_wrapped_env(seed=123,\n visualize=False, \n run_logs_dir=\"./run_logs/\", \n dMoments=None,\n step_timeout=10,\n integrator_accuracy = 5e-5,\n ):\n rank = 0 # MPI.COMM_WORLD.Get_rank()\n set_global_seeds(seed + 10000 * rank)\n print(\"Making wrapped env\")\n env = IsolatedEnv(visualize=visualize,\n run_logs_dir=run_logs_dir,\n dMoments=dMoments,\n step_timeout=step_timeout,\n integrator_accuracy=integrator_accuracy\n )\n #print(\"IsolatedEnv: \", type(env))\n #env = ProstheticsEnv(visualize=visualize)\n #env = Monitor(env, os.path.join(logger.get_dir(), str(rank)))\n #print(\"h5pyEnvLogger:\")\n #env = h5pyEnvLogger(env, \"obs-logs\", str(rank))\n #print(\"h5pyEnvLogger:\", type(env))\n #env.seed(seed) # jw\n return env",
"def __init__(self, env):\n super().__init__(env)",
"def __init__(self, env):\n super().__init__(env)",
"def __init__(self, env):\n super().__init__(env)",
"def wrap_env(env, wrappers):\n for wrapper, args in wrappers:\n env = wrapper(env, **args)\n return env",
"def makeenv(outer=None):\n\n retval = {'outer': outer}\n return retval",
"def get_spontaneous_environment(cls: t.Type[_env_bound], *args: t.Any) -> _env_bound:\n env = cls(*args)\n env.shared = True\n return env",
"def make_as_global(self):\n return setup(env=self)",
"def __init__(self): \n\t\n\t # get the environment\n\t\tself.env = env()",
"def make_unity_env(env_directory, num_env, render=True, visual=True, start_index=0):\n def make_env(rank, use_visual=True): # pylint: disable=C0111\n def _thunk():\n unity_env = UnityEnvironment(env_directory, worker_id=rank, no_graphics=(not render), side_channels=[channel])\n env = UnityToGymWrapper(unity_env, rank)\n env = Monitor(env, (log_dir + \"_agentNo\" + str(rank)))\n return env\n return _thunk\n if visual:\n return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])\n else:\n rank = MPI.COMM_WORLD.Get_rank() if MPI else 0\n return DummyVecEnv([make_env(rank, use_visual=False)])",
"def __MakeEnvironment(self):\n environment= os.environ.copy()\n\n for key, value in self.__context.items():\n if type(value) is str:\n name = \"QMV_\" + key.replace(\".\", \"__\")\n environment[name]= value\n\n return environment",
"def create_local_venv(c, rebuild_venv=False):\n common.prepare_virtual_env(c, rebuild_venv)",
"def create(env, opts):\n return _get_env(env)(opts)",
"def make_env(self, env_id, seed, logger_dir=None, reward_scale=1.0, mpi_rank=0, subrank=0, info_keywords=()):\n scenario = scenarios.load('{}.py'.format(env_id)).Scenario()\n world = scenario.make_world()\n env_dict = {\n \"world\": world,\n 'reset_callback': scenario.reset_world,\n 'reward_callback': scenario.reward, \n 'observation_callback': scenario.observation,\n 'info_callback': None,\n 'done_callback': scenario.done, \n 'shared_viewer': True\n }\n env = gym.make('MultiAgent-v0', **env_dict)\n env.seed(seed + subrank if seed is not None else None)\n env = Monitor(env,\n logger_dir and os.path.join(logger_dir, str(mpi_rank) + '.' + str(subrank)),\n allow_early_resets=True,\n info_keywords=info_keywords)\n env = ClipActionsWrapper(env)\n if reward_scale != 1.0:\n from baselines.common.retro_wrappers import RewardScaler\n env = RewardScaler(env, reward_scale)\n return env",
"def _run_env(self):\n raise NotImplementedError()",
"def make_env():\n aigym_path = \"video/\"\n env = retro.make(game='MortalKombatII-Genesis',state='Level1.LiuKangVsJax')\n env = wrappers.Monitor(env, aigym_path,video_callable=False ,force=True) #, video_callable=False \n env = ObservationWraperMK(env)\n env = PlayerOneNetworkControllerWrapper(env)\n env._max_episode_steps = 350\n #env.render()\n\n return env",
"def create_test_env(\n env_id: str,\n n_envs: int = 1,\n stats_path: Optional[str] = None,\n seed: int = 0,\n log_dir: Optional[str] = None,\n should_render: bool = True,\n hyperparams: Optional[Dict[str, Any]] = None,\n env_kwargs: Optional[Dict[str, Any]] = None,\n) -> VecEnv:\n # Create the environment and wrap it if necessary\n env_wrapper = get_wrapper_class(hyperparams)\n\n hyperparams = {} if hyperparams is None else hyperparams\n\n if \"env_wrapper\" in hyperparams.keys():\n del hyperparams[\"env_wrapper\"]\n\n vec_env_kwargs = {}\n vec_env_cls = DummyVecEnv\n if n_envs > 1 or \"Bullet\" in env_id:\n # HACK: force SubprocVecEnv for Bullet env\n # as Pybullet envs does not follow gym.render() interface\n vec_env_cls = SubprocVecEnv\n # start_method = 'spawn' for thread safe\n\n env = make_vec_env(\n env_id,\n n_envs=n_envs,\n monitor_dir=log_dir,\n seed=seed,\n wrapper_class=env_wrapper,\n env_kwargs=env_kwargs,\n vec_env_cls=vec_env_cls,\n vec_env_kwargs=vec_env_kwargs,\n )\n\n # Load saved stats for normalizing input and rewards\n # And optionally stack frames\n if stats_path is not None:\n if hyperparams[\"normalize\"]:\n print(\"Loading running average\")\n print(f\"with params: {hyperparams['normalize_kwargs']}\")\n path_ = os.path.join(stats_path, \"vecnormalize.pkl\")\n if os.path.exists(path_):\n env = VecNormalize.load(path_, env)\n # Deactivate training and reward normalization\n env.training = False\n env.norm_reward = False\n else:\n raise ValueError(f\"VecNormalize stats {path_} not found\")\n\n n_stack = hyperparams.get(\"frame_stack\", 0)\n if n_stack > 0:\n print(f\"Stacking {n_stack} frames\")\n env = VecFrameStack(env, n_stack)\n return env",
"def run_vasp_process(fresh_aiida_env, vasp_params, potentials, vasp_kpoints, vasp_structure, mock_vasp):\n\n def inner(inputs=None, settings=None, test_case=None, process_type='calcjob'):\n \"\"\"\n Run a VaspCalculation or VaspWorkChain with specified input and settings overrides.\n\n Specific outputs can be selected using the test_case parameter.\n\n The type of process is set with the process_type parameter.\n \"\"\"\n from aiida.engine import run\n\n inpts = AttributeDict()\n inpts.structure = vasp_structure\n parameters = vasp_params.get_dict()\n options = {\n 'withmpi': False,\n 'queue_name': 'None',\n 'resources': {\n 'num_machines': 1,\n 'num_mpiprocs_per_machine': 1\n },\n 'max_wallclock_seconds': 3600\n }\n if test_case is not None:\n # Allow to fetch special tests cases using the mock-vasp executable\n parameters['system'] = f'test-case:{test_case}'\n if process_type == 'calcjob':\n from aiida.plugins import CalculationFactory\n process = CalculationFactory('vasp.vasp')\n inpts.potential = get_data_class('vasp.potcar').get_potcars_from_structure(\n structure=inpts.structure, family_name=POTCAR_FAMILY_NAME, mapping=POTCAR_MAP\n )\n inpts.parameters = get_data_class('core.dict')(dict=parameters)\n inpts.metadata = {}\n inpts.metadata['options'] = options\n elif process_type == 'workchain':\n from aiida.plugins import WorkflowFactory\n process = WorkflowFactory('vasp.vasp')\n inpts.potential_family = get_data_node('core.str', POTCAR_FAMILY_NAME)\n inpts.potential_mapping = get_data_node('core.dict', dict=POTCAR_MAP)\n inpts.parameters = get_data_node('core.dict', dict={'incar': parameters})\n inpts.options = get_data_node('core.dict', dict=options)\n inpts.max_iterations = get_data_node('core.int', 1)\n inpts.clean_workdir = get_data_node('core.bool', False)\n inpts.verbose = get_data_node('core.bool', True)\n else:\n raise ValueError(\n f\"The supplied process_type: {process_type} is not supported. Use either 'calcjob' or 'workchain.'\"\n )\n\n mock_vasp.store()\n create_authinfo(computer=mock_vasp.computer, store=True)\n inpts.code = Code.get_from_string('mock-vasp@localhost')\n kpoints, _ = vasp_kpoints\n inpts.kpoints = kpoints\n if inputs is not None:\n # Allow overrides of the input\n inpts.update(inputs)\n if settings is not None and isinstance(settings, dict):\n inpts.settings = get_data_node('core.dict', dict=settings)\n results_and_node = run.get_node(process, **inpts)\n return results_and_node\n\n return inner",
"def make_env_local(stack=True, scale_rew=True, idx=6, frame_wrapper=WarpFrame, reward_type=None):\n from retro_contest.local import make\n\n all_level = train_level + test_level\n\n print(str(idx) + \": start game=\" + all_level[idx][0] + \", state=\" + all_level[idx][1])\n\n env = make(game=all_level[idx][0], state=all_level[idx][1])\n\n return wrap_env(env, stack, scale_rew, frame_wrapper, reward_type)",
"def _create_environment(config):\n if isinstance(config.env, str):\n env = gym.make(config.env)\n else:\n env = config.env()\n if config.max_length:\n env = tools.wrappers.LimitDuration(env, config.max_length)\n env = tools.wrappers.RangeNormalize(env)\n env = tools.wrappers.ClipAction(env)\n env = tools.wrappers.ConvertTo32Bit(env)\n return env",
"def create_rlgpu_env(**kwargs):\n # TODO (@arthur): leibnizgym parse task\n env = parse_vec_task(cli_args, task_cfg)\n # print the environment information\n print_info(env)\n # save environment config into file\n env.dump_config(os.path.join(logdir, 'env_config.yaml'))\n # wrap around the environment\n frames = kwargs.pop('frames', 1)\n if frames > 1:\n env = wrappers.FrameStack(env, frames, False)\n return env",
"def __init__(self, env: gym.Env) -> None:\n super(TerminateOnLaneInvasionWrapper, self).__init__(env=env)",
"def create_virtual_environment(self):\n cmd_venv = \"python\" + self.python_version + \" -m venv --prompt \\\"\" + self.project_name + \"\\\" \" + TaskCreator.venv_dir\n return {\n \"actions\": [cmd_venv],\n \"verbosity\": 2\n }",
"def create_venv(obj, venv_or_script: str,\n install_params: Iterable[str],\n clean: bool, update: bool) -> None:\n if not isinstance(obj, VenvConfig): # pragma: no cover\n raise TypeError(\"ctx.obj must be a VEnvConfig\")\n obj.create(venv_or_script, *install_params, clean=clean, update=update)"
]
| [
"0.6760588",
"0.60681564",
"0.58781356",
"0.57016915",
"0.55395484",
"0.5519906",
"0.55161077",
"0.5502174",
"0.5502174",
"0.5502174",
"0.5375453",
"0.53703487",
"0.53489786",
"0.53385574",
"0.5283076",
"0.5265281",
"0.52512294",
"0.5250794",
"0.5244433",
"0.5228819",
"0.5216743",
"0.5201119",
"0.5176898",
"0.51522386",
"0.51466596",
"0.51074684",
"0.510491",
"0.5103148",
"0.50998837",
"0.5085389"
]
| 0.6632491 | 1 |
Create an argparse.ArgumentParser for run_atari.py. | def atari_arg_parser():
parser = arg_parser()
parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(10e6))
return parser | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--path_annots', type=str, required=False,\n help='path to folder with annotations',\n default='annotations')\n parser.add_argument('-i', '--path_dataset', type=str, required=False,\n help='path to folder with dataset (images)',\n default='dataset')\n parser.add_argument('-o', '--path_output', type=str, required=False,\n help='path to the output directory - visualisation',\n default='output')\n parser.add_argument('--consensus', type=str, required=False,\n help='method for consensus landmarks',\n choices=['mean', 'median'], default='mean')\n parser.add_argument('--visual', required=False, action='store_true',\n help='export co-annotation visualisation', default=False)\n parser.add_argument('--nb_jobs', type=int, required=False,\n help='number of processes in parallel',\n default=NB_THREADS)\n return parser",
"def create_argument_parser() -> argparse.ArgumentParser:\n\n parser = argparse.ArgumentParser(\n prog=\"mafiabot\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Mafia Telegram Bot command line interface.\",\n )\n\n # parser.add_argument(\n # \"--argument\",\n # action=\"store_true\",\n # default=,\n # help=\"\",\n # )\n\n add_logging_options(parser)\n\n return parser",
"def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/user/e/ehofgard/public/data/all_data')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n \n return parser",
"def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main",
"def CreateArgumentParser():\n parser = argparse.ArgumentParser(description='Map code pages to paths')\n parser.add_argument('--native-library', type=str, default='libchrome.so',\n help=('Native Library, e.g. libchrome.so or '\n 'libmonochrome.so'))\n parser.add_argument('--reached-symbols-file', type=str,\n help='Path to the list of reached symbols, as generated '\n 'by tools/cygprofile/process_profiles.py',\n required=False)\n parser.add_argument('--residency', type=str,\n help='Path to JSON file with residency pages, as written'\n ' by extract_resident_pages.py', required=False)\n parser.add_argument('--build-directory', type=str, help='Build directory',\n required=True)\n parser.add_argument('--output-directory', type=str, help='Output directory',\n required=True)\n parser.add_argument('--arch', help='Unused')\n parser.add_argument('--start-server', action='store_true', default=False,\n help='Run an HTTP server in the output directory')\n parser.add_argument('--port', type=int, default=8000,\n help='Port to use for the HTTP server.')\n return parser",
"def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/work/s/ssevova/public/dark-photon-atlas/zhdarkphotonml/samples/v09/mc16d_v09_samples.csv')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n parser.add_argument('--plotInputs',action='store_true', help='Plot scaled train & test inputs')\n parser.add_argument('--plotOutputs',action='store_true', help='Plot scaled test outputs for given probability range')\n parser.add_argument('--lower',help='Lower limit for conditional filtering')\n parser.add_argument('--upper',help='Upper limit for conditional filtering')\n\n return parser",
"def create_arg_parser():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n '-f',\n '--file',\n required=True,\n help='Name of clean data file'\n )\n return arg_parser",
"def arg_parser():\n import argparse\n return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)",
"def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-i1\",\n help=\"\"\"viral alignments\"\"\",\n dest=\"viral\",\n required=True)\n parser.add_argument(\"-i2\",\n help=\"\"\"GTA alignments\"\"\",\n dest=\"gta\",\n required=True)\n parser.add_argument(\"-o\",\n dest=\"output\",\n help=\"output image file\")\n return parser",
"def create_cli_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\", nargs=\"?\", help=\"path to yaml configuration file\")\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('manga_name',\n type = str,\n help = \"Input the name of the manga.\"\n )\n parser.add_argument('-b','--begin',\n type = int,\n help = 'Input the starting chapter.Defaults to first chapter.'\n )\n parser.add_argument('-e','--end',\n type = int,\n help = 'Input the ending chapter.Defaults to the last possible chapter.'\n )\n parser.add_argument('-c','--chapter',\n type = int,\n help = 'Provide if you want to download only one chapter.'\n )\n parser.add_argument('-t','--target',\n type = str,\n help = 'The location where manga has to be downloaded.Defaults to the current directory.',\n default = '.'\n )\n parser.add_argument('-s','--site',\n type = str,\n help = 'The site through which the manga has to be downloaded. Defaults to MangaPanda.',\n default = 'mangapanda'\n )\n\n return parser",
"def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser",
"def create_parser():\n\n parser = argparse.ArgumentParser(description='Extract unique sensors')\n parser.add_argument('--file_name', help='File to extract unique sensors from.')\n parser.add_argument(\n '--url',\n help='A reference to SensorList.txt that specifies its location on a computer'\n 'network.'\n )\n parser.add_argument('--kat_sensor', required=True, help='Name of unique sensor')\n parser.add_argument('-v', metavar='verbosity', type=int, default=2, help='Logging'\n 'verbosity: 0 -critical, 1- error, 2 -warning, 3 -info, 4 -debug')\n\n args = parser.parse_args()\n return args",
"def make_parser():\n\n parser = argparse.ArgumentParser(add_help=True)\n\n parser_grp_main = parser.add_argument_group('Arguments')\n\n parser_grp_main.add_argument\n\n parser_grp_main.add_argument(\n \"-i\",\n \"--inp-dir\",\n default = \"out/ln/alias/sst/all_samples\",\n help=\"The folder containing files to tidy.\"\n )\n\n parser_grp_main.add_argument(\n \"-x\",\n \"--xlsx\",\n type=str,\n help=\"The xlsx file containing the metadata to use to find samples and tidy them.\",\n default=\"Sequencing_summary.xlsx\",\n required=False)\n\n parser_grp_main.add_argument(\n \"-b\",\n \"--by-column\",\n nargs='+',\n type=str,\n help=\"The column names from the xlsx file to use to tidy.\",\n default=\"sample_name\",\n required=False)\n \n parser_grp_main.add_argument(\n \"-d\",\n \"--delete\",\n help=\"Delete file only this arg is used. Unsafe. Always run first without this argument and check all files listed to deletion.\",\n default=False,\n type=bool,\n )\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--todir', help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser(\n description=\"First example\",\n epilog=\"Batch 2017\")\n\n # script\n parser.add_argument('--script',\n required=True,\n action='store',\n dest='script',\n help=\"A script to execute\")\n\n parser.add_argument('--dataset',\n required=True,\n action='store',\n dest='dataset',\n help=\"A dataset to use\")\n#\n# parser.add_argument('--features',\n# required=True,\n# action='store',\n# dest='features',\n# help=\"Number of features\")\n return parser",
"def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-g\", \"--gta\",\n help=\"\"\"gta sequences\"\"\",\n dest=\"gta\",\n required=True)\n return parser",
"def parser_create():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config-file\", type=str, help=\"yaml configuration file name\")\n return parser.parse_args()",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-d', '--todir', help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--todir',\n help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser",
"def make_parser():\n p = argparse.ArgumentParser(\n description=\"Visualize and analyze error from oblique/straight tag observations\"\n )\n\n p.add_argument(\"-n\", help=\"name of the test in the config file\")\n\n p.add_argument(\"-t\", help=\"throw out bad tags\", action=\"store_true\")\n\n p.add_argument(\"-v\", help=\"visualize data\", action=\"store_true\")\n\n p.add_argument(\"-i\", help=\"print result data\", action=\"store_true\")\n\n return p",
"def make_argument_parser():\n arguments = argparse.ArgumentParser(\n add_help=True,\n usage='%(prog)s -s <sam_file> -l <lookup_table> -r <reference_fasta> [-o <outname>]',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='Provide a SAM file of SNP contextual sequences mapped\\nto a reference genome, an Illumina lookup table,\\nand a reference FASTA file\\n\\nThe lookup table should have two columns in this order:\\n\\tSNP ID\\n\\tIllumina-formatted sequence with SNP\\n\\nNo headers are allowed in the lookup table\\n'\n )\n arguments.add_argument(\n '-s',\n '--sam-file',\n dest='samfile',\n type=str,\n default=None,\n required=True,\n metavar='SAM FILE',\n help=\"Name of SAM file\"\n )\n arguments.add_argument(\n '-l',\n '--lookup-table',\n dest='lookup',\n type=str,\n default=None,\n required=True,\n metavar='LOOKUP TABLE',\n help=\"Name of lookup table\"\n )\n arguments.add_argument(\n '-r',\n '--reference',\n dest='reference',\n type=str,\n default=None,\n required=True,\n metavar='REFERENCE FASTA',\n help=\"Path to reference fasta file\"\n )\n arguments.add_argument(\n '-o',\n '--outname',\n dest='outname',\n type=str,\n default='output',\n required=False,\n metavar='OUTPUT NAME',\n help=\"Name of output file, without suffix. Defaults to 'output'\"\n )\n return arguments",
"def create_importer_parser():\n parser = argparse.ArgumentParser()\n _create_shared_args(parser)\n _create_importer_args(parser)\n\n return parser",
"def build_parser():\n def commaSplitter(str):\n \"\"\"\n Argparse a comm-seperated list\n \"\"\"\n # leave this here as a reminder of what I should do to make the argument parsing more robust\n\n # if sqrt != int(sqrt):\n # msg = \"%r is not a perfect square\" % string\n # raise argparse.ArgumentTypeError(msg)\n # return value\n return str.split(',')\n\n def existing_file(fname):\n \"\"\"\n Argparse type for an existing file\n \"\"\"\n if not os.path.isfile(fname):\n raise ValueError(\"Invalid file: \" + str(fname))\n return fname\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument('-p', '--prefix', help='dont really know what this does...',\n action='store', default='patient', dest='prefix')\n parser.add_argument('-d', '--date', help='dont really know what this does...',\n action='store', default='', dest='sampledate')\n parser.add_argument('template', type=argparse.FileType('r'), help='BEAST config template file')\n parser.add_argument('fasta', type=argparse.FileType('r'), help='file of sequences (in FASTA format)')\n\n return parser",
"def make_argument_parser():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"data_directory\",\r\n help=\"Directory where the data files live.\")\r\n parser.add_argument(\"out\", help=\"Output directory of files.\")\r\n parser.add_argument(\"-t\", \"--test\", action=\"store_true\",\r\n help=(\"Test mode, avoids slow classifiers and uses\"\r\n \" 3 folds\"))\r\n parser.add_argument(\"--folds\", default=10,\r\n help=\"Number of folds for n-fold cross validation\")\r\n parser.add_argument(\"--data_pattern\", default=\"*.mat\",\r\n help=\"Pattern for data files\")\r\n parser.add_argument(\"--label_pattern\", default=\"*.mat\",\r\n help=\"Pattern for label files\")\r\n return parser",
"def create_cli_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('json_file', action='store',\n type=str, help=('Outlier per threshold file. This file '\n 'should have been generated by the '\n 'calculate_outliers_by_threshold '\n 'script.'))\n return parser",
"def create_arguments_parser():\n description = \"Statically analyse SBML files for modelling errors\"\n parent_arg_parser = rate_checker_sbml.create_arguments_parser()\n parser = argparse.ArgumentParser(description=description,\n parents=[parent_arg_parser])\n return parser"
]
| [
"0.7437883",
"0.7176363",
"0.7108517",
"0.70549893",
"0.69963837",
"0.6963611",
"0.69511694",
"0.6942633",
"0.6907705",
"0.690574",
"0.6889434",
"0.68267024",
"0.6776402",
"0.67660815",
"0.67581695",
"0.6749728",
"0.6739547",
"0.67250913",
"0.6722918",
"0.6718469",
"0.6718469",
"0.6706136",
"0.67054665",
"0.6688983",
"0.66822857",
"0.667749",
"0.6656213",
"0.6648395",
"0.6631396",
"0.66299915"
]
| 0.78375757 | 0 |
Create an argparse.ArgumentParser for run_mujoco.py. | def mujoco_arg_parser():
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
parser.add_argument('--play', default=False, action='store_true')
return parser | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--reference', required=True, help=\"Reference Genome URL\")\n parser.add_argument('-n', '--normal', required=True, help='Normal BAM URL. Format: UUID.normal.bam')\n parser.add_argument('-t', '--tumor', required=True, help='Tumor BAM URL. Format: UUID.tumor.bam')\n parser.add_argument('-d', '--dbsnp', required=True, help='dbsnp_132_b37.leftAligned.vcf URL')\n parser.add_argument('-c', '--cosmic', required=True, help='b37_cosmic_v54_120711.vcf URL')\n parser.add_argument('-u', '--mutect', required=True, help='Mutect.jar')\n parser.add_argument('-w', '--work_dir', required=True, help='Where you wanna work from? (full path please)')\n\n return parser",
"def create_arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--path_annots', type=str, required=False,\n help='path to folder with annotations',\n default='annotations')\n parser.add_argument('-i', '--path_dataset', type=str, required=False,\n help='path to folder with dataset (images)',\n default='dataset')\n parser.add_argument('-o', '--path_output', type=str, required=False,\n help='path to the output directory - visualisation',\n default='output')\n parser.add_argument('--consensus', type=str, required=False,\n help='method for consensus landmarks',\n choices=['mean', 'median'], default='mean')\n parser.add_argument('--visual', required=False, action='store_true',\n help='export co-annotation visualisation', default=False)\n parser.add_argument('--nb_jobs', type=int, required=False,\n help='number of processes in parallel',\n default=NB_THREADS)\n return parser",
"def create_argument_parser() -> argparse.ArgumentParser:\n\n parser = argparse.ArgumentParser(\n prog=\"mafiabot\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Mafia Telegram Bot command line interface.\",\n )\n\n # parser.add_argument(\n # \"--argument\",\n # action=\"store_true\",\n # default=,\n # help=\"\",\n # )\n\n add_logging_options(parser)\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('manga_name',\n type = str,\n help = \"Input the name of the manga.\"\n )\n parser.add_argument('-b','--begin',\n type = int,\n help = 'Input the starting chapter.Defaults to first chapter.'\n )\n parser.add_argument('-e','--end',\n type = int,\n help = 'Input the ending chapter.Defaults to the last possible chapter.'\n )\n parser.add_argument('-c','--chapter',\n type = int,\n help = 'Provide if you want to download only one chapter.'\n )\n parser.add_argument('-t','--target',\n type = str,\n help = 'The location where manga has to be downloaded.Defaults to the current directory.',\n default = '.'\n )\n parser.add_argument('-s','--site',\n type = str,\n help = 'The site through which the manga has to be downloaded. Defaults to MangaPanda.',\n default = 'mangapanda'\n )\n\n return parser",
"def build_parser ():\n\n parser = argparse.ArgumentParser (description = __doc__)\n\n parser.add_argument (\n '-v', '--verbose', dest='verbose', action='count',\n help='increase output verbosity', default=0\n )\n parser.add_argument (\n '-l', '--live', dest='get_live_data', action='store_true',\n help='get live data from OSM database',\n )\n parser.add_argument (\n '-e', '--edit', action='store_true',\n help='edit the OSM database',\n )\n parser.add_argument (\n '-u', '--user', dest='my_edits', action='store_true',\n help='only report about my edits',\n )\n parser.add_argument (\n '--min-length', dest=\"min_length\", type=float, default=1000.0,\n help='way must be longer than this to get a ref (in m) (default=1000)',\n )\n parser.add_argument (\n '--batch-size', dest=\"batch_size\", type=int, default=10,\n help='apply OSM edits in changesets of this size (default=10)',\n )\n return parser",
"def make_parser():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-p', '--platform', dest='platform', type=str, required=False, default='')\n return parser",
"def make_parser():\n\n parser = argparse.ArgumentParser(add_help=True)\n\n parser_grp_main = parser.add_argument_group('Arguments')\n\n parser_grp_main.add_argument\n\n parser_grp_main.add_argument(\n \"-i\",\n \"--inp-dir\",\n default = \"out/ln/alias/sst/all_samples\",\n help=\"The folder containing files to tidy.\"\n )\n\n parser_grp_main.add_argument(\n \"-x\",\n \"--xlsx\",\n type=str,\n help=\"The xlsx file containing the metadata to use to find samples and tidy them.\",\n default=\"Sequencing_summary.xlsx\",\n required=False)\n\n parser_grp_main.add_argument(\n \"-b\",\n \"--by-column\",\n nargs='+',\n type=str,\n help=\"The column names from the xlsx file to use to tidy.\",\n default=\"sample_name\",\n required=False)\n \n parser_grp_main.add_argument(\n \"-d\",\n \"--delete\",\n help=\"Delete file only this arg is used. Unsafe. Always run first without this argument and check all files listed to deletion.\",\n default=False,\n type=bool,\n )\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser(\n description=\"First example\",\n epilog=\"Batch 2017\")\n\n # script\n parser.add_argument('--script',\n required=True,\n action='store',\n dest='script',\n help=\"A script to execute\")\n\n parser.add_argument('--dataset',\n required=True,\n action='store',\n dest='dataset',\n help=\"A dataset to use\")\n#\n# parser.add_argument('--features',\n# required=True,\n# action='store',\n# dest='features',\n# help=\"Number of features\")\n return parser",
"def generate_parser():\n\n\tparser = argparse.ArgumentParser(description='MPI C compiler wrapper from ExaMPI.')\n\n\tparser.add_argument('--CC', default=None)\n\tparser.add_argument('--CXX', default=None)\n\tparser.add_argument('--lib', default=None)\n\tparser.add_argument('--inc', default=None)\n\tparser.add_argument('-o', dest='output', nargs=1)\n\n\tparser.add_argument('other', nargs=argparse.REMAINDER)\n\n\treturn parser",
"def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/user/e/ehofgard/public/data/all_data')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n \n return parser",
"def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/work/s/ssevova/public/dark-photon-atlas/zhdarkphotonml/samples/v09/mc16d_v09_samples.csv')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n parser.add_argument('--plotInputs',action='store_true', help='Plot scaled train & test inputs')\n parser.add_argument('--plotOutputs',action='store_true', help='Plot scaled test outputs for given probability range')\n parser.add_argument('--lower',help='Lower limit for conditional filtering')\n parser.add_argument('--upper',help='Upper limit for conditional filtering')\n\n return parser",
"def create_arg_parser():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n '-f',\n '--file',\n required=True,\n help='Name of clean data file'\n )\n return arg_parser",
"def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main",
"def make_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--device',\n type=str,\n choices=['CPU', 'GPU'],\n help='Execution device.',\n required=True)\n parser.add_argument('-N',\n type=int,\n default=DEFAULT_N,\n help='Number of particles.')\n parser.add_argument('--rho',\n type=float,\n default=DEFAULT_RHO,\n help='Number density.')\n parser.add_argument('--dimensions',\n type=int,\n choices=[2, 3],\n help='Number of dimensions.',\n default=DEFAULT_DIMENSIONS)\n parser.add_argument('--warmup_steps',\n type=int,\n default=DEFAULT_WARMUP_STEPS,\n help='Number of timesteps to run before timing.')\n parser.add_argument('--benchmark_steps',\n type=int,\n default=DEFAULT_BENCHMARK_STEPS,\n help='Number of timesteps to run in the benchmark.')\n parser.add_argument('--repeat',\n type=int,\n default=DEFAULT_REPEAT,\n help='Number of times to repeat the run.')\n parser.add_argument('-v',\n '--verbose',\n action='store_true',\n help='Verbose output.')\n return parser",
"def build_parser(usage, **kwargs):\n return BetterArgumentParser(usage=usage, version=VERSION, **kwargs)",
"def create_parser():\n parser = argparse.ArgumentParser()\n\n # required args\n parser.add_argument('--project_id',\n help='Project id for project containing BQ data',\n default=KEY_FILE,\n type=str,\n required=True)\n\n # data and model args\n parser.add_argument('--training_budget',\n help='Training budget in hours',\n default=1,\n type=int)\n parser.add_argument('--key_file',\n help='JSON key file for API access',\n default=KEY_FILE,\n type=str)\n parser.add_argument('--location',\n help='GCP region to run',\n default=LOCATION,\n type=str)\n parser.add_argument('--automl_dataset',\n help='Name of AutoML dataset',\n default=AUTOML_DATASET,\n type=str)\n parser.add_argument('--automl_model',\n help='Name of AutoML model',\n default=AUTOML_MODEL,\n type=str)\n parser.add_argument('--bq_dataset',\n help='BigQuery dataset to import from',\n default=BQ_DATASET,\n type=str)\n parser.add_argument('--bq_table',\n help='BigQuery table to import from',\n default=BQ_TABLE,\n type=str)\n parser.add_argument('--batch_gcs_input',\n help='GCS URI for batch predict CSV',\n default=BATCH_GCS_INPUT,\n type=str)\n parser.add_argument('--batch_gcs_output',\n help='GCS URI for batch predict output',\n default=BATCH_GCS_OUTPUT,\n type=str)\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--todir', help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('webpage', help='webpage to search')\n\n return parser",
"def CreateArgumentParser():\n parser = argparse.ArgumentParser(description='Map code pages to paths')\n parser.add_argument('--native-library', type=str, default='libchrome.so',\n help=('Native Library, e.g. libchrome.so or '\n 'libmonochrome.so'))\n parser.add_argument('--reached-symbols-file', type=str,\n help='Path to the list of reached symbols, as generated '\n 'by tools/cygprofile/process_profiles.py',\n required=False)\n parser.add_argument('--residency', type=str,\n help='Path to JSON file with residency pages, as written'\n ' by extract_resident_pages.py', required=False)\n parser.add_argument('--build-directory', type=str, help='Build directory',\n required=True)\n parser.add_argument('--output-directory', type=str, help='Output directory',\n required=True)\n parser.add_argument('--arch', help='Unused')\n parser.add_argument('--start-server', action='store_true', default=False,\n help='Run an HTTP server in the output directory')\n parser.add_argument('--port', type=int, default=8000,\n help='Port to use for the HTTP server.')\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-d', '--todir', help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--todir',\n help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser",
"def make_argument_parser():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"data_directory\",\r\n help=\"Directory where the data files live.\")\r\n parser.add_argument(\"out\", help=\"Output directory of files.\")\r\n parser.add_argument(\"-t\", \"--test\", action=\"store_true\",\r\n help=(\"Test mode, avoids slow classifiers and uses\"\r\n \" 3 folds\"))\r\n parser.add_argument(\"--folds\", default=10,\r\n help=\"Number of folds for n-fold cross validation\")\r\n parser.add_argument(\"--data_pattern\", default=\"*.mat\",\r\n help=\"Pattern for data files\")\r\n parser.add_argument(\"--label_pattern\", default=\"*.mat\",\r\n help=\"Pattern for label files\")\r\n return parser",
"def setup_parser():\n parser = argparse.ArgumentParser(\n prog=\"fedora-owner-change\")\n parser.add_argument(\n '--nomail', action='store_true',\n help=\"Prints the report instead of sending it by email\")\n parser.add_argument(\n '--debug', action='store_true',\n help=\"Outputs debugging info\")\n return parser",
"def create_parser():\n desc_str = (\n \"\"\"Look at the results of inference with cbayes scripts.\"\"\"\n )\n\n parser = argparse.ArgumentParser(description=desc_str)\n \n parser.add_argument('-dir', '--directory',\n help = 'name of the cbayes ouput directory',\n type = str,\n required = True\n )\n \n # do the parsing\n args = parser.parse_args()\n\n return args",
"def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-g\", \"--gta\",\n help=\"\"\"gta sequences\"\"\",\n dest=\"gta\",\n required=True)\n return parser",
"def make_parser():\n p = argparse.ArgumentParser(\n description=\"Visualize and analyze error from oblique/straight tag observations\"\n )\n\n p.add_argument(\"-n\", help=\"name of the test in the config file\")\n\n p.add_argument(\"-t\", help=\"throw out bad tags\", action=\"store_true\")\n\n p.add_argument(\"-v\", help=\"visualize data\", action=\"store_true\")\n\n p.add_argument(\"-i\", help=\"print result data\", action=\"store_true\")\n\n return p",
"def build_parser() -> argparse.ArgumentParser:\n\n parser = argparse.ArgumentParser(\n description=\"Kafka support tool that takes a json file and publishes it incrementally to Kafka\")\n parser.add_argument(\n \"kafkaurl\",\n help=\"The Kafka urls you wish to publish messages to (comma separated list)\",\n type=str\n )\n parser.add_argument(\n \"kafkatopic\",\n help=\"The Kafka topic you wish to publish to\",\n type=str\n )\n parser.add_argument(\n \"messagefile\",\n help=\"The JSON file containing the messages you wish to publish\"\n )\n return parser",
"def create_parser():\n\n parser = argparse.ArgumentParser(description='Extract unique sensors')\n parser.add_argument('--file_name', help='File to extract unique sensors from.')\n parser.add_argument(\n '--url',\n help='A reference to SensorList.txt that specifies its location on a computer'\n 'network.'\n )\n parser.add_argument('--kat_sensor', required=True, help='Name of unique sensor')\n parser.add_argument('-v', metavar='verbosity', type=int, default=2, help='Logging'\n 'verbosity: 0 -critical, 1- error, 2 -warning, 3 -info, 4 -debug')\n\n args = parser.parse_args()\n return args",
"def _make_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(prog=\"pyrasaeco-render\", description=__doc__)\n subparsers = parser.add_subparsers(help=\"Commands\", dest=\"command\")\n subparsers.required = True\n\n once = subparsers.add_parser(\n \"once\", help=\"Render once the scenarios and the scenario ontology\"\n )\n\n continuously = subparsers.add_parser(\n \"continuously\",\n help=\"Re-render continuously the scenarios and the scenario ontology\",\n )\n\n continuously.add_argument(\n \"-p\",\n \"--port\",\n help=\"Port on which the demo server should listen to.\\n\\n\"\n \"If not specified, the demo server will not be started.\",\n type=int,\n )\n\n for command in [once, continuously]:\n command.add_argument(\n \"-s\",\n \"--scenarios_dir\",\n help=\"Directory where scenarios reside\\n\\n\"\n \"The rendering artefacts will be produced in-place in this directory.\",\n required=True,\n )\n\n return parser"
]
| [
"0.74008834",
"0.7385079",
"0.734619",
"0.7270096",
"0.7255497",
"0.72188497",
"0.7181056",
"0.711708",
"0.7066124",
"0.7051665",
"0.69526327",
"0.69313985",
"0.69306207",
"0.6892286",
"0.6878253",
"0.6875072",
"0.68702185",
"0.6867738",
"0.6864742",
"0.6836086",
"0.6824759",
"0.6820681",
"0.6810506",
"0.6762997",
"0.67529726",
"0.67380416",
"0.67239577",
"0.6723485",
"0.6695855",
"0.66943127"
]
| 0.7750742 | 0 |
Create an argparse.ArgumentParser for run_halide.py. | def halide_arg_parser():
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='HalideBlur-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-episodes', type=int, default=int(1e4))
parser.add_argument('--target', type=str, default='localhost:50051')
return parser | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CreateArgumentParser():\n parser = argparse.ArgumentParser(description='Map code pages to paths')\n parser.add_argument('--native-library', type=str, default='libchrome.so',\n help=('Native Library, e.g. libchrome.so or '\n 'libmonochrome.so'))\n parser.add_argument('--reached-symbols-file', type=str,\n help='Path to the list of reached symbols, as generated '\n 'by tools/cygprofile/process_profiles.py',\n required=False)\n parser.add_argument('--residency', type=str,\n help='Path to JSON file with residency pages, as written'\n ' by extract_resident_pages.py', required=False)\n parser.add_argument('--build-directory', type=str, help='Build directory',\n required=True)\n parser.add_argument('--output-directory', type=str, help='Output directory',\n required=True)\n parser.add_argument('--arch', help='Unused')\n parser.add_argument('--start-server', action='store_true', default=False,\n help='Run an HTTP server in the output directory')\n parser.add_argument('--port', type=int, default=8000,\n help='Port to use for the HTTP server.')\n return parser",
"def make_parser():\n\n parser = argparse.ArgumentParser(add_help=True)\n\n parser_grp_main = parser.add_argument_group('Arguments')\n\n parser_grp_main.add_argument\n\n parser_grp_main.add_argument(\n \"-i\",\n \"--inp-dir\",\n default = \"out/ln/alias/sst/all_samples\",\n help=\"The folder containing files to tidy.\"\n )\n\n parser_grp_main.add_argument(\n \"-x\",\n \"--xlsx\",\n type=str,\n help=\"The xlsx file containing the metadata to use to find samples and tidy them.\",\n default=\"Sequencing_summary.xlsx\",\n required=False)\n\n parser_grp_main.add_argument(\n \"-b\",\n \"--by-column\",\n nargs='+',\n type=str,\n help=\"The column names from the xlsx file to use to tidy.\",\n default=\"sample_name\",\n required=False)\n \n parser_grp_main.add_argument(\n \"-d\",\n \"--delete\",\n help=\"Delete file only this arg is used. Unsafe. Always run first without this argument and check all files listed to deletion.\",\n default=False,\n type=bool,\n )\n\n return parser",
"def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/user/e/ehofgard/public/data/all_data')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n \n return parser",
"def create_parser():\n parser = argparse.ArgumentParser(\n description=\"First example\",\n epilog=\"Batch 2017\")\n\n # script\n parser.add_argument('--script',\n required=True,\n action='store',\n dest='script',\n help=\"A script to execute\")\n\n parser.add_argument('--dataset',\n required=True,\n action='store',\n dest='dataset',\n help=\"A dataset to use\")\n#\n# parser.add_argument('--features',\n# required=True,\n# action='store',\n# dest='features',\n# help=\"Number of features\")\n return parser",
"def setup_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(description='evaluate dynamic shielding with water_tank benchmarks')\n\n parser.add_argument('--steps', type=int, default=int(2e5),\n help='number of steps that each environment is run.')\n parser.add_argument('--learning-rate', type=float, default=1e-3,\n help='learning rate')\n parser.add_argument('--shield', type=str, default='pre-adaptive',\n help='the shield to be used [pre-adaptive (default) | pre-dynamic | safe-padding | '\n 'post-dynamic | no]')\n parser.add_argument('--shield-life', type=int, default=100,\n help='frequency of shield reconstruction in terms of episodes.')\n parser.add_argument('--depths', nargs='+', default=[1, 3, 5, 7],\n help='a list of min-depths for dynamic shield (usage: --depths 0 1 3)')\n parser.add_argument('--penalties', nargs='+', default=[0.0, 1.0, 10.0, 100.0],\n help='a list of penalties that it is used in no shield (usage: --penalties 0.0 1.0 100.0)')\n return parser",
"def build_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--reference', required=True, help=\"Reference Genome URL\")\n parser.add_argument('-n', '--normal', required=True, help='Normal BAM URL. Format: UUID.normal.bam')\n parser.add_argument('-t', '--tumor', required=True, help='Tumor BAM URL. Format: UUID.tumor.bam')\n parser.add_argument('-d', '--dbsnp', required=True, help='dbsnp_132_b37.leftAligned.vcf URL')\n parser.add_argument('-c', '--cosmic', required=True, help='b37_cosmic_v54_120711.vcf URL')\n parser.add_argument('-u', '--mutect', required=True, help='Mutect.jar')\n parser.add_argument('-w', '--work_dir', required=True, help='Where you wanna work from? (full path please)')\n\n return parser",
"def create_cli_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\", nargs=\"?\", help=\"path to yaml configuration file\")\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('manga_name',\n type = str,\n help = \"Input the name of the manga.\"\n )\n parser.add_argument('-b','--begin',\n type = int,\n help = 'Input the starting chapter.Defaults to first chapter.'\n )\n parser.add_argument('-e','--end',\n type = int,\n help = 'Input the ending chapter.Defaults to the last possible chapter.'\n )\n parser.add_argument('-c','--chapter',\n type = int,\n help = 'Provide if you want to download only one chapter.'\n )\n parser.add_argument('-t','--target',\n type = str,\n help = 'The location where manga has to be downloaded.Defaults to the current directory.',\n default = '.'\n )\n parser.add_argument('-s','--site',\n type = str,\n help = 'The site through which the manga has to be downloaded. Defaults to MangaPanda.',\n default = 'mangapanda'\n )\n\n return parser",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def create_parser():\n desc_str = (\n \"\"\"Look at the results of inference with cbayes scripts.\"\"\"\n )\n\n parser = argparse.ArgumentParser(description=desc_str)\n \n parser.add_argument('-dir', '--directory',\n help = 'name of the cbayes ouput directory',\n type = str,\n required = True\n )\n \n # do the parsing\n args = parser.parse_args()\n\n return args",
"def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/work/s/ssevova/public/dark-photon-atlas/zhdarkphotonml/samples/v09/mc16d_v09_samples.csv')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n parser.add_argument('--plotInputs',action='store_true', help='Plot scaled train & test inputs')\n parser.add_argument('--plotOutputs',action='store_true', help='Plot scaled test outputs for given probability range')\n parser.add_argument('--lower',help='Lower limit for conditional filtering')\n parser.add_argument('--upper',help='Upper limit for conditional filtering')\n\n return parser",
"def setup_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-u\", \"--url\", dest='url', required=True,\n help=\"Falkonry Edge URL\")\n parser.add_argument(\"-i\", \"--input_file\", dest='input', required=True,\n help=\"Input data file to feed into Falkonry Edge Analyzer\")\n parser.add_argument(\"-o\", \"--output_file\", dest='output', required=True,\n help=\"File name to write Falkonry Edge Analyzer output\")\n parser.add_argument(\"-t\", \"--time_column\", dest='time', type=int, required=True,\n help=\"Time column index starting with 0\")\n parser.add_argument(\"-z\", \"--time_zone\", dest='zone', required=True,\n help=\"Time zone\")\n parser.add_argument(\"-f\", \"--time_format\", dest='format', required=True,\n help=\"Timestamp format\")\n parser.add_argument(\"-e\", \"--entity_column\", dest='entity', type=int,\n help=\"Entity column index starting with 0\")\n parser.add_argument(\"-b\", \"--batch_column\", dest='batch', type=int,\n help=\"Batch column index starting with 0\")\n parser.add_argument(\"-r\", \"--input_feed_rate\", dest='rate', type=int, default=1000,\n help=\"Number of records to send to edge per second.\")\n\n return parser",
"def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main",
"def make_parser():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-p', '--platform', dest='platform', type=str, required=False, default='')\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('webpage', help='webpage to search')\n\n return parser",
"def create_argument_parser() -> argparse.ArgumentParser:\n\n parser = argparse.ArgumentParser(\n prog=\"mafiabot\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Mafia Telegram Bot command line interface.\",\n )\n\n # parser.add_argument(\n # \"--argument\",\n # action=\"store_true\",\n # default=,\n # help=\"\",\n # )\n\n add_logging_options(parser)\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser",
"def create_arguments_parser():\n description = \"Statically analyse SBML files for modelling errors\"\n parent_arg_parser = rate_checker_sbml.create_arguments_parser()\n parser = argparse.ArgumentParser(description=description,\n parents=[parent_arg_parser])\n return parser",
"def build_parser(usage, **kwargs):\n return BetterArgumentParser(usage=usage, version=VERSION, **kwargs)",
"def create_arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--path_annots', type=str, required=False,\n help='path to folder with annotations',\n default='annotations')\n parser.add_argument('-i', '--path_dataset', type=str, required=False,\n help='path to folder with dataset (images)',\n default='dataset')\n parser.add_argument('-o', '--path_output', type=str, required=False,\n help='path to the output directory - visualisation',\n default='output')\n parser.add_argument('--consensus', type=str, required=False,\n help='method for consensus landmarks',\n choices=['mean', 'median'], default='mean')\n parser.add_argument('--visual', required=False, action='store_true',\n help='export co-annotation visualisation', default=False)\n parser.add_argument('--nb_jobs', type=int, required=False,\n help='number of processes in parallel',\n default=NB_THREADS)\n return parser",
"def _make_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(prog=\"pyrasaeco-render\", description=__doc__)\n subparsers = parser.add_subparsers(help=\"Commands\", dest=\"command\")\n subparsers.required = True\n\n once = subparsers.add_parser(\n \"once\", help=\"Render once the scenarios and the scenario ontology\"\n )\n\n continuously = subparsers.add_parser(\n \"continuously\",\n help=\"Re-render continuously the scenarios and the scenario ontology\",\n )\n\n continuously.add_argument(\n \"-p\",\n \"--port\",\n help=\"Port on which the demo server should listen to.\\n\\n\"\n \"If not specified, the demo server will not be started.\",\n type=int,\n )\n\n for command in [once, continuously]:\n command.add_argument(\n \"-s\",\n \"--scenarios_dir\",\n help=\"Directory where scenarios reside\\n\\n\"\n \"The rendering artefacts will be produced in-place in this directory.\",\n required=True,\n )\n\n return parser",
"def build_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(help='Blogstrap commands')\n init_parser = subparsers.add_parser(\n 'init',\n help='Initialize the Blogstrap directory')\n init_parser.set_defaults(func=init)\n init_parser.add_argument('-t', '--target',\n dest='target',\n type=str,\n default='.',\n help='Target folder to generate files in')\n init_parser.add_argument('--no-homepage',\n action='store_true',\n default=False,\n help='if specified, no homepage will be created')\n run_parser = subparsers.add_parser(\n 'run', help=\"Run the Flask development server\")\n run_parser.set_defaults(func=run)\n run_parser.add_argument('-c', '--config',\n dest='config',\n type=str,\n default=None,\n help='path to a config file')\n\n return parser",
"def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-i1\",\n help=\"\"\"viral alignments\"\"\",\n dest=\"viral\",\n required=True)\n parser.add_argument(\"-i2\",\n help=\"\"\"GTA alignments\"\"\",\n dest=\"gta\",\n required=True)\n parser.add_argument(\"-o\",\n dest=\"output\",\n help=\"output image file\")\n return parser",
"def create_cli_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('json_file', action='store',\n type=str, help=('Outlier per threshold file. This file '\n 'should have been generated by the '\n 'calculate_outliers_by_threshold '\n 'script.'))\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n\n # required args\n parser.add_argument('--project_id',\n help='Project id for project containing BQ data',\n default=KEY_FILE,\n type=str,\n required=True)\n\n # data and model args\n parser.add_argument('--training_budget',\n help='Training budget in hours',\n default=1,\n type=int)\n parser.add_argument('--key_file',\n help='JSON key file for API access',\n default=KEY_FILE,\n type=str)\n parser.add_argument('--location',\n help='GCP region to run',\n default=LOCATION,\n type=str)\n parser.add_argument('--automl_dataset',\n help='Name of AutoML dataset',\n default=AUTOML_DATASET,\n type=str)\n parser.add_argument('--automl_model',\n help='Name of AutoML model',\n default=AUTOML_MODEL,\n type=str)\n parser.add_argument('--bq_dataset',\n help='BigQuery dataset to import from',\n default=BQ_DATASET,\n type=str)\n parser.add_argument('--bq_table',\n help='BigQuery table to import from',\n default=BQ_TABLE,\n type=str)\n parser.add_argument('--batch_gcs_input',\n help='GCS URI for batch predict CSV',\n default=BATCH_GCS_INPUT,\n type=str)\n parser.add_argument('--batch_gcs_output',\n help='GCS URI for batch predict output',\n default=BATCH_GCS_OUTPUT,\n type=str)\n return parser",
"def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-g\", \"--gta\",\n help=\"\"\"gta sequences\"\"\",\n dest=\"gta\",\n required=True)\n return parser",
"def setup_argparser():\n parser = argparse.ArgumentParser(\n description='Waldo client.')\n\n parser.add_argument('--server', dest='server',\n default=os.environ.get('WALDO_SERVER',\n consts.PRODUCTION))\n\n # Token/username/password can be passed in 3 ways:\n # Command-line, environment var, keyring\n parser.add_argument('--token', dest='token',\n default=os.environ.get(\n 'WALDO_TOKEN',\n keyring.get_password('waldoclient', 'token')),\n help='Racker auth token')\n\n parser.add_argument('--username', dest='username',\n default=os.environ.get(\n 'WALDO_USERNAME',\n keyring.get_password('waldoclient', 'username')),\n help='Racker SSO username. Securely store this '\n 'value in your keyring by running: '\n '`keyring set waldoclient username`.')\n\n parser.add_argument('--password', dest='password',\n default=os.environ.get(\n 'WALDO_PASSWORD',\n keyring.get_password('waldoclient', 'password')),\n help='Racker SSO password. Securely store this '\n 'value in your keyring by running: '\n '`keyring set waldoclient password`.')\n\n verbose = parser.add_mutually_exclusive_group()\n verbose.add_argument('--debug', dest='debug', action='store_true',\n default=False, help='output debug messages')\n verbose.add_argument('--quiet', dest='debug', action='store_true',\n help='suppress output debug messages')\n return parser",
"def create_arg_parser():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n '-f',\n '--file',\n required=True,\n help='Name of clean data file'\n )\n return arg_parser",
"def get_parser():\n parser = ArgumentParser(description=\"Script used to generate Freeplane \"\n + \"mindmap files\")\n\n # This is use when people in Linaro aren't using their email address.\n parser.add_argument('--disable-altname', required=False,\n action=\"store_true\", default=False,\n help=\"Use alternative names (from cfg.yaml) to the tree\")\n\n parser.add_argument('--assignee', required=False,\n action=\"store_true\", default=False,\n help=\"Add assignees (from cfg.yaml) to the tree\")\n\n parser.add_argument('-a', '--author', required=False,\n action=\"store_true\", default=False,\n help=\"If set, git statistic only count the commit \"\n + \"from the author\")\n\n parser.add_argument('-p', '--path', required=False, action=\"store\",\n default=\"/home/jyx/devel/optee_projects/reference/linux\",\n help='Full path to the kernel tree')\n\n parser.add_argument('-s', '--since', required=False, action=\"store\",\n default=None,\n help='Used with the git log --since command')\n\n parser.add_argument('-o', '--output', required=False, action=\"store\",\n default=\"linux-kernel.mm\",\n help='Output filename')\n\n parser.add_argument('-v', required=False, action=\"store_true\",\n default=False,\n help='Output some verbose debugging info')\n\n return parser"
]
| [
"0.7197537",
"0.6993285",
"0.69330865",
"0.6906845",
"0.6859712",
"0.68452096",
"0.68273073",
"0.68136126",
"0.68066984",
"0.68066984",
"0.67940253",
"0.677414",
"0.676706",
"0.6746096",
"0.6712245",
"0.6703995",
"0.6702579",
"0.6688436",
"0.668237",
"0.6670573",
"0.66450006",
"0.66349256",
"0.6630293",
"0.660573",
"0.6594469",
"0.6589472",
"0.65826696",
"0.65642995",
"0.65484124",
"0.6546808"
]
| 0.7754485 | 0 |
Print out the pods of a Deployment and return a list of those lines. Abort if there's an error. | def showPods(dpl:str) -> List[str]:
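    # List every pod in the namespace; runCmd() and the namespace `ns` come from the surrounding script.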
tt = runCmd(f"kubectl -n {ns} get po --no-headers".split())
if tt.stderr:
print(f"Got error: {tt.stderr.decode()}")
sys.exit(1)
else:
tmp_output = tt.stdout.decode().splitlines()
output = []
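        # Keep (and echo) only the lines that contain "<deployment>-".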
for line in tmp_output:
if f"{dpl}-" in line:
print(line)
output.append(line)
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_deployment_logs(namespace, name, tail_lines=TAIL_LINES_DEFAULT):\n pods = []\n try:\n api_response = k8s_client.list_namespaced_pod(namespace, label_selector='release={}'.format(name))\n for api_items in api_response.items:\n pods.append(api_items.metadata.name)\n except ApiException as e:\n print(\"Exception when calling CoreV1Api->list_namespaced_pod: %s\\n\" % e)\n\n # Iterate over list of pods and concatenate logs\n logs = \"\"\n try:\n for pod in pods:\n logs += pod + \"\\n\"\n logs += k8s_client.read_namespaced_pod_log(pod, namespace, tail_lines=tail_lines)\n except ApiException as e:\n logging.error(\"Exception when calling CoreV1Api->read_namespaced_pod_log: %s\\n\" % e)\n return logs",
"def list_pods(req, resp):\n namespace = req.params['namespace']\n return _get_all_pods(namespace)",
"def get_pods(namespace, k8s_cli, selector=\"\"):\n if selector:\n selector = '--selector=\"{}\"'.format(selector)\n cmd = '{} get pod -n {} {} -o json '.format(k8s_cli, namespace, selector)\n return_code, out = run_shell_command(cmd)\n if return_code:\n logger.warning(\"Failed to get pods: %s\", out)\n return None\n return json.loads(out)['items']",
"def list_pods_in_a_deployment(self, namespace, dc):\n pods_in_dc = None\n pods_in_namespace = self.list_pods_in_a_namespace(namespace=namespace, label_selector=\"deploymentconfig\")\n if pods_in_namespace:\n pod_list = pods_in_namespace.items\n pods_in_dc = [\n pod.metadata.name\n for pod in pod_list\n if pod.metadata.annotations[\"openshift.io/deployment-config.name\"] == dc\n ]\n return pods_in_dc",
"def get_pods(self, **kw):\n resource = self.pods\n\n try:\n pod_data = resource.get(**kw)\n log.info(pod_data)\n except exceptions.NotFoundError:\n log.error(\"Failed to get pods: resource not found.\")\n raise\n except Exception:\n log.error(\"Unexpected error.\")\n raise\n\n return [item.metadata.name for item in pod_data.items]",
"def download_logs(self, selected_components: Optional[List[Text]] = None, tail: int = DEFAULT_TAIL,\n noparse: bool = False) -> Tuple[Text, List[Text], List[Tuple[Text, Text]]]:\n # get the current namespace\n namespace = self._kubectl.get_namespace()\n\n # get all available pods\n try:\n available_pods: List[KubernetesResource] = self._kubectl.get_resources(\"pods\")\n except CalledProcessError:\n # if a CalledProcessError is raised when gathering pods, surface an error up to stop the program #\n raise KubectlRequestForbiddenError(f\"Listing pods is forbidden in namespace [{namespace}]. \"\n \"Make sure KUBECONFIG is correctly set and that the correct namespace \"\n \"is being targeted. A namespace can be given on the command line using \"\n \"the \\\"--namespace=\\\" option.\")\n\n # if an error didn't occur getting pods, but none were found, raise a NoPodsError\n if len(available_pods) == 0:\n raise NoPodsError(f\"No pods were found in namespace [{namespace}].\")\n\n # create a list to hold the pods that will be run against\n selected_pods: List[KubernetesResource] = list()\n\n # filter list of pods down to selected components\n if selected_components is not None and len(selected_components) > 0:\n for pod in available_pods:\n # check for a component-name annotation match\n if pod.get_sas_component_name() in selected_components:\n selected_pods.append(pod)\n\n # check if the pod has a container with a name in the selected components list\n else:\n containers: List[Dict] = pod.get_spec_value(KubernetesResource.Keys.CONTAINERS)\n for container in containers:\n name: Text = container.get(\"name\")\n\n if name in selected_components:\n selected_pods.append(pod)\n # break the loop and move to the next pod since this pod is now in the list\n break\n\n # if no pods matched the selected_components filter, raise a NoMatchingPodsError\n if len(selected_pods) == 0:\n raise NoMatchingPodsError(f\"No pods in namespace [{namespace}] matched the provided components filter: \"\n f\"[{', '.join(selected_components)}]\")\n else:\n # if selected components weren't provided, use the complete list of available pods\n selected_pods.extend(available_pods)\n\n # a NoPodsError would have been raised if there are no pods in the namespace and a NoMatchingPodsError would\n # have been raised if no pods matched the selected_components\n # at this point, pods are available for gathering logs\n\n # create the output dir, if needed\n self._output_dir = os.path.join(self._output_dir, datetime.datetime.now().strftime(\"%Y-%m-%dT%H_%M_%S\"))\n os.makedirs(self._output_dir, exist_ok=True)\n\n # create the list of pooled asynchronous processes\n write_log_processes: List[_LogDownloadProcess] = list()\n for pod in selected_pods:\n process = self._pool.apply_async(self._write_log, args=(self._kubectl, pod, tail, self._output_dir,\n noparse))\n download_process = _LogDownloadProcess(pod.get_name(), process)\n write_log_processes.append(download_process)\n\n # run the processes\n timeout_pods: List[Text] = list()\n error_pods: List[Tuple[Text, Text]] = list()\n for process in write_log_processes:\n try:\n err_info: Optional[List[Tuple[Optional[Text], Text]]] = process.get_process().get(timeout=self._wait)\n\n # add the error message, if returned\n if err_info:\n error_pods.extend(err_info)\n except TimeoutError:\n timeout_pods.append(process.get_pod_name())\n\n return os.path.abspath(self._output_dir), timeout_pods, error_pods",
"def get_list_of_containers_from_pod(namespace, pod_name, k8s_cli):\n cmd = \"{} get pod {} -o jsonpath=\\\"{{.spec.containers[*].name}}\\\" -n {}\".format(k8s_cli, pod_name, namespace)\n return_code, out = run_shell_command(cmd)\n if return_code:\n logger.warning(\"Failed to get containers from pod: %s\", out)\n return None\n return out.split()",
"def dinghy_get_pod_logs(req, resp):\n resp.content = api.template(\n 'pod_logs.html'\n )",
"def get_pod_logs(self, label_selector, namespace, container):\n pods = self.get_pods(label_selector, namespace).items\n return [\n self.core_client.read_namespaced_pod_log(\n name=pod.metadata.name, namespace=namespace, container=container\n )\n for pod in pods\n ]",
"def get_pods(self, label_selector, namespace):\n return self.core_client.list_namespaced_pod(namespace, label_selector=label_selector, watch=False)",
"def dinghy_deployment_logs(req, resp, *, \n namespace, name,\n tail_lines=TAIL_LINES_DEFAULT,\n preview=LOGS_PREVIEW_LENGTH):\n if 'tail_lines' in req.params.keys():\n tail_lines = req.params['tail_lines']\n logs = _get_deployment_logs(namespace, name, tail_lines)\n logs_preview = logs[0:preview]\n \n\n if 'json' in req.params.keys():\n if 'preview' in req.params.keys():\n resp.media = {\"logs\": logs_preview}\n else:\n resp.media = {\"logs\": logs}\n else:\n resp.content = api.template(\n 'pod_logs_output.html',\n logs=logs\n )",
"def get_pods_using_pvc(pvc, namespace):\n pods = api.list_pods(namespace)\n mounted_pods = []\n\n for pod in pods.items:\n pvcs = get_pod_pvcs(pod)\n if pvc in pvcs:\n mounted_pods.append(pod)\n\n return mounted_pods",
"def get_pod_names(namespace, k8s_cli, selector=\"\"):\n pods = get_pods(namespace, k8s_cli, selector)\n if not pods:\n logger.info(\"Namespace '%s': Cannot find pods\", namespace)\n return []\n return [pod['metadata']['name'] for pod in pods]",
"def inspect_storage_objects_for_debugging(k8s_ctx: str, dry_run: bool = False):\n cmd = f'kubectl --context={k8s_ctx} get pv,pvc -o=NAME'\n for storage_obj in run_commands([cmd], dry_run):\n cmd = f'kubectl --context={k8s_ctx} describe {storage_obj}'\n if dry_run:\n logging.debug(cmd)\n else:\n p = safe_exec(cmd)\n if p.stdout:\n for line in p.stdout.decode().split('\\n'):\n if line.startswith(\"Status\") or line.startswith(\"Finalizers\"):\n logging.debug(f'{storage_obj} {line}')",
"def _service_list(res, ctx):\n\n if _has_error_code(res):\n return print_errors(res, ctx)\n\n max_len = {'name': 10,\n 'flags': 5,\n 'deployment': 20,\n 'config': 10}\n\n deploys = res['deploys'].values()\n if len(deploys) == 0:\n return \"\"\n\n for deploy in deploys:\n for key in ('name', 'deployment', 'config'):\n max_len[key] = max(len(deploy.get(key, '')), max_len.get(key, 0))\n\n lines = []\n header = \" \".join((\"ID\".ljust(max_len['name'], ' '),\n \"FLAGS\",\n 'DEPLOY'.ljust(max_len['deployment'], ' '),\n 'CONFIG'.ljust(max_len['config'], ' ')))\n lines.append(header)\n\n for deploy in res['deploys'].values():\n line = \" \".join((deploy['name'].ljust(max_len['name'], ' '),\n _build_flags(deploy).ljust(max_len['flags'], ' '),\n deploy['deployment'].ljust(max_len['deployment'], ' '),\n deploy['config'].ljust(max_len['config'], ' ')))\n lines.append(line)\n\n return \"\\n\".join(lines)",
"def get_container_names_per_pod(pod_namespace, pod_name):\n cmd = \"kubectl get pods {} -n {} \" \\\n \"-o jsonpath='{}'\".format(\n pod_name, pod_namespace, \"{.spec.containers[*].name}\",\n )\n try:\n output = subprocess.check_output(\n cmd, shell=True, stderr=subprocess.STDOUT,\n )\n except subprocess.CalledProcessError:\n pass\n output = output.decode().strip()\n if not output:\n log.error(\"Error: Could not collect pod container name(s) for {}/{}\"\n .format(pod_namespace, pod_name))\n return []\n return output.split(\" \")",
"def print_jobs(jobs):\n if len(jobs) > 0:\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(jobs)\n else:\n print('No jobs to print out')",
"def get_pods_status_iterator_by_labels(label_selector, host_ip_filter,\n must_exist=True):\n # TODO: Handle the case of a pod w/ multiple containers.\n # Right now, we pick the status of the first container in the pod.\n cmd = (\"kubectl get pods --all-namespaces -o wide\"\n \" --selector={} -o=jsonpath='{{range .items[*]}}\"\n \"{{@.metadata.name}}{{\\\" \\\"}}\"\n \"{{@.status.containerStatuses[0].ready}}{{\\\" \\\"}}\"\n \"{{@.status.phase}}{{\\\" \\\"}}\"\n \"{{@.spec.nodeName}}{{\\\" \\\"}}\"\n \"{{@.metadata.namespace}}{{\\\"\\\\n\\\"}}'\").format(label_selector)\n try:\n encoded_output = subprocess.check_output(\n cmd, shell=True, stderr=subprocess.STDOUT,\n )\n except subprocess.CalledProcessError as exc:\n log.error(\"command to get status of {} has \"\n \"failed. error code: \"\n \"{} {}\".format(label_selector,\n exc.returncode, exc.output))\n return\n output = encoded_output.decode()\n if output == \"\":\n if must_exist:\n log.warning(\"no pods with labels {} are running on the cluster\"\n .format(label_selector))\n return\n\n # kubectl field selector supports listing pods based on a particular\n # field. However, it doesn't support hostIP field in 1.9.6. Also,\n # it doesn't support set-based filtering. As a result, we will use\n # grep based filtering for now. We might want to switch to this\n # feature in the future. The following filter can be extended by\n # modifying the following kubectl custom-columns and the associated\n # grep command.\n host_ip_filter_cmd = \"kubectl get pods --no-headers \" \\\n \"-o=custom-columns=NAME:.metadata.name,\" \\\n \"HOSTIP:.status.hostIP --all-namespaces | grep -E \\\"{}\\\" | \" \\\n \"awk '{{print $1}}'\"\n host_ip_filter_cmd = host_ip_filter_cmd.format(\n \"|\".join(map(str, host_ip_filter)))\n try:\n filter_output = subprocess.check_output(\n host_ip_filter_cmd, shell=True, stderr=subprocess.STDOUT,\n )\n except subprocess.CalledProcessError as exc:\n log.error(\"command to list filtered pods has \"\n \"failed. error code: \"\n \"{} {}\".format(exc.returncode, exc.output))\n filter_output = filter_output.decode()\n if filter_output == \"\":\n if must_exist:\n log.error(\"No output because all the pods were filtered \"\n \"out by the node ip filter {}.\".format(\n host_ip_filter))\n return\n filtered_pod_list = filter_output.splitlines()\n\n for line in output.splitlines():\n # Example line:\n # name-blah-sr64c 0/1 CrashLoopBackOff\n # ip-172-0-33-255.us-west-2.compute.internal kube-system\n split_line = line.split(' ')\n if split_line[0] not in filtered_pod_list:\n continue\n yield PodStatus(name=split_line[0],\n ready_status=split_line[1],\n status=split_line[2],\n node_name=split_line[3],\n namespace=split_line[4])",
"def waitForDeployment(aNs: str, aDeploy: str):\n count = 1\n while count < MAX_WAIT:\n _ = showPods(aDeploy)\n specStr = \"{.spec.replicas}\"\n tt = runCmd(f\"kubectl -n {ns} get deployment {aDeploy} --output=jsonpath={specStr}\".split())\n if not tt.returncode == 0:\n print(tt.stderr.decode())\n sys.exit(1)\n else:\n replicas = tt.stdout.decode()\n print(f\"Replicas: {replicas}\")\n \n specStr = \"{.status.availableReplicas}\"\n tt = runCmd(f\"kubectl -n {ns} get deployment {aDeploy} --output=jsonpath={specStr}\".split())\n if not tt.returncode == 0:\n print(tt.stderr.decode())\n sys.exit(1)\n else:\n availableReplicas = tt.stdout.decode()\n print(f\"Available: {availableReplicas}\")\n\n if replicas == availableReplicas:\n break\n\n print()\n print(f\"{count}) sleeping for 1 ...\")\n time.sleep(1)\n count += 1\n\n else:\n print(f\"Error: The {aDeploy} deployment never returned to fully up\")\n sys.exit(1)",
"def collect_pods_logs(namespace, output_dir, k8s_cli, logs_from_all_pods=False):\n logger.info(\"Namespace '%s': Collecting pods' logs:\", namespace)\n logs_dir = os.path.join(output_dir, \"pods\")\n\n if logs_from_all_pods:\n pods = get_pod_names(namespace, k8s_cli)\n else:\n pods = []\n for selector in [\"app=redis-enterprise\", \"name=redis-enterprise-operator\"]:\n pods.extend(get_pod_names(namespace, k8s_cli, selector))\n\n if not pods:\n logger.warning(\"Namespace '%s' Could not get pods list - \"\n \"skipping pods logs collection\", namespace)\n return\n\n make_dir(logs_dir)\n\n for pod in pods:\n collect_logs_from_pod(namespace, pod, logs_dir, k8s_cli)",
"def get_jobs(k8s_ctx: str, selector: Optional[str] = None, dry_run: bool = False) -> List[str]:\n cmd = 'kubectl --context={k8s_ctx} get jobs -o json'\n if selector is not None:\n cmd += f' -l {selector}'\n if dry_run:\n logging.info(cmd)\n return list()\n\n p = safe_exec(cmd)\n if not p.stdout:\n # a small JSON structure is always returned, even if there are no jobs\n raise RuntimeError('Unexpected lack of output for listing kubernetes jobs')\n out = json.loads(p.stdout.decode())\n return [i['metadata']['name'] for i in out['items']]",
"def dinghy_post_pod_logs(req, resp, namespace=\"default\", tail_lines=TAIL_LINES_DEFAULT):\n if 'namespace' in req.params.keys():\n namespace = req.params['namespace']\n\n if 'tail_lines' in req.params.keys():\n tail_lines = req.params['tail_lines']\n\n resp.content = api.template(\n 'pod_logs_input.html',\n all_pods=_get_all_pods(namespace=namespace),\n tail_lines=tail_lines\n )",
"def get_deployment_data(cluster_id, namespace_id=None, deployment_id=None):\n # deployment detail\n if deployment_id and namespace_id is not None:\n # creating cell-pod mapping for getting cell details\n cell_pod_map = get_cell_pod_map(cluster_id)\n # getting pod count\n pods_data = [pod for pod in get_pod_data(cluster_id, namespace_id, deployment_id)]\n core_api = client.CoreV1Api()\n apps_api = client.AppsV1Api()\n deployment_cell_list = list()\n deployment_pod_list = list()\n for pod_name in pods_data:\n if pod_name['name'] in cell_pod_map:\n namespaced_pod_info = core_api.read_namespaced_pod(pod_name['name'], namespace_id).metadata.owner_references[0]\n if namespaced_pod_info.kind == 'ReplicaSet':\n replica_set = apps_api.read_namespaced_replica_set(core_api.read_namespaced_pod(\n pod_name['name'], namespace_id).metadata.owner_references[0].name, namespace_id)\n if replica_set.metadata.owner_references[0].name == deployment_id and pod_name['name'] in cell_pod_map \\\n and pod_name['status']=='Running':\n # fetching pods based on deployment\n deployment_pod_list.append(pod_name['name'])\n # fetching cells based on pods and deployment\n deployment_cell_list.append(cell_pod_map[pod_name['name']]['cell_name'])\n else:\n continue\n else:\n continue\n # if there are no pods for the passed deployment\n if len(deployment_pod_list) == 0:\n pods_for_resource_calculation = 'no_pod_resource'\n else:\n pods_for_resource_calculation = deployment_pod_list\n\n deployments_info = {\n 'resource_count': {\n 'cells': len(deployment_cell_list),\n 'pods': len(deployment_pod_list)\n },\n 'resource_info': get_resource_info(cluster_id, 'pods', namespace_id, pods_for_resource_calculation)\n }\n\n # deployment listing\n else:\n if namespace_id:\n deployments_info = [{'value': deployment_item.metadata.name, 'label': deployment_item.metadata.name}\n for deployment_item in\n client.AppsV1beta2Api().list_namespaced_deployment(namespace_id).items]\n else:\n deployments_info = [{'value': deployment_item.metadata.name, 'label': deployment_item.metadata.name}\n for deployment_item in\n client.AppsV1beta2Api().list_deployment_for_all_namespaces().items]\n return deployments_info",
"def get_logs(k8s_ctx: str, label: str, containers: List[str], dry_run: bool = False):\n for c in containers:\n cmd = f'kubectl --context={k8s_ctx} logs -l {label} -c {c} --timestamps --since=24h --tail=-1'\n if dry_run:\n logging.info(cmd)\n else:\n try:\n # kubectl logs command can fail if the pod/container is gone, so we suppress error.\n # We can't combine it into one try-except-finally, because safe_exec should report\n # the command used in DEBUG level using old format with timestamps. New bare format\n # is used only after successful invocation of kubectl logs.\n proc = safe_exec(cmd)\n try:\n # Temporarily modify format for logging because we import true timestamps\n # from Kubernetes and don't need logging timestamps, so we just copy logs\n # verbatim.\n root_logger = logging.getLogger()\n orig_formatter = root_logger.handlers[0].formatter\n root_logger.handlers[0].setFormatter(logging.Formatter(fmt='%(message)s'))\n for line in proc.stdout.decode().split('\\n'):\n if line:\n logging.info(line)\n finally:\n # Ensure logging is restored to previous format\n # type is ignored because orig_formatter can be None\n # and there does not seem to be any other way to get\n # the original formatter from root logger\n root_logger.handlers[0].setFormatter(orig_formatter) # type: ignore\n except SafeExecError:\n pass",
"def list_pods_in_a_namespace(self, namespace, label_selector: Optional[str] = None):\n api_response = None\n try:\n api_response = self.ocp_pods.get(namespace=namespace, label_selector=label_selector)\n except ApiException as e:\n logger.error(\"Exception while getting pods: %s\\n\", e)\n return api_response",
"def list(deployment_id, all_workflows, logger, client, tenant_name):\n utils.explicit_tenant_name_message(tenant_name, logger)\n logger.info('Listing workflows for deployment %s...', deployment_id)\n deployment = client.deployments.get(deployment_id)\n\n workflows = sorted(deployment.workflows, key=lambda w: w.name)\n columns = WORKFLOW_COLUMNS\n hidden_count = 0\n if not all_workflows:\n total_count = len(workflows)\n workflows = [wf for wf in workflows if wf.is_available]\n hidden_count = total_count - len(workflows)\n else:\n columns = columns + ['is_available']\n\n defaults = {\n 'blueprint_id': deployment.blueprint_id,\n 'deployment_id': deployment.id\n }\n if not get_global_json_output():\n workflows = [_format_workflow(wf) for wf in workflows]\n print_data(columns, workflows, 'Workflows:', defaults=defaults)\n if hidden_count:\n logger.info('%d unavailable workflows hidden (use --all to show)',\n hidden_count)",
"def get_list_of_init_containers_from_pod(namespace, pod_name, k8s_cli):\n cmd = \"{} get pod {} -o jsonpath=\\\"{{.spec.initContainers[*].name}}\\\" -n {}\".format(k8s_cli, pod_name, namespace)\n return_code, out = run_shell_command(cmd)\n if return_code:\n logger.warning(\"Failed to get init containers from pod: %s\", out)\n return None\n return out.split()",
"def get_all_pod_names_in_a_namespace(self, namespace):\n list_of_pod_names = list()\n api_response = self.list_pods_in_a_namespace(namespace=namespace)\n if api_response:\n for item in api_response.items:\n list_of_pod_names.append(item[\"metadata\"][\"name\"])\n return list_of_pod_names",
"def print_config(self):\n for pod in self.pods:\n for lb in pod.limbs:\n print '%s limb %s ' % (pod.name, lb.name)\n for br in lb.branches:\n br.printInfo()\n sys.stdout.flush()",
"def form_input_pod_logs(req, resp, *, tail_lines=TAIL_LINES_DEFAULT):\n pod = req.params['pod']\n namespace = req.params['namespace']\n tail_lines = req.params['tail_lines']\n\n logs = _get_pod_logs(pod, namespace, tail_lines)\n\n resp.content = api.template(\n 'pod_logs_output.html',\n logs=logs\n )"
]
| [
"0.6920317",
"0.6094677",
"0.6093674",
"0.5900195",
"0.57868785",
"0.5718521",
"0.56805205",
"0.5641895",
"0.55443937",
"0.55176693",
"0.5475469",
"0.54315686",
"0.54242504",
"0.53848684",
"0.5346442",
"0.5276105",
"0.5257879",
"0.5232856",
"0.5193414",
"0.51866174",
"0.5184946",
"0.51206064",
"0.502564",
"0.5017473",
"0.50132024",
"0.50045484",
"0.4958651",
"0.49462238",
"0.4921734",
"0.4911839"
]
| 0.7404265 | 0 |
Check a Deployment and loop until its spec.replicas == status.availableReplicas. I.e., loop until the Deployment is fully up. Abort if there's any error. | def waitForDeployment(aNs: str, aDeploy: str):
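    # Poll the Deployment until .spec.replicas matches .status.availableReplicas, giving up after MAX_WAIT attempts.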
count = 1
while count < MAX_WAIT:
_ = showPods(aDeploy)
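        # Ask for the desired replica count via a kubectl JSONPath query.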
specStr = "{.spec.replicas}"
tt = runCmd(f"kubectl -n {ns} get deployment {aDeploy} --output=jsonpath={specStr}".split())
if not tt.returncode == 0:
print(tt.stderr.decode())
sys.exit(1)
else:
replicas = tt.stdout.decode()
print(f"Replicas: {replicas}")
specStr = "{.status.availableReplicas}"
tt = runCmd(f"kubectl -n {ns} get deployment {aDeploy} --output=jsonpath={specStr}".split())
if not tt.returncode == 0:
print(tt.stderr.decode())
sys.exit(1)
else:
availableReplicas = tt.stdout.decode()
print(f"Available: {availableReplicas}")
if replicas == availableReplicas:
break
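        # Not fully available yet: report progress, wait one second, and try again.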
print()
print(f"{count}) sleeping for 1 ...")
time.sleep(1)
count += 1
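    # The while-else branch runs only if the loop never hit `break`, i.e. MAX_WAIT attempts were exhausted.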
else:
print(f"Error: The {aDeploy} deployment never returned to fully up")
sys.exit(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait_for_deployment_complete(deployment_name):\n complete = False\n try:\n response = api.read_namespaced_deployment(deployment_name, namespace)\n status = response.status\n if (status.unavailable_replicas is None and\n (status.updated_replicas is None or\n status.updated_replicas == response.spec.replicas) and\n status.replicas == response.spec.replicas and\n status.ready_replicas == response.spec.replicas and\n status.observed_generation == response.metadata.generation):\n log.info(\"Deployment %s is ready\", deployment_name)\n complete = True\n else:\n log.info(\"Deployment %s is NOT ready\", deployment_name)\n except ApiException as exc:\n log.error(\"Exception when waiting for deployment status: %s\\n\", exc)\n return complete",
"def wait_vm_deployment(self, is_setup: bool, params: dict) -> Tuple[\"Status\", dict]:",
"def wait_for_node_namespace_pods(node, namespace):\n pods_ready = False\n while not pods_ready:\n pods_ready = True\n time.sleep(15)\n pods = k8s.get('pod', namespace=namespace)\n for pod in pods:\n pod_status = pod['status']\n if pod_status.get('hostIP') == node: # We are not checking pods with an empty hostIP as we can't tell if it's actually waiting for the current host\n # Pod is located on the host we are monitoring\n pod_phase = pod_status['phase']\n if pod_phase != 'Succeeded': # A succeeded pod is successfully complete\n if pod_phase == 'Running':\n for container_status in pod_status['containerStatuses']:\n if not container_status['ready']:\n pods_ready = False\n break\n else: # A non-running, non-succeeded pod is not ready\n pods_ready = False\n break",
"def check_deployment(version_stack_name, app_name):\n\n print(\"Polling Target Group ({}) until a successful state is reached...\".format(version_stack_name))\n elbv2 = boto3.client('elbv2')\n waiter = elbv2.get_waiter('target_in_service')\n cloudformation = boto3.client('cloudformation')\n response = cloudformation.describe_stack_resources(\n StackName=version_stack_name,\n LogicalResourceId='ALBTargetGroup'\n )\n target_group = response['StackResources'][0]['PhysicalResourceId']\n start_time = datetime.datetime.now()\n try:\n waiter.wait(TargetGroupArn=target_group)\n except botocore.exceptions.WaiterError:\n print('Health check did not pass!')\n response = cloudformation.describe_stack_resources(\n StackName=version_stack_name,\n LogicalResourceId='ECSService'\n )\n service = response['StackResources'][0]['PhysicalResourceId']\n print('Outputting events for service {}:'.format(service))\n response = cloudformation.describe_stack_resources(\n StackName=\"ECS-{}\".format(app_name),\n LogicalResourceId='ECSCluster'\n )\n cluster = response['StackResources'][0]['PhysicalResourceId']\n ecs = boto3.client('ecs')\n response = ecs.describe_services(\n cluster=cluster,\n services=[service]\n )\n for event in [x['message'] for x in response['services'][0]['events']]:\n print(event)\n# print('Deleting CloudFormation stack...')\n# response = cloudformation.delete_stack(\n# StackName=\"MV-{realm}-{app_name}-{version}-{env}\".format(env=os.environ['ENV'], app_name=os.environ['ECS_APP_NAME'], version=os.environ['BUILD_VERSION'], realm=os.environ['REALM'])\n# )\n# waiter = cf.get_waiter('stack_delete_complete')\n# waiter.wait(\n# StackName=\"MV-{realm}-{app_name}-{version}-{env}\".format(env=os.environ['ENV'], app_name=os.environ['ECS_APP_NAME'], version=os.environ['BUILD_VERSION'], realm=os.environ['REALM'])\n# )\n# print('CloudFormation stack deleted.')\n elapsed_time = datetime.datetime.now() - start_time\n print('Health check passed in {}'.format(elapsed_time))\n print(\"Done.\")",
"def wait_for_container():\n for i in xrange(30):\n print(\"Waiting for service to come up\")\n try:\n requests.get(URL).raise_for_status()\n return True\n except Exception as e:\n print e\n sleep(1)\n\n return False",
"def step_impl(context, instance_number):\n num_try = 60\n interval = 10\n success = False\n\n for i in range(num_try):\n time.sleep(interval)\n context.service_instances = context.service.status()['replicaStatus']\n if len(context.service_instances) == int(instance_number):\n success = True\n break\n context.dl.logger.debug(\"Step is running for {:.2f}[s] and now Going to sleep {:.2f}[s]\".format((i + 1) * interval,\n interval))\n\n assert success, \"TEST FAILED: Expected {}, Got {}\".format(instance_number, len(context.service_instances))",
"def check_pod_pvc_status(self, skip_replication_resources=False):\n config.switch_to_cluster_by_name(self.preferred_primary_cluster)\n dr_helpers.wait_for_all_resources_creation(\n self.workload_pvc_count,\n self.workload_pod_count,\n self.workload_namespace,\n skip_replication_resources=skip_replication_resources,\n )",
"def test_get_deployment_runs1(self):\n pass",
"def insync_and_state_check(self):\n self.step('verifying tables are properly synced on all endpoints')\n is_ok = True\n limit, count = 10, 0\n while count < limit:\n try:\n state_check, rc = self.probe('/cluster/pyql/table/state/select')\n assert rc == 200, f\"something wrong happened when checking state table {rc}\"\n for state in state_check['data']:\n if not state['in_sync'] == True or not state['state'] == 'loaded':\n print(f\"found state which was not in_sync=True & 'loaded {state}, retrying\")\n is_ok = False\n self.sync_job_check()\n break\n if is_ok:\n break\n count+=1\n except Exception as e:\n print(f\"something wrong happened when checking state table\")\n break",
"def scale_up_deployments(self):\n try:\n for tier in config.TIERS[::-1]:\n for deployment in self.deployments[tier]:\n if deployment.get(\"scaled_down\", False) is True:\n step = \"Scaling Up Deployment:\\ndeployment={}\\nreplicas={}\".format(\n deployment[\"name\"], deployment[\"replicas\"]\n )\n self.slacker.send_thread_reply(step)\n self.kuber.set_deployment_replicas(\n deployment[\"name\"], deployment[\"replicas\"]\n )\n deployment[\"scaled_down\"] = False\n step = \"Verifying {} Deployments Scaled Up Successfully\".format(tier)\n self.slacker.send_thread_reply(step)\n for deployment in self.deployments[tier]:\n self.kuber.verify_deployment_update(deployment[\"name\"])\n except Exception as e:\n self.raise_step_error(step=step, error=e)",
"async def wait(self, collection, interval=60, timeout=600):\n end = time.time() + timeout\n\n not_responded = self.not_responding_instances(collection)\n\n def get_container(inst):\n try:\n inst.state.docker.get_containers()\n inst.state.docker.responded = True\n except DOCKER_RETRY_EXC:\n logger.debug(\"Docker not ready yet on %s\",\n str(inst.instance.id))\n except Exception as exc:\n logger.debug(\"Got exception on %s: %r\",\n str(inst.instance.id), exc)\n\n # Attempt to fetch until they've all responded\n while not_responded and time.time() < end:\n await gen.multi([collection.execute(get_container, x)\n for x in not_responded])\n\n # Update the not_responded\n not_responded = self.not_responding_instances(collection)\n\n if not_responded:\n await collection.wait(interval)\n\n # Prune the non-responding\n logger.debug(\"Pruning %d non-responding instances.\",\n len(not_responded))\n await collection.remove_instances(not_responded)",
"def test_03_sys_template_ready(self):\n\n # Validate the following\n # If SSVM is in UP state and running\n # 1. wait for listTemplates to show all builtin templates\n # downloaded for all added hypervisors and in “Ready” state\"\n\n for k, v in self.services[\"hypervisors\"].items():\n\n self.debug(\"Downloading BUILTIN templates in zone: %s\" % \n self.zone.id)\n \n list_template_response = list_templates(\n self.apiclient,\n hypervisor=v[\"hypervisor\"],\n zoneid=self.zone.id,\n templatefilter=v[\"templatefilter\"],\n listall=True,\n account='system',\n domainid=self.domain.id\n )\n\n # Ensure all BUILTIN templates are downloaded\n templateid = None\n for template in list_template_response:\n if template.templatetype == \"BUILTIN\":\n templateid = template.id\n\n # Wait to start a downloading of template\n time.sleep(self.services[\"sleep\"])\n \n while True and (templateid != None):\n \n timeout = self.services[\"timeout\"]\n while True: \n template_response = list_templates(\n self.apiclient,\n id=templateid,\n zoneid=self.zone.id,\n templatefilter=v[\"templatefilter\"],\n listall=True,\n account='system',\n domainid=self.domain.id\n )\n \n if isinstance(template_response, list):\n template = template_response[0]\n break\n \n elif timeout == 0:\n raise Exception(\"List template API call failed.\")\n \n time.sleep(1)\n timeout = timeout - 1\n \n # If template is ready,\n # template.status = Download Complete\n # Downloading - x% Downloaded\n # Error - Any other string \n if template.status == 'Download Complete' :\n break\n elif 'Downloaded' not in template.status.split():\n raise Exception\n elif 'Downloaded' in template.status.split():\n time.sleep(self.services[\"sleep\"])\n\n #Ensuring the template is in ready state\n time.sleep(self.services[\"sleep\"])\n \n timeout = self.services[\"timeout\"]\n while True: \n template_response = list_templates(\n self.apiclient,\n id=templateid,\n zoneid=self.zone.id,\n templatefilter=v[\"templatefilter\"],\n listall=True,\n account='system',\n domainid=self.domain.id\n )\n \n if isinstance(template_response, list):\n template = template_response[0]\n break\n \n elif timeout == 0:\n raise Exception(\"List template API call failed.\")\n \n time.sleep(1)\n timeout = timeout - 1\n \n self.assertEqual(\n isinstance(template_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n template = template_response[0]\n\n self.assertEqual(\n template.isready,\n True,\n \"Check whether state of template is ready or not\"\n )\n return",
"def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()",
"def wait_for_ready(self, instance_id, limit, delay=1, pending=False):\r\n for count, new_instance in enumerate(repeat(instance_id), start=1):\r\n instance = self.get_instance(new_instance)\r\n last_reload = lookup(instance, 'lastOperatingSystemReload', 'id')\r\n active_transaction = lookup(instance, 'activeTransaction', 'id')\r\n\r\n reloading = all((\r\n active_transaction,\r\n last_reload,\r\n last_reload == active_transaction\r\n ))\r\n\r\n # only check for outstanding transactions if requested\r\n outstanding = False\r\n if pending:\r\n outstanding = active_transaction\r\n\r\n # return True if the instance has only if the instance has\r\n # finished provisioning and isn't currently reloading the OS.\r\n if instance.get('provisionDate') \\\r\n and not reloading and not outstanding:\r\n return True\r\n\r\n if count >= limit:\r\n return False\r\n\r\n sleep(delay)",
"def test_readiness(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1:\n retry_until_success(host1.assert_is_ready, retries=30)",
"def _check_all_replicas_connected(num_replicas, gateway_port, protocol):\n exec_ids = set()\n exec_id_list = []\n for i in range(num_replicas + 1):\n id_ = _send_request(gateway_port, protocol, request_size=2)[0].text\n exec_ids.add(id_)\n exec_id_list.append(id_)\n print(exec_id_list)\n assert len(exec_ids) == num_replicas",
"def test_get_deployment_runs(self):\n pass",
"def wait_for_instance_status(config, status):\n client = config.create_api_client()\n InstanceId = config.get('InstanceId')\n while True:\n time.sleep(20)\n req = DescribeInstancesRequest.DescribeInstancesRequest()\n result = do_action(client, req)\n items = result[\"Instances\"][\"Instance\"]\n lookups = {item['InstanceId']: item for item in items}\n if lookups[InstanceId]['Status'] == status:\n return\n else:\n click.echo(\"Instance's current status: {}; transfer to status {} ...\".format(\n lookups[InstanceId]['Status'], status\n ))",
"def _snapshot_ready(k8s_ctx: str, k8s_spec_file: pathlib.Path, dry_run: bool = False) -> bool:\n if not k8s_spec_file.exists():\n raise FileNotFoundError(str(k8s_spec_file))\n\n cmd = f'kubectl --context={k8s_ctx} get -f {k8s_spec_file} -o json'\n\n if dry_run:\n logging.info(cmd)\n return True\n\n p = safe_exec(cmd)\n if not p.stdout:\n return False\n\n json_output = json.loads(p.stdout.decode())\n if 'status' not in json_output or 'readyToUse' not in json_output['status']:\n return False\n\n if not isinstance(json_output['status']['readyToUse'], bool):\n raise UserReportError(returncode=CLUSTER_ERROR, message='Unexpected response when checking PVC snapshot readiness')\n\n return json_output['status']['readyToUse']",
"def _wait_for_snapshot(k8s_ctx: str, spec_file: pathlib.Path, attempts: int = 30, secs2wait: int = 20, dry_run: bool = False) -> None:\n for counter in range(attempts):\n if _snapshot_ready(k8s_ctx, spec_file, dry_run):\n break\n time.sleep(secs2wait)\n else:\n raise TimeoutError(f'{spec_file} timed out')",
"def test_wait_for_healthy(main_container):\n # This could take a while\n TIMEOUT = 180\n for i in range(TIMEOUT):\n inspect = main_container.inspect()\n status = inspect[\"State\"][\"Health\"][\"Status\"]\n assert status != \"unhealthy\", \"The container became unhealthy.\"\n if status == \"healthy\":\n break\n time.sleep(1)\n else:\n raise Exception(\n f\"Container status did transition to 'healthy' within {TIMEOUT} seconds.\"\n )",
"async def wait_for_deploy_fake(*args, **kwargs):\n wait_for_deploy_sync_mock(*args, **kwargs)",
"async def wait_for_deploy_fake(*args, **kwargs):\n wait_for_deploy_sync_mock(*args, **kwargs)",
"async def wait_for_deploy_fake(*args, **kwargs):\n wait_for_deploy_sync_mock(*args, **kwargs)",
"def _IsReady(self):\n return self._GetPod()['status']['phase'] != 'Pending'",
"def wait_for_any(self, revision_list):\n # TODO(simonhatch): Simplify these (and callers) since\n # get_revision_to_eval() no longer returns multiple revisions.\n # See http://crbug.com/546695.\n while True:\n if not revision_list or any(r.status == revision_state.RevisionState.NEW\n for r in revision_list): # pragma: no cover\n # We want to avoid waiting forever for revisions that are not started,\n # or for an empty list, hence we fail fast.\n assert False\n\n finished_revision = self.sleep_until_next_revision_ready(revision_list)\n\n # On recipe simulation, sleep_until_next_revision_ready will by default\n # return nothing.\n revisions = [finished_revision] if finished_revision else revision_list\n for revision in revisions:\n if revision:\n revision.update_status()\n if not revision.in_progress:\n return revision",
"def k8s_health_check(timeout, tries, try_sleep, healthz_endpoint):\n # pylint: disable-msg=broad-except\n rc = False\n _tries = tries\n\n valid_endpoints = {\n APISERVER_READYZ_ENDPOINT: 'apiserver',\n SCHEDULER_HEALTHZ_ENDPOINT: 'scheduler',\n CONTROLLER_MANAGER_HEALTHZ_ENDPOINT: 'controller_manager',\n KUBELET_HEALTHZ_ENDPOINT: 'kubelet'}\n\n if healthz_endpoint not in valid_endpoints:\n msg = \"Invalid endpoint: {}\".format(healthz_endpoint)\n LOG.error(msg)\n return rc\n endpoint_name = valid_endpoints.get(healthz_endpoint)\n\n while _tries:\n time.sleep(try_sleep)\n msg = \"Checking {} healthz (Remaining tries: {}\".format(endpoint_name, _tries)\n LOG.debug(msg)\n\n try:\n with time_limit(timeout):\n try:\n kwargs = {\"verify\": False, \"timeout\": 15}\n r = requests.get(healthz_endpoint, **kwargs)\n if r.status_code == 200:\n rc = True\n break\n except Exception:\n rc = False\n except TimeoutException:\n LOG.error('Timeout while checking k8s control-plane component health')\n rc = False\n _tries -= 1\n return rc",
"async def async_wait_start_success(self) -> None:\n try:\n coros = []\n if self.uses_before_pod is not None:\n coros.append(self.uses_before_pod.async_wait_start_success())\n if self.uses_after_pod is not None:\n coros.append(self.uses_after_pod.async_wait_start_success())\n if self.gateway_pod is not None:\n coros.append(self.gateway_pod.async_wait_start_success())\n if self.head_pod is not None:\n coros.append(self.head_pod.async_wait_start_success())\n for shard_id in self.shards:\n coros.append(self.shards[shard_id].async_wait_start_success())\n\n await asyncio.gather(*coros)\n self.logger.debug(f'Deployment started successfully')\n except:\n self.close()\n raise",
"def test_get_deployment_run(self):\n pass",
"def test_get_deployment(self):\n pass"
]
| [
"0.6113765",
"0.59651905",
"0.57463384",
"0.5624836",
"0.5491231",
"0.5444146",
"0.54303986",
"0.53961396",
"0.5384387",
"0.5310088",
"0.5265068",
"0.52341497",
"0.5220307",
"0.5184391",
"0.5174394",
"0.51594555",
"0.51520747",
"0.5116851",
"0.5104893",
"0.51041263",
"0.50581527",
"0.50178903",
"0.50178903",
"0.50178903",
"0.49834302",
"0.49811494",
"0.4978186",
"0.49621424",
"0.49491942",
"0.49477068"
]
| 0.73408425 | 0 |
Fetches the command-line history. Returns a dict mapping line numbers to commands. | def hist():
history_dict = {}
    # build history_dict: readline history items are 1-indexed, so keys start at 1
for i in range(readline.get_current_history_length()):
        history_dict[i+1] = readline.get_history_item(i+1)
return history_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_history(*args, **kwargs):\n return collect_history(*args, **kwargs)",
"def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))",
"def _grab_history(self):\n self.data['history_lines'] = []\n self.data['history_file'] = None\n self.data['history_encoding'] = None\n self.data['headings'] = []\n self.data['history_last_release'] = ''\n self.data['history_insert_line_here'] = 0\n default_location = None\n config = self.setup_cfg.config\n if config and config.has_option('zest.releaser', 'history_file'):\n default_location = config.get('zest.releaser', 'history_file')\n history_file = self.vcs.history_file(location=default_location)\n self.data['history_file'] = history_file\n if not history_file:\n logger.warn(\"No history file found\")\n return\n logger.debug(\"Checking %s\", history_file)\n history_lines, history_encoding = read_text_file(history_file)\n history_lines = history_lines.split('\\n')\n headings = utils.extract_headings_from_history(history_lines)\n if not headings:\n logger.warn(\"No detectable version heading in the history \"\n \"file %s\", history_file)\n return\n self.data['history_lines'] = history_lines\n self.data['history_encoding'] = history_encoding\n self.data['headings'] = headings\n\n # Grab last header.\n start = headings[0]['line']\n if len(headings) > 1:\n # Include the next header plus underline, as this is nice\n # to show in the history_last_release.\n end = headings[1]['line'] + 2\n else:\n end = len(history_lines)\n history_last_release = '\\n'.join(history_lines[start:end])\n self.data['history_last_release'] = history_last_release\n\n # Add line number where an extra changelog entry can be inserted. Can\n # be useful for entry points. 'start' is the header, +1 is the\n # underline, +2 is probably an empty line, so then we should take +3.\n # Or rather: the first non-empty line.\n insert = start + 2\n while insert < end:\n if history_lines[insert].strip():\n break\n insert += 1\n self.data['history_insert_line_here'] = insert",
"def commit_history(cli):\n result = []\n record = OrderedDict()\n for line in cli.splitlines():\n r = re.search(' ([A-Z][a-z]+(?: ID)?): (.*?) +([A-Z][a-z]+): (.*)', line)\n if not r:\n continue\n record[r.group(1)] = r.group(2)\n record[r.group(3)] = r.group(4)\n if r.group(3) == 'Comment':\n result.append(record)\n record = OrderedDict()\n return result",
"def history():",
"def History(self):\n return self.historydict.get('history', [])",
"def get_history(self):\n return self.__history[:]",
"def get_cache_history_items(self):\n #gdb.execute(\"p cache->history_items\")\n history_items = ZabbixHashset(gdb.parse_and_eval ('cache->history_items'))\n self.data = history_items.parse()",
"def get_history(self):\n return self.history",
"def history(self):\n return self.info['history']",
"def read():\n with open(GlobalVariables.get_instance().get('history_filename'), 'rb') as fin:\n lines = []\n for line in fin.readlines():\n lines.append(line.replace('\\n', '').strip())\n fin.close()\n return lines",
"def load_history(args):\n # history is a json document as a big dictionary\n # the keys are date/timestamps. The result is a list of pairlists\n # only will return the 'relevant' pairs, meaning the most recent\n # RELEVANT_HISTORY ones.\n if os.path.isfile(args.history):\n with open(args.history, 'r') as h:\n metahistory = json.load(h)\n else:\n metahistory = {}\n return prune_history(metahistory, args.relevant_history)",
"def history(command):\n namespace = app.main(command)\n assert namespace.command == 'h' or namespace.command == \"history\"",
"def keep_history(command):\n global history\n if command.split()[0] != 'replay':\n history.append(command)\n return history",
"def history(self):\n return self._history",
"def history(self):\n return self._history",
"def get_history(hdr):\n return hdr['HISTORY']",
"def history(self, maxresults=None, mindate=None):\n server = self._server._server.resource(self._server.name).connect()\n return server.history(maxresults=maxresults, mindate=mindate,\n accountID=self._server.accountID, librarySectionID=self.sectionKey)",
"def get_history(self):\r\n\r\n return self.board_history",
"def history(self):\n return self.board.history",
"def get_history(page):\n headings = page.filter_headings()\n idx = [i for i, head in enumerate(headings) \n if 'History' in head or 'history' in head]\n if not idx:\n return \"\"\n sections = page.get_sections(include_headings=True)\n history = str(sections[idx[0]+1].strip_code())\n return history",
"def phist():\n history = hist();\n for line in history:\n print(line, \":\", history[line])",
"def get_order_history(self):\n return self.__call__('orders', 'getorderhistory')",
"def _get_history_data(self) -> List[Dict[str, Any]]:\n try:\n with open(self._path.as_posix(), \"r\", encoding=\"utf-8\") as history_file:\n data = json.load(history_file)\n data.append(History._get_empty_session_object())\n return data\n except FileNotFoundError:\n self._path.touch()\n return History._get_empty_json_object()\n except json.decoder.JSONDecodeError:\n return History._get_empty_json_object()",
"def history(self, per_page=None, page=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'history')\r\n return http.Request('GET', url, params), parsers.parse_json",
"def history(self, key, _from='-', _to='+', _desc=True):\n return [self.klass.from_json(_object)\n for _object in self.storage.history(key, _from, _to, _desc)]",
"def history(self):\n alembic.command.history(self.alembic_config(), verbose=True)",
"def load_history_strings(self) -> Iterable[str]:\n while False:\n yield",
"def init_readline():\n if g.command_line:\n return\n\n if has_readline:\n g.READLINE_FILE = os.path.join(get_config_dir(), \"input_history\")\n\n if os.path.exists(g.READLINE_FILE):\n readline.read_history_file(g.READLINE_FILE)\n dbg(c.g + \"Read history file\" + c.w)",
"def history(self, maxresults=9999999, mindate=None):\n server = self._server.resource(self.name).connect()\n return server.history(maxresults=maxresults, mindate=mindate, accountID=self.accountID)"
]
| [
"0.67691666",
"0.6743514",
"0.6678989",
"0.6584406",
"0.65492815",
"0.65242946",
"0.6466275",
"0.6459106",
"0.64207",
"0.6397794",
"0.6306527",
"0.62967515",
"0.6266161",
"0.6215038",
"0.6183607",
"0.6183607",
"0.6160345",
"0.61151826",
"0.61032844",
"0.6101701",
"0.60996354",
"0.6066852",
"0.60612",
"0.6056437",
"0.5998234",
"0.59570986",
"0.5920037",
"0.5884747",
"0.58696747",
"0.5837251"
]
| 0.73838085 | 0 |
returns the line at said number | def getline(number):
number = int(number)
return readline.get_history_item(number) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_line(file, linenum):\n try:\n with open(file, \"r\") as f:\n return f.readlines()[linenum - 1].replace(\"\\n\", \"\")\n except:\n return f\"[ERROR]: could not open '{file}'\"",
"def get_line(self, lnum):\n return self._get_line(lnum - self.LINE_NUM_BASE)",
"def _get_line(self, lnum):\n start, end = self._get_linespan(lnum)\n return self.input[start:end]",
"def line(self, n):\n return self.__contents[n]",
"def getline(self, lnum=None):\n return self._vim.current.buffer[lnum] if lnum else self._vim.current.line",
"def line(self) -> int:",
"def line_no(self):\n return self._line_no",
"def _get_line(self, line: int) -> str:\n line_offsets_with_sentinel = self._line_offsets + [len(self._text)]\n return self._text[line_offsets_with_sentinel[line]:line_offsets_with_sentinel[line+1]]",
"def line_number(self):\n return self._line_number",
"def get_corresponding_lineno(self, lineno: int) -> int:\n for template_line, code_line in reversed(self.debug_info):\n if code_line <= lineno:\n return template_line\n return 1",
"def get_line(self, path, line):\n\t\tlines = self.find_source(path)\n\t\tif lines == None:\n\t\t\treturn None\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn lines[line - 1]\n\t\t\texcept IndexError:\n\t\t\t\treturn None",
"def currentchunk(self, linenumber):\r\n\r\n if linenumber > self.total_lines:\r\n logger.warning(\"Line number requested is greater than total lines in file. Returning last line.\")\r\n linenumber = self.total_lines\r\n elif linenumber < 0:\r\n logger.warning(\"Line number requested is smaller than 0. Returning first line.\")\r\n linenumber = 0\r\n\r\n logger.debug(f\"Loading line {linenumber}\")\r\n if linenumber < self.linechunk:\r\n lineschunk = 0\r\n else:\r\n lineschunk = linenumber // self.linechunk * self.linechunk + 1\r\n return lineschunk",
"def __get_line(file_path: str, line_no: int, errors: str = 'ignore') -> str:\n try:\n with open(file_path, mode='r',\n encoding='utf-8', errors=errors) as f:\n for line in f:\n line_no -= 1\n if line_no == 0:\n return line\n return ''\n except IOError:\n LOG.error(\"Failed to open file %s\", file_path)\n return ''",
"def line(self) -> int:\n return self._line",
"def get_line_nr(view, point):\n return view.rowcol(point)[0] + 1",
"def lineNumber( self ):\n return self.commands[0].lineNumber if len(self.commands) >= 1 else None",
"def line(self):\n return self[\"line\"]",
"def line(self):\n return self[\"line\"]",
"def line(self):\n return self[\"line\"]",
"def line(self):\n return self[\"line\"]",
"def get_line_no(obj):\n try:\n lineno = getsourcelines(obj)[1]\n except:\n # no code found\n lineno = None\n return lineno",
"def _get_linespan(self, lnum):\n lcount = self.get_linecount()\n _, q, _ = slice(lnum).indices(lcount)\n if q < 0 or q >= lcount:\n raise IndexError(\"line number %d not in 0..%d\" % (q, lcount))\n\n start = self.__linepos[q] + 1\n if q < lcount - 1:\n end = self.__linepos[q + 1]\n else:\n end = len(self.input) - 1\n\n return start, end + 1",
"def line(self):\n if self.__line is None:\n left = self.__source.rfind(\"\\n\", 0, self.__offset) + 1\n right = self.__source.find(\"\\n\", self.__offset)\n\n self.__line = self.__source[left : right]\n self.__lineOffset = self.__offset - left\n\n return self.__line",
"def get_linenumber():\n\n # inspect.stack()[0][2] returns line number in this function\n lineno = str(inspect.stack()[1][2])\n\n return lineno",
"def _get_sample_offset(self, line_num):\n assert line_num <= self._count_lines_in_file(\n TestPaths.get_file_path(AFewLinesLogParams.FILE_NAME)\n )\n return line_num * AFewLinesLogParams.SINGLE_LINE_LENGTH",
"def get_current_line(self, document):\r\n return document.get_iter_at_mark(document.get_insert()).get_line() + 1",
"def lineNumber(self):\n if self.__lineNumber is None:\n self.__lineNumber = self.__source.count(\"\\n\", 0, self.__offset) + 1\n\n return self.__lineNumber",
"def linenum(self):\n return self.tos().linenum",
"def get_line(cls, frame, sys_context=None):\n\t\tcode = cls._dispatch_frame(frame)\n\n\t\tif not code: \n\t\t\treturn ''\n\t\t\n\t\treturn code.splitlines()[frame.f_lineno]",
"def find_next_number(line, pos=0):\n m = number_re.search(line[pos:])\n if m:\n span = m.span()\n return (span[0]+pos,span[1]+pos)"
]
| [
"0.7743274",
"0.7708246",
"0.7590957",
"0.74724716",
"0.73670316",
"0.7362971",
"0.73194325",
"0.7204581",
"0.71783227",
"0.7120488",
"0.7118482",
"0.6980352",
"0.69720906",
"0.68986243",
"0.6873201",
"0.6799118",
"0.6754275",
"0.6754275",
"0.6754275",
"0.6754275",
"0.6750012",
"0.6666333",
"0.66365093",
"0.66016",
"0.6593438",
"0.6578958",
"0.65767974",
"0.6573616",
"0.65384823",
"0.65316564"
]
| 0.7848607 | 0 |
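
A minimal sketch of the behaviour behind the getline entry above, assuming a Unix build of Python where the standard readline module is available; the two commands added to the history are made up for illustration:

    import readline

    # Seed the in-process history (illustrative commands only).
    readline.add_history("ls -la")
    readline.add_history("git status")

    def getline(number):
        # readline history items are 1-indexed
        return readline.get_history_item(int(number))

    print(getline(2))  # -> git status
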
Prints the command line history. | def phist():
history = hist();
for line in history:
print(line, ":", history[line]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def history(command):\n namespace = app.main(command)\n assert namespace.command == 'h' or namespace.command == \"history\"",
"def history():",
"def print_history(self):\n self.game_started = False\n for state in self.history:\n self.__draw_board(state)",
"def print_history(hist):\n print_triangle(triangle)\n for past_triangle in hist:\n print_triangle(past_triangle)\n\n print()",
"def show_history_log(self):\n self.visual.print_enum(self.visual.history_log)",
"def _hist_show(ns, *args, **kwargs):\n try:\n commands = _hist_get(ns.session,\n slices=ns.slices,\n start_time=ns.start_time,\n end_time=ns.end_time,\n datetime_format=ns.datetime_format)\n except ValueError as err:\n print(\"history: error: {}\".format(err), file=sys.stderr)\n return\n if ns.reverse:\n commands = reversed(list(commands))\n if not ns.numerate and not ns.timestamp:\n for c, _, _ in commands:\n print(c)\n elif not ns.timestamp:\n for c, _, i in commands:\n print('{}: {}'.format(i, c))\n elif not ns.numerate:\n for c, ts, _ in commands:\n dt = datetime.datetime.fromtimestamp(ts).ctime()\n print('({}) {}'.format(dt, c))\n else:\n for c, ts, i in commands:\n dt = datetime.datetime.fromtimestamp(ts).ctime()\n print('{}:({}) {}'.format(i, dt, c))",
"def history_main(args=None, stdin=None):\n hist = builtins.__xonsh_history__\n ns = _hist_parse_args(args)\n if ns:\n _HIST_MAIN_ACTIONS[ns.action](ns, hist)",
"async def history(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)",
"def do_hist(self, args):\n print(self._hist)",
"def history(self):\n alembic.command.history(self.alembic_config(), verbose=True)",
"def history():\n return apology(\"TODO\")",
"def history():\n return apology(\"TODO\")",
"def history():\n return apology(\"TODO\")",
"def GetHistory(index=0):\n if index == \"clear\":\n state_mgr.entire_history = []\n else:\n print state_mgr.entire_history[int(index):]",
"def print_history(self, appconfigs, verbose, base, head):\n for appconfig in appconfigs:\n self.stdout.write(\n self.style.SUCCESS(\"Migrations for {} on database {}\".format(appconfig.name, appconfig.db.alias))\n )\n for rev in appconfig.script.walk_revisions(base=base or \"base\", head=head or \"heads\"):\n self.stdout.write(\n rev.cmd_format(verbose=verbose, include_branches=True, include_doc=True, include_parents=True)\n )",
"def history(name):\n from pybel.resources.arty import get_knowledge_history\n from pybel.resources.document import get_bel_knowledge_hash\n\n for path in get_knowledge_history(name):\n h = get_bel_knowledge_hash(path.as_posix())\n click.echo('{}\\t{}'.format(path, h))",
"def _append_history(ds):\n try:\n history = ds.attrs['history']\n except KeyError:\n history = \"\"\n now = datetime.datetime.now()\n prog = __file__ # os.path.basename(__file__)\n history = (now.strftime(\"%a %b %d %H:%M:%S %Y\") +\n \": {} {}\\n\".format(prog, \" \".join(sys.argv[1:])) +\n history)\n ds.attrs['history'] = history",
"def command_line_plot(history, x_precision=1, y_precision=20, x_start=0):\n whole_max = max(history)\n whole_min = min(history)\n\n history = history[x_start:]\n max_entry = max(history)\n min_entry = min(history)\n\n y_step = int((max_entry - min_entry) / y_precision)+1\n x_step = int(len(history) / x_precision)\n\n # plot the y axis and the data\n for y in range(int(min_entry), int(max_entry)+2, y_step)[::-1]:\n print('%10d |' % y, end='')\n for i in range(0, len(history), x_step):\n h = history[i]\n print('*' if y <= h < y+y_step else ' ', end='')\n print()\n\n # plot the x axis\n print(' '*11 + '-'*(x_precision+1))\n print(' '*12, end='')\n\n for t in range(0, len(history), x_step):\n if t/x_step % 10 == 0:\n label = '%d' % t\n print(label.ljust(10, ' '), end='')\n\n print(\"(+%d)\" % x_start)\n print(\"min/max y: %d, %d\" % (whole_min, whole_max))",
"def history(self, update, context):\n\n message = update.message.text.lower().split(\" \")\n user = self.User(update)\n output = \"\"\n if message[1] == \"show\":\n if not self.data_base.has_history(user):\n output = \"you don't have any history\"\n self.data_base.log(user, update.message.text, output)\n else:\n output = self.data_base.show_history(user)\n if len(output) > 4096:\n output = output[-4096::]\n self.data_base.log(user, update.message.text, \"Successfully showed history\")\n\n elif message[1] == \"clear\":\n if not self.data_base.has_history(user):\n output = \"your history is already clean\"\n else:\n self.data_base.clear_history(user)\n output = \"Clean\"\n self.data_base.log(user, update.message.text, output)\n else:\n output = \"Looks like you have a little mistake\\n\" \\\n \"the correct way of using the /history command is:\\n\" \\\n \"/history show\\n\" \\\n \"/history clear\"\n self.data_base.log(user, update.message.text, output)\n user.send_message(output)",
"def test_show_cmd():\n FNAME = 'xonsh-SESSIONID.json'\n FNAME += '.show_cmd'\n cmds = ['ls', 'cat hello kitty', 'abc', 'def', 'touch me', 'grep from me']\n\n def format_hist_line(idx, cmd):\n \"\"\"Construct a history output line.\"\"\"\n return ' {:d}: {:s}\\n'.format(idx, cmd)\n\n def run_show_cmd(hist_args, commands, base_idx=0, step=1):\n \"\"\"Run and evaluate the output of the given show command.\"\"\"\n stdout.seek(0, io.SEEK_SET)\n stdout.truncate()\n history._hist_main(hist, hist_args)\n stdout.seek(0, io.SEEK_SET)\n hist_lines = stdout.readlines()\n yield assert_equal, len(commands), len(hist_lines)\n for idx, (cmd, actual) in enumerate(zip(commands, hist_lines)):\n expected = format_hist_line(base_idx + idx * step, cmd)\n yield assert_equal, expected, actual\n\n hist = History(filename=FNAME, here='yup', **HIST_TEST_KWARGS)\n stdout = io.StringIO()\n saved_stdout = sys.stdout\n sys.stdout = stdout\n\n with mock_xonsh_env({'HISTCONTROL': set()}):\n for ts,cmd in enumerate(cmds): # populate the shell history\n hist.append({'inp': cmd, 'rtn': 0, 'ts':(ts+1, ts+1.5)})\n\n # Verify an implicit \"show\" emits show history\n for x in run_show_cmd([], cmds):\n yield x\n\n # Verify an explicit \"show\" with no qualifiers emits\n # show history.\n for x in run_show_cmd(['show'], cmds):\n yield x\n\n # Verify an explicit \"show\" with a reversed qualifier\n # emits show history in reverse order.\n for x in run_show_cmd(['show', '-r'], list(reversed(cmds)),\n len(cmds) - 1, -1):\n yield x\n\n # Verify that showing a specific history entry relative to\n # the start of the history works.\n for x in run_show_cmd(['show', '0'], [cmds[0]], 0):\n yield x\n for x in run_show_cmd(['show', '1'], [cmds[1]], 1):\n yield x\n\n # Verify that showing a specific history entry relative to\n # the end of the history works.\n for x in run_show_cmd(['show', '-2'], [cmds[-2]],\n len(cmds) - 2):\n yield x\n\n # Verify that showing a history range relative to the start of the\n # history works.\n for x in run_show_cmd(['show', '0:2'], cmds[0:2], 0):\n yield x\n for x in run_show_cmd(['show', '1::2'], cmds[1::2], 1, 2):\n yield x\n\n # Verify that showing a history range relative to the end of the\n # history works.\n for x in run_show_cmd(['show', '-2:'], \n cmds[-2:], len(cmds) - 2):\n yield x\n for x in run_show_cmd(['show', '-4:-2'], \n cmds[-4:-2], len(cmds) - 4):\n yield x\n\n sys.stdout = saved_stdout\n os.remove(FNAME)",
"def help_list(self):\n help_str = \"\"\"Lists command(s) from history in a flexible/searchable way.\n\n Usage: list [arg]\n\n Where arg is:\n no arg -> list most recent command\n arg is integer -> list one history item, by index\n a..b, a:b, a:, ..b -> list spans from a (or start) to b (or end)\n arg is string -> list all commands matching string search\n arg is /enclosed in forward-slashes/ -> regular expression search\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))",
"def run_history(self, expanded, unexpanded) : \n\t\toptions, args = self.getopt([\"clear\", \"user+\"], unexpanded)\n\t\tif (options is None) and (args is None) :\n\t\t\treturn -1\t# message was already displayed in self.getopt()\n\t\tif args :\n\t\t\treturn self.errormessage(\"Doesn't need any other argument\")\n\t\thistory = self.getHistory()\n\t\tif history is not None :\n\t\t\tif options.has_key(\"clear\") :\n\t\t\t\t# we want to clear it, UpdateHistory will take care of permissions\n\t\t\t\tself.UpdateHistory(\"history --clear\", clear=1)\n\t\t\telse :\n\t\t\t\t# we just want to see it.\n\t\t\t\t# Someone who can modify the .zshell_history can see all commands\n\t\t\t\tnewhistory = history.document_src()\n\t\t\t\tif not self.HasPerms(history, \"Change DTML Documents\", verbose=0) :\n\t\t\t\t\tif options.has_key(\"user\") :\n\t\t\t\t\t\treturn self.errormessage(\"You're not allowed to use this option\")\n\t\t\t\t\t# a non-Manager user can only see its commands\n\t\t\t\t\t(username, dummy) = self.WhoAmI()\n\t\t\t\t\tlines = filter(lambda line, u=username: line and (string.split(line, ',')[1] == u), string.split(newhistory, '\\n'))\n\t\t\t\t\tnewhistory = string.join(map(lambda line: string.split(line, ',')[2], lines), \"\\n\")\n\t\t\t\telse :\n\t\t\t\t\t# The person has sufficient permissions\n\t\t\t\t\t# to list only some username's commands\n\t\t\t\t\tnewh = []\n\t\t\t\t\tfor line in filter(None, string.split(newhistory, '\\n')) :\n\t\t\t\t\t\tcmduser = string.split(line, ',')[1]\n\t\t\t\t\t\t# not optimal, but works:\n\t\t\t\t\t\tif self.match_anystring(\"user\", cmduser, options) :\n\t\t\t\t\t\t\tnewh.append(line)\n\t\t\t\t\tnewhistory = string.join(newh, '\\n')\n\t\t\t\tself.htmlmessage(string.replace(newhistory, '\\n', '<BR>\\n'), safe=1)\n\t\t\t\tself.printf(\"%s\\n\" % newhistory)\n\t\telse :\n\t\t\treturn self.errormessage(\"No history available\")",
"def history():\n \n user_id = session[\"user_id\"]\n history_list = hist(user_id, db)\n return render_template('history.html', history=history_list)",
"def print_command(self):\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n print(' '.join(command))",
"def history():\n backup_history()\n yield\n reset_history()",
"def show_history(self, show_history):\n\n self._show_history = show_history",
"def show_history(self, show_history):\n\n self._show_history = show_history",
"def get(self):\n res = ''\n for hist in self.history:\n res += (str(hist) + '\\n')\n return res",
"def do_rxn_history(self, arg):\n print('RXN History currently only supports obtaining RxNorm History for Retired or Alien RxCUI')\n cmd_call = 'rxn_history'\n return self.run_cmd(arg, cmd_call)",
"def print_screen(history):\n\n # Extract the angles from the distance data. Usually this won't change \n # from scan to scan, but there are scanning algorithms where that may not \n # be the case \n angles = []\n for h in history:\n angles = list(set(angles + h.keys())) \n angles.sort()\n\n # Create a 2D grid of characters. Essentially a \"screen buffer\"\n buff = {}\n for angle in angles:\n buff[angle] = ' '.rjust(120)\n\n \n blips = ['.', '*', '#', '@']\n blips = blips[-len(history):] # if we only have 2, take last 2 blips\n\n # Plot blips onto buffer \n for h in history:\n blip = blips.pop(0) if len(blips) else '.'\n for angle in angles:\n if angle not in h: continue\n dist = h[angle]\n if dist < 120:\n buff[angle] = set_char_at(buff[angle], dist, blip)\n\n # Output\n print \"\\n\\n\\n\"\n for angle in angles:\n obstacle = 'x' if '@' in buff[angle][0:30] else ' '\n print \"%s %s %s\" % (str(angle).rjust(5), obstacle, buff[angle])\n print '20cm'.rjust(30) + '50cm'.rjust(30) + '1m'.rjust(50)"
]
| [
"0.7362004",
"0.70220554",
"0.6984039",
"0.68035895",
"0.6690122",
"0.6628696",
"0.66093284",
"0.65976226",
"0.6401826",
"0.6313359",
"0.6234132",
"0.6234132",
"0.6234132",
"0.61766535",
"0.61704844",
"0.6063267",
"0.6049553",
"0.6024649",
"0.60210574",
"0.5991742",
"0.5964676",
"0.5950077",
"0.5932275",
"0.59212834",
"0.59194356",
"0.5919023",
"0.5919023",
"0.59143436",
"0.59105474",
"0.5861261"
]
| 0.7108759 | 1 |
copies the line to your clipboard. Works on OS X. | def copyline(number):
my_line = getline(number)
my_command = "echo '" + my_line + "' | pbcopy"
os.system(my_command) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clipboard_copy(text):\n result = subprocess.run(\n # \"primary\" because \"clipboard\" doesn't seem to work for all apps\n # you must paste with middle click\n [\"xclip\", \"-selection\", \"primary\", \"-l\", \"1\"],\n input=bytes(text, encoding=\"utf-8\")\n )\n if result.returncode == 0:\n pass\n else:\n print(\"Error copying\")",
"def _(event):\n deleted = line.delete_before_cursor(count=-line.document.get_start_of_line_position())\n line.set_clipboard(ClipboardData(deleted))",
"def clipboard(self, data):\n p = subprocess.Popen([\"xclip\", \"-selection\", \"clipboard\"], stdin=subprocess.PIPE)\n p.stdin.write(data.encode())\n p.stdin.close()",
"def copy_to_clipboard(self, txt):\r\n cmd = 'echo \"' + txt.strip() + '\"|clip'\r\n return subprocess.check_call(cmd, shell=True)",
"def copy_to_clipboard(input):\n #\n # Define Tk Window and Prevent from Showing\n #\n root = tk.Tk()\n root.withdraw()\n #\n # Clear Clipboard and Append Text\n #\n root.clipboard_clear()\n root.clipboard_append(input)",
"def set_clipboard(text):\r\n from Tkinter import Tk\r\n r = Tk()\r\n r.withdraw()\r\n r.clipboard_clear()\r\n r.clipboard_append(text)\r\n r.destroy()",
"def copyToClipboard(copy_str):\n\tcopier = Tk()\n\t# keep the window from showing\n\tcopier.withdraw()\n\tcopier.clipboard_clear()\n\t# text saved to clipboard\n\tcopier.clipboard_append(copy_str)\n\tcopier.destroy()",
"def write_to_paste_buffer(txt):\n pyperclip.copy(txt)",
"def do_copy_button( self, event ):\n #rint( \" do_copy_button -- this is all \")\n data = self.msg_text.get( 1.0, Tk.END )\n pyperclip.copy( data )",
"def copy(self, event):\n selection = self.get_selection()\n if not selection:\n return []\n start_row, start_col, end_row, end_col = selection\n data = u''\n rows = range(start_row, end_row + 1)\n for row in rows:\n columns = range(start_col, end_col + 1)\n for idx, column in enumerate(columns, 1):\n if idx == len(columns):\n # if we are at the last cell of the row, add new line instead\n data += self.GetCellValue(row, column) + \"\\n\"\n else:\n data += self.GetCellValue(row, column) + \"\\t\"\n text_data_object = wx.TextDataObject()\n text_data_object.SetText(data)\n if wx.TheClipboard.Open():\n wx.TheClipboard.SetData(text_data_object)\n wx.TheClipboard.Close()\n else:\n wx.MessageBox(\"Can't open the clipboard\", \"Warning\")",
"def copy_to_clipboard(show_string, copy_string):\n copy_command = \"echo %s | pbcopy\" % (copy_string)\n print \"%s | bash='/bin/bash' param1='-c' param2='%s' terminal=false\" \\\n % (show_string, copy_command)",
"def copy_to_clipboard (string):\n #\n # an external program is used for copying the password to the clipboard\n #\n is_copied = False\n\n if sys.platform == \"darwin\":\n #\n # on OSX\n #\n clip_copy_exe = \"pbcopy\"\n elif 'DISPLAY' in os.environ:\n #\n # on Linux/Un*x\n #\n clip_copy_exe = \"xclip\"\n try:\n pb = subprocess.Popen(clip_copy_exe,\n stdin=subprocess.PIPE,\n stdout=open(\"/dev/null\", \"w\"),\n stderr=open(\"/dev/null\", \"w\"))\n pb.communicate (string)\n pb.wait ( )\n if pb.returncode == 0:\n is_copied = True\n else:\n is_copied = False\n logging.warning (\"Install '%s' for clipboard support\" % clip_copy_exe)\n except:\n is_copied = False\n\n return is_copied",
"def copyCommand(self):\n\n selection = self.selectedIndexes()\n\n if selection:\n rows = [index.row() for index in selection]\n columns = [index.column() for index in selection]\n if len(rows) == 4:\n model = self.proxyModel.sourceModel()\n row = rows[3]\n column = columns[3]\n command = model.dataset.data[row][column].cell\n QApplication.clipboard().setText(command)",
"def _set_clipboard_text(text):\n clipboard = Clipboard()\n clipboard.set_text(text) # Restore previous clipboard text.\n clipboard.copy_to_system()",
"def set_clipboard(content, *args, **kwargs):\n G.DEVICE.set_clipboard(content, *args, **kwargs)",
"def read_from_clipboard():\n\n return pyperclip.paste()",
"def clip_copy(num):\n if g.browse_mode == \"ytpl\":\n\n p = g.ytpls[int(num) - 1]\n link = \"https://youtube.com/playlist?list=%s\" % p['link']\n\n elif g.browse_mode == \"normal\":\n item = (g.model.songs[int(num) - 1])\n link = \"https://youtube.com/watch?v=%s\" % item.ytid\n\n else:\n g.message = \"clipboard copy not valid in this mode\"\n g.content = generate_songlist_display()\n return\n\n if has_pyperclip:\n\n try:\n pyperclip.copy(link)\n g.message = c.y + link + c.w + \" copied\"\n g.content = generate_songlist_display()\n\n except Exception as e:\n xprint(link)\n xprint(\"Error - couldn't copy to clipboard.\")\n xprint(e.__doc__)\n xprint(\"\")\n input(\"Press Enter to continue.\")\n g.content = generate_songlist_display()\n\n else:\n g.message = \"pyperclip module must be installed for clipboard support\\n\"\n g.message += \"see https://pypi.python.org/pypi/pyperclip/\"\n g.content = generate_songlist_display()",
"def imageToClipboard(self, imagePath):\n subprocess.Popen([\"xclip\", \"-selection\", \"clipboard\", \"-t\", \"image/png\", \"-i\", imagePath])",
"def copy_entry(self, row, col):\n if self.results and self.settings['auto_copy']:\n row, col = self.table.currentRow(), self.table.currentColumn()\n to_copy = self.results[row][col]\n self.clipboard.setText(to_copy)",
"def on_copyButton_clicked(self):\n aw = self.__vm.activeWindow()\n if aw is not None:\n lines = []\n for index in range(self.historyList.count()):\n # selectedItems() doesn't seem to preserve the order\n itm = self.historyList.item(index)\n if itm.isSelected():\n lines.append(itm.text())\n eol = aw.getLineSeparator()\n txt = eol.join(lines) + eol\n aw.insert(txt)\n self.historyList.setFocus()",
"def copy(text, cmd=None):\n\n # If no command is specified (i.e. the config option is empty) try\n # to find a reasonable default based on the operating system\n if cmd is None:\n if sys.platform == 'darwin':\n cmd = 'pbcopy w'\n else: # For Linux, BSD, cygwin, etc.\n cmd = 'xclip -selection clipboard'\n\n _subprocess_copy(text, cmd.split())",
"def copy(self, *args):\n pwd = self.var_pwd.get()\n if pwd and not pwd.lower().startswith(\"error\"):\n self.clipboard_clear()\n self.clipboard_append(pwd)",
"def _image_to_clipboard(path):\n import PySide.QtGui\n image = PySide.QtGui.QImage(path)\n clipboard = PySide.QtGui.QApplication.clipboard()\n clipboard.setImage(image, mode=PySide.QtGui.QClipboard.Clipboard)",
"def copy_selection( self, ):\n try:\n data = self.msg_text.get( \"sel.first\", \"sel.last\" )\n pyperclip.copy( data )\n except Exception as exception: # if no selection\n pass",
"def __copyAction(self, act):\n QApplication.clipboard().setText(act.data())",
"def copy(to_end=False):\n # Find a way to generalize this for different systems\n if to_end:\n with open('/Users/john/Terminal Saved Output', 'r') as f:\n output = f.read().replace('bpython', 'Python')\n code = output.split('\\nPython')[-1]\n else:\n code = pyperclip.paste()\n pyperclip.copy(parse_code(code))\n return None",
"def gicp(line):\n import pyperclip\n import shlex\n args = shlex.split(line)\n if len(args) == 0:\n num_lines_prior = 1\n else:\n num_lines_prior = int(args[1])\n pyperclip.copy(In[-1-num_lines_prior])",
"def replaceClipboardWith(self, s: str) -> None:\n cb = self.qtApp.clipboard()\n if cb:\n # cb.clear() # unnecessary, breaks on some Qt versions\n s = g.toUnicode(s)\n QtWidgets.QApplication.processEvents()\n # Fix #241: QMimeData object error\n cb.setText(s)\n QtWidgets.QApplication.processEvents()\n else:\n g.trace('no clipboard!')",
"def _copyPath(self, items: List[QModelIndex]) -> None:\n if len(items) == 0:\n return\n items = [x for i, x in enumerate(items) if i % len(self._modelHeaders) == 0]\n items = [str(self._currPath.joinpath(self._model.itemFromIndex(x).text())) for x in items]\n pyperclip.copy(', '.join(items))\n self._statusBar.showMessage('Path copied to clipboard!', 3000)",
"def __copyToClipboard(self):\n itm = self.findList.selectedItems()[0]\n if itm.parent():\n fn = itm.parent().text(0)\n else:\n fn = itm.text(0)\n \n cb = QApplication.clipboard()\n cb.setText(fn)"
]
| [
"0.7116051",
"0.6889499",
"0.6693996",
"0.65692973",
"0.6540873",
"0.6487246",
"0.6455572",
"0.64535856",
"0.6359736",
"0.63467944",
"0.625682",
"0.62332565",
"0.61990815",
"0.6185667",
"0.60845",
"0.5983435",
"0.5977336",
"0.5970804",
"0.5968446",
"0.5907072",
"0.58960956",
"0.58922523",
"0.5871945",
"0.58390516",
"0.5833726",
"0.5817985",
"0.5816733",
"0.5785587",
"0.57311463",
"0.57277393"
]
| 0.72597814 | 0 |
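
The copyline entry above builds a shell pipeline with echo | pbcopy; a hedged alternative sketch that reaches pbcopy directly through subprocess, avoiding shell-quoting issues (macOS only, since pbcopy ships with OS X):

    import subprocess

    def copy_to_clipboard(text):
        # pbcopy reads the text from stdin; no shell is involved, so quotes in the text are safe.
        subprocess.run(["pbcopy"], input=text.encode("utf-8"), check=True)

    copy_to_clipboard("echo 'hello world'")
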
Return the face indices of any face with a naked vertex | def getNakedFaceIDs(mesh):
nakedFaces = []
# Get naked vertices
nPts = list( mesh.GetNakedEdgePointStatus())
nIDs = [i for i,v in enumerate(nPts) if v == True]
for i in range(mesh.Faces.Count):
# Get face vertices
f = mesh.Faces.Item[i]
if f.IsTriangle:
vts = (f.A,f.B,f.C)
else:
vts = (f.A,f.B,f.C,f.D)
# Check if they are naked
naked = False
for vt in vts:
if vt in nIDs:
naked = True
if naked:
nakedFaces.append(i)
return nakedFaces | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def faces_from_vertex(self, vertex):\n assert isinstance(vertex, Vertex)\n return map(Face, self._top_exp.faces_from_vertex(vertex.topods_shape()))",
"def getVertexNumbers(self):\n return self.vertexIndex.keys()",
"def GetFaceToAdjacentFacesArray(self, p_int):\n ...",
"def out_vertices(self, vertex):\n return self[vertex].keys()",
"def vertices(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._vertices",
"def GetFaceArray(self, p_int):\n ...",
"def get_vertices(self):\n return self.vertList.keys()",
"def get_vertex_keys(self):\n return self.vertList.keys()",
"def vertices(self):\r\n return self.adjacent.keys()",
"def _vertices(self, point):\n vertex_0, vertex_1, vertex_2 = tuple(\n gs.take(point, indices=self.faces[:, i], axis=-2) for i in range(3)\n )\n if point.ndim == 3 and vertex_0.ndim == 2:\n vertex_0 = gs.expand_dims(vertex_0, axis=0)\n vertex_1 = gs.expand_dims(vertex_1, axis=0)\n vertex_2 = gs.expand_dims(vertex_2, axis=0)\n return vertex_0, vertex_1, vertex_2",
"def vertex_ids(self):\n return self.get_ids()",
"def getVertices(self):\n return self.vertexIndex",
"def index_vertices(vertexlist, graph):\n return_list = list()\n for vertex in vertexlist:\n return_list.append(graph.vs.find(name=vertex).index)\n return return_list",
"def GetFaceToAdjacentFaces(self, p_int, int_tuple):\n ...",
"def vertex_multidegree(breakpoint_graph, vertex):\n return len(list(breakpoint_graph.get_edges_by_vertex(vertex)))",
"def idx_face(self, etype: str, *, elm) -> list:\n\n return self._shapef[self._element_name[elm]][etype].idx_face",
"def cube_coordinates(vertex, n):\n x, y, z = vertex\n return (\n [\n (x,y+1,z, x,y+1,z+1, x+1,y+1,z+1, x+1,y+1,z), # top\n (x,y,z, x,y,z+1, x+1,y,z+1, x+1,y,z), # bottom\n (x,y,z, x,y,z+1, x,y+1,z+1, x,y+1,z), # left\n (x+1,y,z, x+1,y,z+1, x+1,y+1,z+1, x+1,y+1,z), # right\n (x,y,z, x+1,y,z, x+1,y+1,z, x,y+1,z), # front\n (x,y,z+1, x+1,y,z+1, x+1,y+1,z+1, x,y+1,z+1), # back\n ],\n [\n (x, y+1, z), # Check block top\n (x, y-1, z), # Check block bottom\n (x-1, y, z), # Check block left\n (x+1, y, z), # Check block right\n (x, y, z-1), # Check block front\n (x, y, z+1), # Check block back\n ],\n )",
"def get_vertices(self):\n if self.vert_list.keys() != None:\n return self.vert_list.keys()\n raise KeyError(\"Vertex not found\")",
"def vertices(self):\n return self.keys()",
"def getFace(self, vertices):\n for f in self.faces:\n if f.vertices == vertices:\n return f\n raise ValueError('No face found')",
"def find_edges(mesh, key):\n for edge in mesh.edges:\n v = edge.vertices\n if key[0] == v[0] and key[1] == v[1]:\n return edge.index",
"def rook_neighbors_face(self, face):\n edges = self.cw_face_edges(face)\n return list(set([ self.left_region[edge] for edge in edges]))",
"def detect_faces(image):\n\n face_locations = face_recognition.face_locations(image)\n return face_locations",
"def find_isolated_vertices(self):\n graph = self.__graph_dict\n isolated = []\n for vertex in graph:\n # print(isolated,vertex)\n if not graph[vertex]:\n isolated += [vertex]\n return isolated",
"def get_faces(self):\n for i, j in combinations(self.gens, 2):\n c0 = self.triangle_verts[self.vertex_at_mirrors(i, j)]\n f0 = []\n m = self.cox_mat[i][j]\n H = (i, j)\n type = 0\n if self.active[i] and self.active[j]:\n type = 1\n for k in range(m):\n f0.append(self.G.move(self.vtable, 0, (i, j) * k))\n f0.append(self.G.move(self.vtable, 0, (i, j) * k + (i,)))\n elif self.active[i] and m > 2:\n for k in range(m):\n f0.append(self.G.move(self.vtable, 0, (j, i) * k))\n elif self.active[j] and m > 2:\n for k in range(m):\n f0.append(self.G.move(self.vtable, 0, (i, j) * k))\n else:\n continue\n\n reps = set(self.word_generator(parabolic=H))\n reps = self.G.sort_words(reps)\n flist = []\n for word in reps:\n f = tuple(self.G.move(self.vtable, v, word) for v in f0)\n if None not in f and not helpers.check_duplicate_face(f, flist):\n center = self.transform(word, c0)\n coords = [self.vertices_coords[k] for k in f]\n face = DihedralFace(word, f, center, coords, type)\n flist.append(face)\n\n self.face_indices[(i, j)] = flist\n\n self.num_faces = sum(len(L) for L in self.face_indices.values())",
"def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j",
"def _interiorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n interiorIDs = numerix.concatenate((numerix.ravel(XYids[ ..., 1:-1]),\n numerix.ravel(XZids[:, 1:-1,:]),\n numerix.ravel(YZids[1:-1, ...].swapaxes(0, 1))))\n\n from fipy.variables.faceVariable import FaceVariable\n interiorFaces = FaceVariable(mesh=self, value=False)\n interiorFaces[interiorIDs] = True\n return interiorFaces",
"def get_vertices(self):\n return self.graph.keys()",
"def neighbors_in(self, vertex):\n return list(self.neighbor_in_iterator(vertex))",
"def select_vertices(self):\n _filter = compas_rhino.rs.filter.point\n guids = compas_rhino.rs.GetObjects(message=\"Select Vertices.\", preselect=True, select=True, group=False, filter=_filter)\n if guids:\n guid_vertex = {}\n guid_vertex.update(self.guid_vertex_free)\n guid_vertex.update(self.guid_vertex_anchor)\n keys = [guid_vertex[guid] for guid in guids if guid in guid_vertex]\n else:\n keys = []\n return keys"
]
| [
"0.65702415",
"0.6509233",
"0.6472732",
"0.6163369",
"0.6108471",
"0.6058534",
"0.60497165",
"0.60435593",
"0.6026859",
"0.6016006",
"0.6011606",
"0.5981142",
"0.59727657",
"0.5972455",
"0.5950641",
"0.5911575",
"0.58671874",
"0.58626544",
"0.5853577",
"0.58423615",
"0.58422506",
"0.58274126",
"0.5791895",
"0.5789572",
"0.57891816",
"0.5777718",
"0.5761192",
"0.5757818",
"0.57509255",
"0.57340294"
]
| 0.76183254 | 0 |
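
The getNakedFaceIDs entry above is written against RhinoCommon's Mesh API (GetNakedEdgePointStatus, Faces); the underlying test, keep every face that touches at least one naked vertex, can be sketched in plain Python with made-up data, where faces are tuples of vertex indices:

    def naked_face_ids(faces, naked_vertex_ids):
        # faces: list of (A, B, C) or (A, B, C, D) vertex-index tuples
        naked = set(naked_vertex_ids)
        return [i for i, face in enumerate(faces) if any(v in naked for v in face)]

    faces = [(0, 1, 2), (2, 3, 4, 5), (4, 5, 6)]
    print(naked_face_ids(faces, naked_vertex_ids={3, 6}))  # -> [1, 2]
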
Reads the requested number of bytes from the specified endpoint. | def read(self, endpoint, size):
return self.device.read(endpoint, size) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read(self, num_bytes_to_read):\n pass",
"def readfrom(self, addr: int, nbytes: int, stop: bool = True, /) -> bytes:",
"def readfrom(self, addr: int, nbytes: int, stop: bool = True, /) -> bytes:",
"def read(self, size: int=-1) -> bytes:\n ...",
"def read(self, size: int=-1) -> bytes:\n ...",
"def read(self, nbytes: int, /) -> Optional[bytes]:",
"def read(self, nbytes: Optional[int] = None) -> Optional[bytes]:\n ...",
"def read(self, n):\n logger.debug(\"Reading {} bytes...\".format(n))\n bytes_ = self.impl.read(n)\n logger.debug(\"Received: {} bytes\".format(len(bytes_)))\n return bytes_",
"def read(self, nbytes: int, /) -> bytes | None:",
"def _get_data(self, read_size):\n return self._pipe.recv_bytes()",
"def read(self, nbytes, /) -> bytes | None:",
"def get_data_from_endpoint(self, from_, to_, endpoint):\n endpoint = self.make_endpoint(endpoint)\n from_, to_ = str(from_), str(to_)\n payload = {\n 'auth': self.auth_token,\n 'id': self.monitor_id,\n 'start': from_,\n 'end': to_,\n 'extendLimit': 'true',\n 'fullContents': 'true'\n }\n\n r = self.session.get(endpoint, params=payload)\n ratelimit_remaining = r.headers['X-RateLimit-Remaining']\n #print ('Remaining Ratelimit = ' + str(ratelimit_remaining))\n\n # If the header is empty or 0 then wait for a ratelimit refresh.\n if (not ratelimit_remaining) or (float(ratelimit_remaining) < 1):\n #print('Waiting for ratelimit refresh...')\n sleep(self.ratelimit_refresh)\n\n return r",
"def endpoint_read_worker(endpoint, condition=True):\n\n def __synchronise():\n \"\"\" Simple function to match the first whole packet and synchronise. It just drops each byte\n if it's not a packet indicator. It's really primitive at this stage. \"\"\"\n # FIXME: We should match some pattern rather than single byte\n ch = endpoint.read()\n while not ch or ord(ch) not in DGTL.descriptors.keys():\n ch = endpoint.read()\n\n return ch\n\n # Read packet indicator\n msg = __synchronise()\n\n # Assemble data slices into non-fragmented packets\n while condition:\n # Read the minimum amount of data to get packet length\n try:\n hdr_size, fmt, _ = DGTL.descriptors[struct.unpack('<B', msg)[0]]\n msg += endpoint.read(hdr_size)\n\n # Extract packet length from the header\n pkt_len = struct.unpack(fmt, msg)[-1]\n\n # Read the whole message and queue it for the router to handle\n msg += endpoint.read(pkt_len)\n endpoint.message_put(\n protocol.Pkt.packetize(endpoint, apply_filters(msg, *endpoint.get_input_filters())))\n\n # Read next packet indicator\n msg = endpoint.read()\n except KeyError:\n print(\"Synchronisation error.\")\n msg = __synchronise()",
"def _read_bytes(self, start, num_bytes):\n with self._fp_lock:\n self._fp.seek(start)\n return self._fp.read(num_bytes)",
"def read(self, size=-1):\n ...",
"def _readBytes(self, len):\n return self.socket.recv(len)",
"def _readBytes(self, len):\n return self.stream.read(len)",
"def _read_bytes(self, start, count): # type: (int) -> bytes\n bytes_data = self._buffer[start:start + count]\n\n if len(bytes_data) != count:\n raise ASN1WantMore('Premature end of input.')\n\n return bytes_data",
"def read_bytes(self, number_of_bytes):\n\n self.index = -1\n data = self.buf[self.offset:self.offset + number_of_bytes]\n self.offset += number_of_bytes\n\n return data",
"def make_read_request(file_offset=1, byte_count=MAX_READ):\n return StenoPacket(\n packet_id=StenoPacket.ID_READ,\n p1=file_offset,\n p2=byte_count,\n )",
"def _read_nowait(self, n: int) -> bytes:\n ...",
"def ReceiveBufferSize(self) -> int:",
"def ReceiveBufferSize(self) -> int:",
"def readReply(self, inputEndpoint, size = None, unpackingFormat=None, timeout=None):\n if inputEndpoint is None:\n raise Exception(\"endpoint cannot be none\")\n\n buffer = array.array('B',[0]*inputEndpoint.wMaxPacketSize)\n if unpackingFormat is not None:\n size = calcsize(unpackingFormat)\n\n if size is None:\n inputEndpoint.read(size_or_buffer=buffer, timeout=timeout)\n else:\n buffer = inputEndpoint.read(size_or_buffer=size, timeout=timeout)\n\n if unpackingFormat is not None:\n return unpack(unpackingFormat, buffer)\n\n return buffer",
"def read(self, nbytes: int, write: int = 0x00, /) -> bytes:",
"def read(self, nbytes: int, write: int = 0x00, /) -> bytes:",
"async def read(self, num_bytes=0) -> bytes:\n if num_bytes < 1:\n num_bytes = self.in_waiting or 1\n\n return await self._read(num_bytes)",
"def readfrom(self, addr: int, nbytes: int, stop: bool = True) -> bytes:\n ...",
"async def _read(self, n):\n return await self._reader.readexactly(n)",
"async def read_chunk(self, size: int = ...) -> bytes:\n ..."
]
| [
"0.6763095",
"0.6141447",
"0.6141447",
"0.613305",
"0.613305",
"0.6132118",
"0.6117879",
"0.6090794",
"0.60861325",
"0.6072254",
"0.6069751",
"0.6054077",
"0.60516727",
"0.60494274",
"0.6046029",
"0.6044382",
"0.6037771",
"0.6015013",
"0.5982061",
"0.59549093",
"0.59500825",
"0.59494454",
"0.59494454",
"0.5930251",
"0.5911411",
"0.5911411",
"0.5897341",
"0.5897016",
"0.5805961",
"0.5800514"
]
| 0.7538086 | 0 |
Writes the given data to the specified endpoint. | def write(self, endpoint, data):
return self.device.write(endpoint, data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _write(self, location, data):\n self._connector.write(location=location, data=data)",
"def write(self, data):\n raise NotImplementedError()",
"def write(data):",
"def _write(self, data):\n self._writer.write(data)",
"def write( data ):",
"def SendPacket(self, endpoint_addr, data):\n _, _, pipe = self._ep_fds[endpoint_addr]\n pipe.send(data)",
"def write(self, data: List[str]):\n\n # explore:\n # write_api = client.write_api(write_options=ASYNCHRONOUS)\n #\n # _point1 = Point(\"my_measurement\").tag(\"location\", \"Prague\").field(\"temperature\",\n # 25.3)\n # _point2 = Point(\"my_measurement\").tag(\"location\", \"New York\").field(\n # \"temperature\", 24.3)\n #\n # async_result = write_api.write(bucket=\"my-bucket\", record=[_point1, _point2])\n # async_result.get()\n #\n # client.close()\n # or\n # with _client.write_api(write_options=WriteOptions(batch_size=500,\n # flush_interval=10_000,\n # jitter_interval=2_000,\n # retry_interval=5_000,\n # max_retries=5,\n # max_retry_delay=30_000,\n # exponential_base=2))\n # as _write_client:\n # see https://github.com/influxdata/influxdb-client-python\n\n # write_api = self.connection.write_api(write_options=SYNCHRONOUS)\n self.write_api.write(self.config.bucket, self.config.org, data)\n # async_result.get()",
"def write(self, data: bytes) -> None:\n pass",
"def write(self, data):\n self._check_not_closed()\n raise io.UnsupportedOperation(\"Write not supported\")",
"def write_data(self, data):\n print('Wrote %d bytes' % (len(data)))",
"def write(self, data):\n self.check_connection()\n self._write(struct.pack(\"I\", len(data)))\n self._write(data)\n self._write_hmac.update(data)\n self._write(self._write_hmac.digest())",
"def write(self, data, meta):\n raise NotImplementedError",
"def write(self, data):\n with self.writing:\n raise NotImplementedError()",
"def write(self, data):\n return 0",
"def write(self, data: bytes):\n self._writer.write(data)",
"def write(self, data):\n\t\tself.outputbuffer.write(data)",
"async def _post_data(self, endpoint: str, data: dict):\n url = self.ws_server_url.replace(\"wss://\", \"https://\").replace(\n \"ws://\", \"http://\"\n )\n url = self.ws_server_url.replace(\"websocket\", endpoint)\n headers = {\n \"Authorization\": \"Bearer %s\" % self._get_token(),\n \"Content-Type\": \"application/json\",\n }\n async with self._http_session.post(\n url, headers=headers, json=data, verify_ssl=False\n ) as response:\n return await response.json()",
"def write(self, data: Union[str, bytes]) -> None:\n ...",
"def send_data(self, data):\n self._transport.write(data)",
"def write(self, data):\n self.buffer.write(data)\n self.offset += len(data)",
"def write(self, data):\n return self._write(self.wfile, data)",
"def handle_data_received(self, endpoint: USBEndpoint, data: bytes):\n\n if self.has_endpoint(endpoint.number, endpoint.direction):\n endpoint.handle_data_received(data)\n else:\n self.get_device().handle_unexpected_data_received(endpoint.number, data)",
"def write(self, filename, data):\n raise NotImplementedError",
"def write(self, data):\n with self._write_lock:\n self.socket.send(data)",
"async def emit(self, data):\n if type(data) is not str:\n serialized_data = json.dumps(data)\n else:\n serialized_data = data\n try:\n self.write(f\"data: {serialized_data}\\n\\n\")\n await self.flush()\n except StreamClosedError:\n app_log.warning(\"Stream closed while handling %s\", self.request.uri)\n # raise Finish to halt the handler\n raise Finish()",
"def write(self, data):\n try:\n self._conn.send(data)\n except OSError as exc:\n raise TS3ConnectionClosedException(OSError) from exc",
"def _send_frame(self, dest, data):\n self._log.debug(\"write {} to {}\".format(len(data), dest)) \n # send to endpoint\n self._conn.sendto(data, (dest,0))",
"def write(self, data):\n self._write_lock.acquire()\n try:\n self.socket.sendall(data)\n finally:\n self._write_lock.release()",
"def send_data_as_json(self, endpoint, data):\n headers = {'Content-Type': 'application/json',\n 'Accept': 'application/json'}\n\n headers.update(self.authorization())\n json_data = json.dumps(data)\n print(json_data)\n response = requests.post(endpoint, data=json_data, headers=headers)\n return response",
"def write_data():"
]
| [
"0.6847767",
"0.67225677",
"0.6463698",
"0.64547944",
"0.6416697",
"0.63848853",
"0.63613605",
"0.63533884",
"0.63493294",
"0.6248264",
"0.6236459",
"0.6230846",
"0.62266326",
"0.62134635",
"0.61800426",
"0.617929",
"0.61328673",
"0.6100123",
"0.60997343",
"0.6064489",
"0.5988189",
"0.5984429",
"0.59628236",
"0.5937367",
"0.5905651",
"0.59004956",
"0.5897763",
"0.58659345",
"0.58643997",
"0.5855781"
]
| 0.8481159 | 0 |
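
The read and write entries in the two records above are thin wrappers around a USB device handle; a sketch of how such a wrapper might be used, assuming pyusb is installed and using placeholder vendor/product ids and endpoint addresses:

    import usb.core

    class Transport:
        def __init__(self, device):
            self.device = device

        def read(self, endpoint, size):
            # Read `size` bytes from the given IN endpoint.
            return self.device.read(endpoint, size)

        def write(self, endpoint, data):
            # Write the bytes in `data` to the given OUT endpoint.
            return self.device.write(endpoint, data)

    # Placeholder ids and endpoint addresses; real values depend on the hardware.
    dev = usb.core.find(idVendor=0x1234, idProduct=0x5678)
    if dev is not None:
        dev.set_configuration()
        link = Transport(dev)
        link.write(0x01, b"\x00\x01")
        print(link.read(0x81, 64))
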
r"""Fill or create array of lengthm D, from value or value form a. | def fullArray(a, D):
A = list()
if isinstance(a, (int, float)): A = full(D, a)
elif isinstance(a, (ndarray, list)):
if len(a) == D: A = a if isinstance(a, ndarray) else asarray(a)
elif len(a) > D: A = a[:D] if isinstance(a, ndarray) else asarray(a[:D])
else:
for i in range(int(ceil(float(D) / len(a)))): A.extend(a[:D if (D - i * len(a)) >= len(a) else D - i * len(a)])
A = asarray(A)
return A | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_one_d_array(len, val):\n return [val for i in range(len)]",
"def fill_array(data, size, fill_value=numpy.nan, push_back=True):\n\n if push_back:\n return numpy.append(data, numpy.repeat(fill_value, size - data.size))\n\n return numpy.append(numpy.repeat(fill_value, size - data.size), data)",
"def fill(self, value: Any) -> JaggedArray:\n\n self.data[...] = value",
"def fill(self, value):\n if self.fragmented:\n (self[self._begin:].view(ndarray)).fill(value)\n (self[:self._end].view(ndarray)).fill(value)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n (part.view(ndarray)).fill(value)",
"def create_array( n ):",
"def from_numpy(self, a):\n raise NotImplementedError(\"from_numpy\")",
"def c_array(ctype, values):\n\n arr = (ctype*len(values))()\n arr[:] = values\n return arr",
"def _make_coord_array_full(a,t,d,f):\n Na,Nt,Nd,Nf = a.shape[0],t.shape[0],d.shape[0],f.shape[0]\n X = np.zeros([Na,Nt,Nd,Nf,6],dtype=np.float64)\n for i in range(Na):\n for j in range(Nt):\n for k in range(Nd):\n for l in range(Nf):\n X[i,j,k,l,0:2] = a[i,:]\n X[i,j,k,l,2] = t[j]\n X[i,j,k,l,3:5] = d[k,:]\n X[i,j,k,l,5] = f[l]\n X = np.reshape(X,(Na*Nt*Nd*Nf,6))\n return X",
"def get_array_filled(self, name: str) -> np.ndarray:\n if name in self._per_chunk_arrays:\n return self.get_array(name)\n values = self.get_array_ragged(name)\n max_len = self._per_chunk_arrays[\"length\"].max()\n\n def resize_and_pad(v):\n l = len(v)\n per_shape = self._per_element_arrays[name].shape[1:]\n v = np.resize(v, max_len * np.prod(per_shape, dtype=int))\n v = v.reshape((max_len,) + per_shape)\n if name in self._fill_values:\n fill = self._fill_values[name]\n else:\n fill = np.zeros(1, dtype=self._per_element_arrays[name].dtype)[0]\n v[l:] = fill\n return v\n\n return np.array([resize_and_pad(v) for v in values])",
"def to_array(self, fill_value: Optional[Any] = None) -> np.ndarray:\n if fill_value is None:\n fill_value = infer_nan(self.dtype)\n\n tmp = self.astype(float) if is_float(fill_value) else self\n return tmp.to_masked().filled(fill_value=fill_value)",
"def _pad_array(da, dim, left=False, boundary=None, fill_value=0.0):\n\n if boundary not in [\"fill\", \"extend\"]:\n raise ValueError(\"`boundary` must be `'fill'` or `'extend'`\")\n\n axis_num = da.get_axis_num(dim)\n shape = list(da.shape)\n shape[axis_num] = 1\n\n base_array = da.data\n index = slice(0, 1) if left else slice(-1, None)\n edge_array = da.isel(**{dim: index}).data\n\n use_dask = has_dask and isinstance(base_array, dsa.Array)\n\n if boundary == \"extend\":\n boundary_array = edge_array\n elif boundary == \"fill\":\n args = shape, fill_value\n kwargs = {\"dtype\": base_array.dtype}\n if use_dask:\n full_func = dsa.full\n kwargs[\"chunks\"] = edge_array.chunks\n else:\n full_func = np.full\n boundary_array = full_func(*args, **kwargs)\n\n arrays_to_concat = [base_array, boundary_array]\n if left:\n arrays_to_concat.reverse()\n\n return concatenate(arrays_to_concat, axis=axis_num)",
"def fill(self,value):\n if value is None:\n return\n if isinstance(value,numbers.Number):\n for i in range(self.nrows):\n for j in range(self.ncols):\n self.matrix[i][j] = value\n elif isinstance(value,list):\n if len(value) != self.nrows*self.ncols:\n raise ValueError('matrix fill value has incorrect number of elements')\n\n if not all(isinstance(item,numbers.Number) for item in value):\n raise TypeError('matrix fill value not a list of numbers')\n index = 0\n for i in range(self.nrows):\n for j in range(self.ncols):\n self.matrix[i][j] = value[index]\n index += 1 \n else:\n raise TypeError('matrix fill value not a number')",
"def _infer_fill_value(val):\n\n if not is_list_like(val):\n val = [val]\n val = np.array(val, copy=False)\n if is_datetimelike(val):\n return np.array('NaT', dtype=val.dtype)\n elif is_object_dtype(val.dtype):\n dtype = lib.infer_dtype(_ensure_object(val))\n if dtype in ['datetime', 'datetime64']:\n return np.array('NaT', dtype=_NS_DTYPE)\n elif dtype in ['timedelta', 'timedelta64']:\n return np.array('NaT', dtype=_TD_DTYPE)\n return np.nan",
"def init_four_d_array(dimens, val):\n w, x, y, z = dimens\n return [[[[val for l in range(z)]\n for k in range(y)]\n for j in range(x)]\n for i in range(w)]",
"def zfill(self, width):\n return asarray(zfill(self, width))",
"def monome(c,d):\n P = [0]*(d+1)\n P[d] = c\n return P",
"def create_variable_array(times):\n R=np.empty(np.sum(times))\n return R",
"def iterable_to_vtk_array(\n data: typing.Iterable[float], number_of_values: int, default_value: float = 0.0\n) -> vtk.vtkDoubleArray:\n\n result = vtk.vtkDoubleArray()\n result.SetNumberOfValues(number_of_values)\n for index, value in enumerate(\n islice(chain(data, repeat(default_value)), number_of_values)\n ):\n result.SetValue(index, value)\n return result",
"def dask_arr(vals):\n try:\n import dask.array as da\n return da.from_array(vals, chunks=2)\n except ImportError:\n return vals",
"def create(self,\n fill_value:'Optional[Number]'=None,\n **kwargs):\n\n shape = self._dims.shape\n if fill_value is None:\n data = np.empty(shape, **kwargs)\n else:\n data = np.full(shape, fill_value, **kwargs)\n\n assert isinstance(data, self.DataClass), (\n f'overload for create is required for non-default data class({type(data)})')\n\n self._data = data\n return self.data",
"def prepend_1d_nb(a, n, value):\n out = np.empty(a.shape[0] + n, dtype=np.float_)\n out[:n] = value\n out[n:] = a\n return out",
"def a(*args, **kwargs):\n return np.array(*args, **kwargs)",
"def get_value_array(data, dimension, expanded, keep_index, geom_col,\n is_points, geom_length=geom_length):\n column = data[dimension.name]\n if keep_index:\n return column\n all_scalar = True\n arrays, scalars = [], []\n for i, geom in enumerate(data[geom_col]):\n length = 1 if is_points else geom_length(geom)\n val = column.iloc[i]\n scalar = isscalar(val)\n if scalar:\n val = np.array([val])\n if not scalar and len(unique_array(val)) == 1:\n val = val[:1]\n scalar = True\n all_scalar &= scalar\n scalars.append(scalar)\n if not expanded or not scalar:\n arrays.append(val)\n elif scalar:\n arrays.append(np.full(length, val))\n if expanded and not is_points and not i == (len(data[geom_col])-1):\n arrays.append(np.array([np.NaN]))\n\n if not len(data):\n return np.array([])\n if expanded:\n return np.concatenate(arrays) if len(arrays) > 1 else arrays[0]\n elif (all_scalar and arrays):\n return np.array([a[0] for a in arrays])\n else:\n array = np.empty(len(arrays), dtype=object)\n array[:] = [a[0] if s else a for s, a in zip(scalars, arrays)]\n return array",
"def _make_array(x):\n try:\n x = np.asfarray(x).squeeze()\n except ValueError:\n pass\n return x",
"def array_fill(start, cnt, val):\n r = {}\n i = start\n while i <= (start + cnt):\n r[i] = val\n i += 1\n return r",
"def _asarray(source, size):\n noise = source()\n if size is None:\n return noise.next()\n #count = reduce(operator.mul, shape)\n return numpy.asarray([noise.next() for _ in range(size)])",
"def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a",
"def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a",
"def read1d(f, a):\n values = []\n while True:\n line = f.readline()\n t = line.strip().split()\n values = values + t\n if len(values) >= a.shape[0]:\n break\n a[:] = numpy.array(values[0:a.shape[0]], dtype=a.dtype)\n return a",
"def n(l):\n return np.array(l,dtype=object)"
]
| [
"0.66143",
"0.5841869",
"0.58300316",
"0.56885254",
"0.5657182",
"0.5595944",
"0.5565606",
"0.5515568",
"0.5509467",
"0.5499455",
"0.545319",
"0.5421347",
"0.54124904",
"0.53535265",
"0.53260875",
"0.5307944",
"0.5293515",
"0.5288737",
"0.52843624",
"0.52706736",
"0.52219445",
"0.5220035",
"0.52013206",
"0.51997405",
"0.51681244",
"0.51660645",
"0.5165459",
"0.5165459",
"0.51623946",
"0.5159856"
]
| 0.6009062 | 1 |
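
The fullArray entry above broadcasts a scalar, or tiles/truncates an array, to length D; a numpy-based sketch of the same behaviour (not the library's own implementation):

    import numpy as np

    def full_array(a, d):
        # Scalar: broadcast. Array-like: repeat it and cut back to exactly d elements.
        if np.isscalar(a):
            return np.full(d, a)
        a = np.asarray(a)
        reps = int(np.ceil(d / a.size))
        return np.tile(a, reps)[:d]

    print(full_array(3, 5))       # [3 3 3 3 3]
    print(full_array([1, 2], 5))  # [1 2 1 2 1]
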
r"""Check if stoping condition reached. | def stopCond(self):
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop(self):\n return not self.iteration < self.options['max_iters']",
"def check(self, context):\r\n return context.config.stopAt is not None",
"def stop_check(self):\n pass",
"def stopped_check(self, timeout=None):",
"def _stop(self):\n return True",
"def _termination(self):\n if self._never_terminate:\n return False\n\n if self._counter >= self._max_steps:\n return True\n\n return self.is_fallen() # terminates automatically when in fallen state",
"def stop(self):\n stopping = False\n if self.currentGeneration > maxIter:\n stopping = True\n logging.info(\"Maximum Iterations Reached!\")\n return stopping",
"def _check_classical_stop_conditions(self, changed):\n searching = changed and (self._iteration < self._max_iterations) \n if not changed:\n self._notify(message=LocalSearchMessage.StoppedPrematurely)\n elif not searching: \n self._notify(message=LocalSearchMessage.Stopped)\n elif self._target_fitness:\n if self._solution.fitness >= self._target_fitness:\n self._notify(message=LocalSearchMessage.StoppedTargetAchieved)\n return False\n return searching",
"def is_stop(self) -> bool:\n return self.__stop",
"def stopped(self):\n return self.state == 'stop'",
"def check_termination(self) -> bool:\n return self.terminate",
"def stop(self):\n return self.random.uniform(0, 1) < 1/self.k",
"def __bool__(self):\n return not self._stopped",
"def should_stop(self):\n return self._cmaes.should_stop()",
"def stop() -> None:",
"def check_early_stop(self) -> bool:\n if self.args.early_stopping_steps == -1:\n return False\n return self._steps_since_new_prefix >= self.args.early_stopping_steps",
"def _thread_check_stop_event(self):\n self._require_controller_modes(['running_as_thread','running_as_blocking_call'])\n return self.thread.check_stop_event()",
"def stopped(self):\r\n return self._stop.isSet()",
"def stop(self):\n command = input(\"Enter anything to finish (or 'exit' to cancel)>>>\")\n return command != 'exit'",
"def is_stop(self):\n return self.p_state._getvalue()['stop']",
"def stopped(self):\n return self.stop_event.is_set()",
"def must_stop(self):\n global_stop = self.data_persister.exists(path=self.get_worker_path(), key=\"stop\", write_type=SavingType.txt)\n if global_stop:\n return True, \"global_stop\"\n\n worker_stop = self.data_persister.exists(\n path=self.get_worker_path(), key=str(self.worker_informations[\"id\"]) + \"_stop\", write_type=SavingType.txt\n )\n if worker_stop:\n return True, \"worker_stop\"\n\n if self._keyboard_funct() == 17:\n return True, \"ctrq_pressed\"\n\n return False, None",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def stop(self):\r\n self.terminating = True",
"def nanny(self): \n while not self.started and not self.failed:\n eventlet.sleep(.1)\n return not self.failed",
"def is_stopper(self):\r\n return self.stopper",
"def stop(self):\n return S.Infinity"
]
| [
"0.75141114",
"0.7417896",
"0.73706514",
"0.7015613",
"0.7012812",
"0.6949197",
"0.6934178",
"0.6902843",
"0.68835634",
"0.68042654",
"0.67862904",
"0.6783206",
"0.67785966",
"0.6764178",
"0.67639863",
"0.67567784",
"0.67475766",
"0.6744244",
"0.6728208",
"0.67224294",
"0.6695159",
"0.6642003",
"0.663836",
"0.663836",
"0.663836",
"0.663836",
"0.6631988",
"0.66275305",
"0.66211855",
"0.6612585"
]
| 0.8338078 | 0 |
r"""Evaluate the solution A. | def eval(self, A):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate(self) -> int:",
"def evaluate(self):\n #fac o lista cu toate perechile si vad daca se repeta vreuna (pana acum)\n nr=0\n \n pairs = []\n for i in range(0,self.__size):\n for j in range(0, self.__size):\n if self.__solution[i] != [] and self.__solution[i+self.__size] != [] : #sa am de unde face perechea\n p=[]\n p.append(self.__solution[i][j])\n p.append(self.__solution[i+self.__size][j])\n pairs.append(p)\n for p in pairs:\n if pairs.count(p) == 1:\n nr += 1\n\n return self.__size*self.__size - nr + 1 # pun acel +1 ca sa nu fie 0 niciodata -> ca sa nu am probleme la impartire\n # la 0 mai incolo\n #return nr",
"def evaluate(self, algo):\n raise NotImplementedError()",
"def evaluate(self):\n pass",
"def evaluate(self):\n pass",
"def evaluate(self, solution, total = 0):\n for objective in self.objectives:\n total = total + objective(solution)\n return total",
"def solve(self):",
"def evaluate(self, *args, **kwargs):\n params = self.process_args(args, kwargs)\n a = params['a']\n b = params['b']\n return a * self.x + b",
"def evaluate(self) :\n pass",
"def __call__(self, x_eval, X_far, X_near, inner_product_far=None):\n\n b = self._compute_b(x_eval, X_far, X_near)\n A = self._compute_A(X_far, X_near)\n Q = self._compute_Q(X_far, X_near, inner_product_far, A)\n\n lamda = self._qp_solver(Q, b)\n\n if np.all(Q @ lamda + b + self._feasibility_eps >= 0):\n return A.T @ lamda\n else:\n None",
"def evaluate(self):\n return self._evaluate_recur(self.root())",
"def evaluate(self):\n return self._evaluate_recur(self.root())",
"def evaluate(self, X):\n\n\t\tpass",
"def loc_eval(x, b):\r\n loc_est = 0\r\n for i in enumerate(b): loc_est+=i[1]*(x**i[0])\r\n return(loc_est)",
"def run_exact(self):\n self.operator, var_form, opt = self.generate_VQE_args()\n\n exact_eigensolver = ExactEigensolver(self.operator, k=1)\n self.result = exact_eigensolver.run()\n\n solution = self.extract_solution(self.result, True)\n return solution",
"def _solve_subproblem(\n self, x_eval: Tensor, x_neg: Tensor, X_pos: Tensor,\n cache_book: dict\n ) -> Tensor:\n\n # Q = A @ A.t()\n A, b, Q = self._qp_params(\n x_eval, x_neg, X_pos, cache_book\n )\n lamda = self._qp_solver(Q, b)\n\n return -A.t() @ lamda",
"def evaluate_solution(self, chosen):\n self.candidate_counter += 1\n\n # evaluation function in abstract superclass\n \n solved_clauses = np.any(self.truth_clauses & np.array([chosen, ~chosen]), axis=(2, 1)) \n num_solved_clauses = np.sum(solved_clauses)\n # calculate evaluation with weight adaption heuristic\n evaluation = np.sum(solved_clauses * self.clause_weights)\n\n if self.candidate_counter == self.WEIGHT_ADAPTION_DURATION:\n # increase weights for unsatisfied clauses\n self.clause_weights += ~solved_clauses\n self.candidate_counter = 0\n\n return evaluation, num_solved_clauses",
"def policy_evaluation(P, nS, nA, policy, gamma=0.9, tol=1e-8):\n value_function = np.zeros(nS)\n ############################\n # YOUR IMPLEMENTATION HERE #\n def next_state_reward(P,state,action,gamma,value_function):\n sum_reward=0\n for p,nextS,r,boolean_v in P[state][action]:\n sum_reward+=p*( r + gamma* value_function[nextS])\n #print(sum_reward) \n return sum_reward\n\n while True:\n delta=0;\n for state in range(nS):\n new_value=0;\n for action in range(nA):\n sum_reward=next_state_reward(P,state,action,gamma,value_function)\n new_value+=policy[state][action]*sum_reward\n delta= max(delta, abs(new_value-value_function[state]))\n value_function[state] = new_value\n #print(value_function)\n if(delta < tol):\n break\n\n ############################\n return value_function",
"def evaluate(self,coeffs,evalpts):\n a1,a2,a3,A0,E0,G0,n = coeffs\n x = asarray(evalpts) #XXX: requires a numpy.array\n return (a1 + a2*x + a3*x*x + A0 * ( G0/(2*pi) )/( (x-E0)*(x-E0)+(G0/2)*(G0/2) ))/n",
"def eval(self, theta, force=False):\n \n self.update_A_b(theta, force)\n \n if self.b.ndim != 2:\n raise ValueError(\"self.b.ndim not equal to 2.\")\n \n n,p = self.b.shape\n \n #x = numpy.zeros_like(self.b)\n #for k in range(p):\n # x[:,k] = self.solver.backsolve(self.b[:,k], transp='N')\n #return x\n \n # Using the multiple-r.h.s capability of solver.backsolve\n return self.solver.backsolve(self.b)",
"def solve(self):\n for step in self.run.values():\n step.solve()",
"def eval(f, a, j=0):\n return f.per(dmp_eval_in(f.rep, f.dom.convert(a), j, f.lev, f.dom), lower=True)",
"def evaluate(self,**d):\r\n\t\t\r\n\t\t# evaluate terms\r\n\t\tv = [i.evaluate(**d) for i in self]\r\n\t\t\r\n\t\t# sum results\r\n\t\tc = Pa(v).sum()\r\n\t\t\r\n\t\treturn c",
"def solve(self):\n \n raise NotImplementedError(\"not implemented!\")",
"def _evaluate(self, x):\n out_bot, out_top = self.out_of_bounds(x)\n\n return self._eval_helper(x, out_bot, out_top)",
"def policy_evaluation(P, nS, nA, policy, gamma=0.9, tol=1e-3):\n\n\tvalue_function = np.zeros(nS)\n\n\t############################\n\t# YOUR IMPLEMENTATION HERE #\n\n\twhile True:\n\t\tprevious_value_function = np.copy(value_function)\n\t\tfor s in range(nS):\n\t\t\taction = policy[s];\t# action specified by the policy\n\t\t\tcurrent_value = 0\n\t\t\tfor transition in P[s][action]:\t# for every possible transition\n\t\t\t\t# print(len(P[s][action]))\n\t\t\t\tprobability = transition[0]\n\t\t\t\treward = transition[2]\n\t\t\t\tnext_state = transition[1]\n\t\t\t\tvalue_next_state = previous_value_function[next_state]\n\n\t\t\t\t# print(\"probability: \" + str(probability) + \"reward: \" + str(reward) + \"value next state: \" + str(value_next_state))\n\n\t\t\t\tcurrent_value += probability * (reward + gamma * value_next_state)\n\t\t\t\n\t\t\tvalue_function[s] = current_value\n\n\t\t# find the maximum difference between the previous value and the current value\n\t\tdifArray = np.subtract(value_function, previous_value_function)\n\t\tfor i in range(nS):\n\t\t\tdifArray[i] = abs(difArray[i])\n\t\tmaxDif = np.amax(difArray)\n\n\t\tif (maxDif < tol):\n\t\t\tbreak\n\n\t############################\n\treturn value_function",
"def evaluate(self):\n raise NotImplementedError(\"Abstract method\")",
"def evaluate(self):\n raise NotImplementedError()",
"def policy_evaluation(P, nS, nA, policy, gamma=0.9, tol=1e-8):\n value_function = np.zeros(nS)\n\n while True:\n change = 0\n for state_idx in range(nS):\n v = 0\n for action_idx, action_prob in enumerate(policy[state_idx]): # for each state in nA\n for probability, nextstate, reward, terminal in P[state_idx][action_idx]:\n v += action_prob * probability * (reward + gamma * value_function[nextstate])\n change = max(change, abs(v - value_function[state_idx]))\n value_function[state_idx] = v\n if change < tol:\n break\n return value_function",
"def problem_1a():\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return 2/5\n # END_YOUR_ANSWER"
]
| [
"0.6217597",
"0.62039953",
"0.60716563",
"0.6069594",
"0.6069594",
"0.60084265",
"0.5963306",
"0.59397626",
"0.5933084",
"0.5912575",
"0.5874191",
"0.5874191",
"0.5852872",
"0.5846498",
"0.5827469",
"0.58160734",
"0.58049905",
"0.57597136",
"0.57506883",
"0.5730561",
"0.5729231",
"0.57020736",
"0.5686272",
"0.5674635",
"0.56734437",
"0.5672898",
"0.5660994",
"0.56591475",
"0.5653794",
"0.5647816"
]
| 0.70369786 | 0 |
r"""Get the number of times that this class returnd inf in the evaluation function, because of stoping condition error. | def unused_evals(self):
return self.Evals - self.nFES | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inf(self):\n return self._inf",
"def stop(self):\n return S.Infinity",
"def get_number_of_evaluation(self):\n return self.n_eval",
"def evaluate(self) -> int:",
"def _evaluate(self, state):\n leading_power_error = self.get_leading_power_error(state)\n if np.isfinite(leading_power_error):\n return -float(leading_power_error)\n else:\n return self._default_value",
"def count_noninf(multilayer):\n out = 0\n for x in multilayer:\n out = out + 0 if numpy.isinf(x.thickness) else out + 1\n return out",
"def nevals(self):\n return self._n",
"def number_of_iterations(self) -> int:\n pass",
"def isinf(x):\n return False",
"def count(self, elem):\n if not self.step:\n return _coconut.float(\"inf\") if elem == self.start else 0\n return int(elem in self)",
"def isinf(data):\n return _make.isinf(data)",
"def getNIterations(self):\n return self.getOrDefault(self.nIterations)",
"def getNIterations(self):\n return self.getOrDefault(self.nIterations)",
"def test_inf_treatment(self):\n values_with_infs = np.array([1, 2, 3, -np.inf, +np.inf, +np.inf])\n\n with self.subTest(\n \"Test if the warning for number of inf values is raised in hist_w_unc\"\n ):\n with LogCapture(\"puma\") as log:\n _ = hist_w_unc(values_with_infs, bins=np.linspace(0, 3, 3))\n log.check(\n (\n \"puma\",\n \"WARNING\",\n \"Histogram values contain 3 +-inf values!\",\n )\n )\n with self.subTest(\n \"Test if error is raised if inf values are in input but no range is defined\"\n ), self.assertRaises(ValueError):\n hist_w_unc(values_with_infs, bins=10)",
"def flaky_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"flaky_count\")",
"def number_of_iterations(self) -> int:\n return self._solution.info.iter",
"def iterations_in_epoch(self):\n if self._cur_epoch_itr is not None:\n return self._cur_epoch_itr.count\n elif self._next_epoch_itr is not None:\n return self._next_epoch_itr.count\n return 0",
"def n_neg(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == -1:\n running_total += 1\n return running_total if running_total > 0 else 1",
"def n_neg(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == -1:\n running_total += 1\n return running_total if running_total > 0 else 1",
"def get_iter_num(self):\n\tif len(self.cost) > 0:\n first_key = list(self.cost.keys())[0]\n num = len(self.cost[first_key]) - 1\n\telse:\n\t first_key = list(self.prim_var.keys())[0]\n num = len(self.prim_var[first_key]) - 1\n\treturn num",
"def __len__(self):\n return self.nb_iterations",
"def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err",
"def flaky_count(self) -> int:\n return pulumi.get(self, \"flaky_count\")",
"def _compute_value(self) -> float:\n logging.info('Counting flaky builds')\n session = db.Session()\n builds = models.Build.scope(\n session, base_time=self.base_time).filter(\n models.Build.state.in_([\n models.TravisState.PASSED,\n models.TravisState.FAILED,\n models.TravisState.ERRORED,\n ])).all()\n session.close()\n build_count = len(builds)\n\n if build_count == 0:\n raise ValueError('No Travis builds to process.')\n if build_count < 3:\n raise ValueError('Not enough Travis builds to determine flakiness.')\n\n flakes = 0\n build_triples = zip(builds[:-2], builds[1:-1], builds[2:])\n for prev_build, curr_build, next_build in build_triples:\n if (prev_build.state == models.TravisState.PASSED and\n curr_build.state != models.TravisState.PASSED and\n next_build.state == models.TravisState.PASSED):\n flakes += 1\n\n return flakes / build_count",
"def Ni_find(t):\r\n return ep(t) - 1",
"def stop(self):\n return not self.iteration < self.options['max_iters']",
"def torch_isnotfinite(x):\n not_inf = ((x + 1) != x)\n not_nan = (x == x)\n return 1 - (not_inf & not_nan)",
"def search(f):\n x = 0\n while not f(x):\n x += 1\n return x",
"def _find_cutoff(self):\n cutoff = 1\n while ((self.linear_rstar_unnorm(cutoff) -\n self.turing_rstar_unnorm(cutoff))**2\n > self.approx_turing_variance(cutoff)):\n cutoff += 1\n return cutoff",
"def _df_err(self):\n return self.n - self.k - 1"
]
| [
"0.64540565",
"0.6115566",
"0.6067888",
"0.6027168",
"0.598054",
"0.5936932",
"0.591695",
"0.59072673",
"0.5906285",
"0.589785",
"0.5794203",
"0.5791656",
"0.5791656",
"0.57543516",
"0.572959",
"0.5719795",
"0.5703006",
"0.57021004",
"0.57021004",
"0.56639737",
"0.56598085",
"0.5651198",
"0.56489366",
"0.56408757",
"0.5639369",
"0.56157154",
"0.56154096",
"0.56154037",
"0.5606832",
"0.5606273"
]
| 0.6201155 | 1 |
Implements a deep neural network for classification. params is a list of (weights, bias) tuples. inputs is an (N x D) matrix. Returns normalized class log-probabilities. | def neural_net_predict(params, inputs):
for W, b in params:
outputs = np.dot(inputs, W) + b
inputs = np.tanh(outputs)
return outputs - logsumexp(outputs, axis=1, keepdims=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def neural_net_predict(self, inputs):\n for W, b in self.params:\n outputs = np.dot(inputs, W) + b\n inputs = np.tanh(outputs)\n return outputs # - logsumexp(outputs, axis=1, keepdims=True)",
"def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n reg=0.0, weight_scale=1e-2, dtype=np.float32):\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # Initialize the network parameters with different weights and biases for #\n # network layers #\n ############################################################################\n \n key = ['W' + str(1), 'b' + str(1)]\n self.params[key[0]] = weight_scale * np.random.randn(input_dim, hidden_dims[0])\n self.params[key[1]] = np.zeros(hidden_dims[0])\n \n for i in range(1, len(hidden_dims)):\n key = ['W' + str(i+1), 'b' + str(i+1)]\n \n self.params[key[0]] = weight_scale * np.random.randn(hidden_dims[i-1], hidden_dims[i])\n self.params[key[1]] = np.zeros(hidden_dims[i])\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n self.params[key[0]] = weight_scale * np.random.randn(hidden_dims[len(hidden_dims)-1], num_classes)\n self.params[key[1]] = np.zeros(num_classes)\n\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)",
"def __call__(self, inputs):\n bias = self.param(\"bias\", self.bias_initializer,\n (1, 1, 1, inputs.shape[-2], inputs.shape[-1]))\n\n modulus_inputs = jnp.abs(inputs)\n return (nn.relu(modulus_inputs + bias) *\n (inputs / (modulus_inputs + self.epsilon)))",
"def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients",
"def nn_predict(params, inputs, nonlinearity=auto_np.tanh):\n for W, b in params:\n outputs = auto_np.dot(inputs, W) + b\n inputs = nonlinearity(outputs)\n return outputs",
"def __init__(self, input_dim=(1, 28, 28), num_classes=10):\n self.params = {}\n\n #######################################################################\n # TODO: Initialize weights and biases for the convolutional neural #\n # network. Weights should be initialized from a Gaussian distribution;#\n # biases should be initialized to zero. All weights and biases should #\n # be stored in the dictionary self.params. #\n #######################################################################\n\n filter_size = 5\n weight_scale = 1e-2\n num_filters = 6\n hidden_dim = 784\n\n #****** THIS WAS TO TEST OUT FASTER NETWORKS *******\n\n self.params['W1'] = np.random.normal(scale=weight_scale, size=(num_filters, input_dim[0], filter_size, filter_size))\n # self.params['W2'] = np.random.normal(scale=weight_scale, size=(num_filters, 6, filter_size, filter_size))\n self.params['W3'] = np.random.normal(scale=weight_scale, size=(864, num_classes))\n\n # self.params['W3'] = np.random.normal(scale=weight_scale, size=(hidden_dim, num_classes))\n # self.params['W4'] = np.random.normal(scale=weight_scale, size=(hidden_dim, num_classes))\n\n self.params['b1'] = np.zeros(num_filters)\n # self.params['b2'] = np.zeros(num_filters)\n self.params['b3'] = np.zeros(num_classes)\n\n # self.params['b3'] = np.zeros(num_classes)\n # self.params['b4'] = np.zeros(num_classes)",
"def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,\n weight_scale=1e-3, reg=0.0):\n self.params = {}\n self.reg = reg\n\n ############################################################################\n # TODO: Initialize the weights and biases of the two-layer net. Weights #\n # should be initialized from a Gaussian centered at 0.0 with #\n # standard deviation equal to weight_scale, and biases should be #\n # initialized to zero. All weights and biases should be stored in the #\n # dictionary self.params, with first layer weights #\n # and biases using the keys 'W1' and 'b1' and second layer #\n # weights and biases using the keys 'W2' and 'b2'. #\n ############################################################################\n self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim)\n self.params['b1'] = np.zeros(hidden_dim)\n self.params['W2'] = weight_scale * np.random.randn(hidden_dim, num_classes)\n self.params['b2'] = np.zeros(num_classes)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################",
"def logistic_predict(weights, data):\n #####################################################################\n # TODO: #\n # Given the weights and bias, compute the probabilities predicted #\n # by the logistic classifier. #\n #####################################################################\n \n y = None\n N = len(data)\n M = len(weights) - 1\n temp = np.ones([N, M + 1])\n temp[: N, : M] = np.array(data)\n data = temp\n\n\n\n # print(np.matmul(data, weights).shape)\n z = np.matmul(data, weights)\n y = sigmoid(z)\n #####################################################################\n # END OF YOUR CODE #\n #####################################################################\n return y",
"def create_classifier(in_dim, hid_dim, out_dim):\n W = np.ones((in_dim, hid_dim))\n b = np.ones(hid_dim)\n U = np.ones((hid_dim, out_dim))\n b_tag = np.ones(out_dim)\n\n W = np.random.randn(W.shape[0], W.shape[1])\n U = np.random.randn(U.shape[0], U.shape[1])\n b = np.random.randn(b.shape[0])\n b_tag = np.random.randn(b_tag.shape[0])\n params = [W, b, U, b_tag]\n return params",
"def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n #print W2.shape, b2.shape\n\n #print len(W1), data[0], b1, len(data)\n\n ### YOUR CODE HERE: forward propagation\n #Eg, find the cost function. Save some intermediate stuff though, seems like it'd be useful\n #h = sigmoid(x * w1 + b1)\n # y = (softmax( h * w2 + b2)\n # hence the cost function will be labels * log(y) and then sum it all up\n\n z_1 = np.matrix(data) * W1 + b1\n h = sigmoid(z_1)\n y_prime = softmax(h * W2 + b2)\n logs = np.log(y_prime)\n\n #print y_prime.shape\n\n #print np.array(logs) * labels\n\n cost = - np.sum(np.array(logs) * labels, axis = 1)\n cost = np.sum(cost) # lets add up each instance fo the cost for now and see what happens\n\n # My question is then do we just sum up the costs of each function\n #print cost #somethign is printing so I'm gonan say i'm a genius right here duh\n\n #Cost(y, y') = -sum of (y * log Y')\n ### END YOUR CODE\n\n ### YOUR CODE HERE: backward propagation\n\n # you'll need gradients for each parameter except for the input vectors. Right now this isn't even a word2vec\n delta_1 = y_prime - labels\n delta_2 = delta_1 * W2.T\n #print sigmoid_grad(h).shape\n delta_3 = np.array(delta_2) * sigmoid_grad(h)\n\n gradW2 = np.array(h.T * delta_1) # i dunno or its reverse OMG I HASTE EVERYONE why is it that np.array fixes everything. Sigh\n gradb2 = np.array(np.sum(delta_1, axis=0)) # main issue is that this is a 20 x 5 vector when it should be a 1 x 5\n gradW1 = data.T.dot(delta_3)\n gradb1 = np.sum(delta_3, axis=0) # this should be 1 x10 not 20 x 5\n\n\n\n ### END YOUR CODE\n\n #print gradW1, gradW1.flatten()\n # print 'jee'\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((\n gradW1.flatten(),\n gradb1.flatten(),\n gradW2.flatten(),\n gradb2.flatten())\n )\n #print grad\n #print cost\n return cost, grad",
"def forwardPropagation(self, inputs, label):\n node_hidden = np.dot(inputs, self.input_W)\n node_hidden = np.add(node_hidden, self.input_B)\n node_hidden = np.maximum(0, node_hidden)\n node_output = np.dot(node_hidden, self.hidden_W)\n node_output = np.add(node_output, self.hidden_B)\n #print(node_output)\n exp_node_output = np.exp(node_output)\n node_output = exp_node_output / np.sum(exp_node_output, axis=1, keepdims=True)\n #print(node_output)\n #node_output = self.softmax(node_output)\n loss = np.sum(-np.log(node_output[range(inputs.shape[0]),label]))/(inputs.shape[0])+0.5 * self.regularizer*np.sum(self.input_W *self.input_W)+0.5 * self.regularizer*np.sum(self.hidden_W *self.hidden_W)\n \"\"\"Loss= Input data loss + Loss correction by penalizing the loss, here we use 0.2 as an experimental value\"\"\"\n #loss = np.sum(-np.log(node_output[range(inputs.shape[0]), label])) / (inputs.shape[0]) + 0.2 * self.regularizer * np.sum(self.input_W ^ 2) + 0.2 * self.regularizer * np.sum(self.hidden_W ^ 2)\n return loss, node_hidden, node_output",
"def net(input_lst, weight_lst, bias):\r\n net_total = bias\r\n\r\n for node in range(len(input_lst)):\r\n net_total += input_lst[node] * weight_lst[node]\r\n\r\n return net_total",
"def mlpfwd(self,inputs):\n\n self.hidden = np.dot(inputs,self.weights1);\n self.hidden = 1.0/(1.0+np.exp(-self.beta*self.hidden))\n self.hidden = np.concatenate((self.hidden,-np.ones((np.shape(inputs)[0],1))),axis=1)\n\n outputs = np.dot(self.hidden,self.weights2);\n\n # Different types of output neurons\n if self.outtype == 'linear':\n \treturn outputs\n elif self.outtype == 'logistic':\n return 1.0/(1.0+np.exp(-self.beta*outputs))\n elif self.outtype == 'softmax':\n normalisers = np.sum(np.exp(outputs),axis=1)*np.ones((1,np.shape(outputs)[0]))\n return np.transpose(np.transpose(np.exp(outputs))/normalisers)\n else:\n print \"error\"",
"def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. #\n ############################################################################\n input_size = input_dim\n for i in range(len(hidden_dims)):\n output_size = hidden_dims[i]\n self.params['W' + str(i+1)] = np.random.randn(input_size,output_size) * weight_scale\n self.params['b' + str(i+1)] = np.zeros(output_size)\n if self.normalization:\n self.params['gamma' + str(i+1)] = np.ones(output_size)\n self.params['beta' + str(i+1)] = np.zeros(output_size)\n input_size = output_size # 下一层的输入\n # 输出层,没有BN操作\n self.params['W' + str(self.num_layers)] = np.random.randn(input_size,num_classes) * weight_scale\n self.params['b' + str(self.num_layers)] = np.zeros(num_classes)\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)",
"def __init__(self, numpy_rng, input, n_in, hidden_layers_sizes, n_out):\n # instance variables\n self.numpy_rng = numpy_rng\n self.input = input\n self.n_in = n_in\n self.hidden_layers_sizes = hidden_layers_sizes\n self.n_layers = len(hidden_layers_sizes)\n self.n_out = n_out\n\n self.hidden_layers = []\n self.params = []\n\n self.initialize_variables()\n\n\n ################\n ## Prediction ##\n ################\n self.y_pred = self.logistic_regression_layer.y_pred",
"def __init__(self, input, n_in, n_out):\r\n # start-snippet-1\r\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\r\n self.W = theano.shared(value=np.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True)\r\n # initialize the biases b as a vector of n_out 0s\r\n self.b = theano.shared(value=np.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True)\r\n\r\n # symbolic expression for computing the matrix of class-membership\r\n # probabilities\r\n # Where:\r\n # W is a matrix where column-k represent the separation hyperplane for class-k\r\n # x is a matrix where row-j represents input training sample-j\r\n # b is a vector where element-k represent the free parameter of hyperplane-k\r\n # p_y_given_x is a matrix\r\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\r\n\r\n # symbolic description of how to compute prediction as class whose\r\n # probability is maximal\r\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\r\n self.y_pred_prob = T.max(self.p_y_given_x, axis=1)\r\n\r\n # parameters of the model\r\n self.params = [self.W, self.b]\r\n\r\n # keep track of model input\r\n self.input = input",
"def build_model(inputs, num_classes, is_training, hparams):\n scopes = setup_arg_scopes(is_training)\n with contextlib.nested(*scopes):\n if hparams.model_name == 'pyramid_net':\n logits = build_shake_drop_model(\n inputs, num_classes, is_training)\n elif hparams.model_name == 'wrn':\n logits = build_wrn_model(\n inputs, num_classes, hparams.wrn_size)\n elif hparams.model_name == 'shake_shake':\n logits = build_shake_shake_model(\n inputs, num_classes, hparams, is_training)\n return logits",
"def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n \n dims = [input_dim] + hidden_dims + [num_classes]\n\n # initialise all parameters (weight, bias, gamma, beta)\n for i in range(len(dims)-1):\n w = 'W' + str(i+1)\n b = 'b' + str(i+1)\n self.params[w] = np.random.randn(dims[i], dims[i+1])*weight_scale\n self.params[b] = np.zeros(dims[i+1])\n \n if self.use_batchnorm:\n for i in range(len(dims)-2):\n #no gamma and beta for last layer\n gamma = 'gamma' + str(i+1)\n beta = 'beta' + str(i+1)\n self.params[gamma] = np.ones(dims[i+1])\n self.params[beta] = np.zeros(dims[i+1])\n \n \n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n \n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.use_batchnorm:\n self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers - 1)]\n \n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)",
"def forward(self, xs, like_params, nan_mask=None):\n\t\tassert len(like_params) == 1, f\"BernoulliLikelihood only takes\" \\\n\t\t\t\t+ f\" a single parameter. Found {len(like_params)}.\"\n\t\t# Unwrap the single parameter tuple.\n\t\tlike_params = like_params[0] # [b,s,m,m_dim]\n\t\tassert len(like_params.shape) == 4, f\"len({like_params.shape}) != 4\"\n\t\txs = xs.unsqueeze(1) # [b,1,m,m_dim]\n\t\tdist = Bernoulli(logits=like_params)\n\t\tlog_probs = dist.log_prob(xs).sum(dim=3) # [b,s,m]\n\t\tif nan_mask is not None:\n\t\t\ttemp_mask = (~nan_mask).float().unsqueeze(1).expand(log_probs.shape)\n\t\t\tassert temp_mask.shape == log_probs.shape, \\\n\t\t\t\t\tf\"{temp_mask.shape} != {log_probs.shape}\"\n\t\t\tlog_probs = log_probs * temp_mask # [b,s,m]\n\t\treturn log_probs",
"def get_nml_probs(x, model, data=None, normalize=True, num_classes=2, query_point_weight=20, dist_weight_thresh=None, \n num_grad_steps=10, lr=0.01, batch_size=32, grad_penalty=None, verbose=False, \n show_plots=False, plotting_2d=False, return_params=False):\n results = []\n data = data or DEFAULT_DATA\n orig_inputs, orig_targets = data\n \n if show_plots and plotting_2d:\n plt.figure()\n plt.title(f\"Original rewards\")\n plot_rewards(model, contours=True)\n plot_dataset(data)\n \n marker_for_class = {\n 0: 'x',\n 1: '*'\n }\n \n model.cuda()\n num_batches = ceil(len(orig_inputs) / batch_size)\n\n # NOTE train on gpu, move back to cpu for eval\n \n for proposed_class in range(num_classes):\n new_model = copy.deepcopy(model)\n new_model.cuda()\n \n # Sample all of the adaptation batches in advance\n optimizer = optim.SGD(new_model.parameters(), lr=lr)\n \n for _ in range(num_grad_steps):\n idxs = np.random.permutation(range(len(orig_inputs)))[:batch_size-1]\n X, y = orig_inputs[idxs], orig_targets[idxs]\n X = torch.Tensor(np.vstack((X, x))).cuda()\n y = torch.Tensor(np.hstack((y, proposed_class))).long().cuda()\n \n logits = new_model(X)\n loss = F.cross_entropy(logits, y, reduction='none')\n \n if dist_weight_thresh:\n weights = np.exp(-np.linalg.norm(x - X.cpu().numpy(), axis=-1) * 2.3 / dist_weight_thresh)\n else:\n weights = np.ones(len(y))\n \n weights[-1] *= query_point_weight * 1. / num_batches\n weights = torch.Tensor(weights).cuda()\n loss = torch.sum(loss * weights) / torch.sum(weights)\n \n loss.backward()\n optimizer.step()\n \n new_model.cpu()\n \n with torch.no_grad():\n x_tensor = torch.Tensor(x[None])\n probs = torch.softmax(new_model(x_tensor), -1)\n results.append(probs[0][proposed_class].item())\n \n if show_plots:\n new_model.to(torch.device(\"cpu\"))\n\n if plotting_2d: \n plt.figure()\n plot_rewards(new_model, contours=True, env = False, title=f\"Finetuning on label {proposed_class}\")\n plot_dataset(data)\n plt.scatter(x[0], x[1], marker=marker_for_class[proposed_class], color='w', s=100)\n \n plt.figure()\n plt.title(f\"Losses for label {proposed_class}\")\n plt.plot(losses)\n \n plt.figure()\n plt.title(f\"x loss for label {proposed_class}\")\n plt.plot(x_losses)\n \n plt.figure()\n plt.title(f\"x probs for label {proposed_class}\")\n plt.plot(x_vals)\n \n model.cpu()\n \n if normalize:\n results = np.array(results) / sum(results)\n else:\n results = np.array(results)\n return results if not return_params else (results, new_model)",
"def _build_deeplab(images, labels, ignore_labels, FLAGS, is_training=True):\n outputs_to_num_classes = dict(FLAGS.outputs_to_num_classes)\n\n outputs_to_scales_to_logits = multi_scale_logits(\n images,\n model_options=FLAGS,\n image_pyramid=FLAGS.image_pyramid,\n weight_decay=FLAGS.weight_decay,\n is_training=is_training,\n outputs_to_num_classes=outputs_to_num_classes,\n fine_tune_batch_norm=FLAGS.fine_tune_batch_norm)\n if labels is None:\n return outputs_to_scales_to_logits\n\n loss = None\n for output, num_classes in outputs_to_scales_to_logits.items():\n loss = ms_softmax_with_logits(\n outputs_to_scales_to_logits[output],\n labels,\n ignore_labels,\n upsample_logits=FLAGS.upsample_logits,\n scope=output)\n\n return outputs_to_scales_to_logits, loss",
"def __init__(self, input_nodes, hidden_nodes, hidden_layers, output_nodes):\n # Class members:\n # num_input_nodes\n # num_hidden_nodes\n # num_hidden_layers\n # num_output_nodes\n # weights = [[num_hidden_nodes, num_input_nodes],[num_hidden_nodes, num_hidden_nodes],[]<- for each hl,\n # [num_output_nodes, num_hidden_nodes]]\n # biases\n\n self.num_input_nodes = input_nodes\n self.num_hidden_nodes = hidden_nodes\n self.num_hidden_layers = hidden_layers\n self.num_output_nodes = output_nodes\n\n self.weights = []\n for i in range(self.num_hidden_layers + 1):\n if i is 0:\n # first weights array is input to hidden\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_input_nodes) - .25)\n\n elif i < self.num_hidden_layers:\n # next weight array is hidden nodes to hidden nodes\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_hidden_nodes) - .25)\n else:\n # last weight array is hidden nodes to output nodes\n self.weights.append(.5 * np.random.rand(self.num_output_nodes, self.num_hidden_nodes) - .25)\n\n self.biases = []\n for i in range(self.num_hidden_layers + 1):\n if i < self.num_hidden_layers:\n # for every hidden node there is a bias\n self.biases.append(0.5 * np.random.rand(self.num_hidden_nodes) - .25)\n else:\n # for the output node there is a bias as well\n self.biases.append(0.5 * np.random.rand(self.num_output_nodes) - .25)\n\n self.activation = np.vectorize(self.tanh, otypes=[float])",
"def evaluate_neural_network(data, keep_prob, num_layers, seed, weights, biases):\n\n\tif verbose:\tprint('model_tensorflow.evaluate_neural_network() called')\n\n\t# Calculate linear and ReLU outputs for the hidden layers\n\ta_prev = data\n\tfor i in range(num_layers-1):\n\t\tz = tf.add(tf.matmul(a_prev, weights['W' + str(i+1)]), biases['b' + str(i+1)])\n\t\ta = tf.nn.relu(z)\n\t\ta_r = tf.nn.dropout(a, keep_prob, seed=seed)\n\t\ta_prev = a_r\n\t# Calculate linear output for the output layer (logits)\n\tz_o = tf.add(tf.matmul(a_prev, weights['W' + str(num_layers)]), biases['b' + str(num_layers)])\n\n\treturn z_o",
"def logistic(weights, data, targets, hyperparameters):\n # TODO: Finish this function\n n_data = len(data)\n dim_data = len(data[0])\n\n f = 0\n y = logistic_predict(weights, data)\n\n data = mod_data(data)\n\n # dl/dw_j = SUM(x_ij * (t_i - (1 - sigmoid(z))))\n df = np.dot(data.T, (1.0 * targets) - (1 - y))\n\n # to calculate f, we need to sum the negative log of all y iff target is 0 and (1-y) iff target is 1\n f = -1.0 * np.dot(targets.T, np.log(1 - y)) - 1.0 * np.dot(1 - targets.T, np.log(y))\n\n # calculate P(C=0|x_i) for all x_i \n return f[0,0], df, y",
"def _learn_node_parameter_w(outputs, inputs=None):\n num_inputs = 0 if inputs is None else inputs.shape[1]\n weights = np.zeros(shape=num_inputs + 1)\n\n \"\"\" YOUR CODE HERE \"\"\"\n # Ax = b, A\n N_observe = outputs.shape[0]\n A = np.zeros(shape = (num_inputs+1, num_inputs+1))\n for i in range(A.shape[0]):\n for j in range(A.shape[1]):\n if i==0 and j==0:\n A[i][j] = N_observe\n elif i==0 and j!=0:\n A[i][j] = np.sum(inputs[:,j-1])\n elif i!=0 and j==0:\n A[i][j] = np.sum(inputs[:,i-1])\n else:\n for k in range(N_observe):\n A[i][j] += inputs[k,i-1]*inputs[k, j-1]\n b = np.zeros(shape=num_inputs + 1)\n for i in range(len(b)):\n if i==0:\n b[i] = np.sum(outputs)\n else:\n for k in range(N_observe):\n b[i] += inputs[k,i-1]*outputs[k]\n\n weights = np.linalg.solve(A, b)\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return weights",
"def build_mlp(input_data, output_data, n_neurons=[512, 256, 128]):\n input_layer = keras.layers.Input([input_data.shape[-1]], name='input-layer')\n for i, n_unit in enumerate(n_neurons):\n if i == 0:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(input_layer)\n else:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(x)\n \n output_layer = keras.layers.Dense(units=output_data.shape[-1],activation='softmax' , name='output-layer')(x)\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n return model",
"def __init__(self, input, n_in, n_out):\r\n # start-snippet-1\r\n\r\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\r\n self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True )\r\n\r\n # initialize the baises b as a vector of n_out 0s\r\n self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True )\r\n\r\n # symbolic expression for computing the matrix of class-membership probabilities where:\r\n # W is a matrix where column-k represent the separation hyper plain for class-k\r\n # x is a matrix where row-j represents input training sample-j\r\n # b is a vector where element-k represent the free parameter of hyper plane-k\r\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\r\n\r\n # symbolic description of how to compute prediction as class whose probability is maximal\r\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\r\n # end-snippet-1\r\n\r\n # parameters of the model\r\n self.params = [self.W, self.b]\r\n\r\n # keep track of model input\r\n self.input = input",
"def build_dense_network(data, hidden_layers, **kwargs):\n # Input layer\n with tf.variable_scope(\"layer_1\"): \n weights = tf.get_variable(\"weights\", shape = [input_shape[-1] + 1\n , hidden_layers[0]], initializer = tf.variance_scaling_initializer())\n\n output = tf.nn.leaky_relu(tf.matmul(tf.concat([data, tf.ones(dtype = tf.float32\n , shape = (tf.shape(data)[0], 1))], axis = 1) # concat\n , weights, name = \"multiply\") # matmul\n , α, name = \"output\") # leaky relu\n\n # DROP-OUT after the activation func\n output = tf.nn.dropout(output, keep_prob=δ, name = \"output\") \n\n # Hidden layers 1 to len(hidden_layers) - 1\n for i in range(2, len(hidden_layers)-1+2):\n\n with tf.variable_scope(f\"layer_{i}\"):\n n_nodes = hidden_layers[i-1]\n\n weights = tf.get_variable(\"weights\", shape = [hidden_layers[i-2]+1, hidden_layers[i-1]], initializer = tf.variance_scaling_initializer())\n output = tf.nn.leaky_relu(tf.matmul(tf.concat([output, tf.ones(dtype = tf.float32, shape = (tf.shape(data)[0], 1))], axis = 1), weights, name = \"multiply\"), α, name = \"output\")\n\n # DROP-OUT after the activation func\n output = tf.nn.dropout(output, keep_prob=δ, name = \"output\") \n\n # Output layer\n with tf.variable_scope(f\"layer_{len(hidden_layers)+1}\"):\n\n weights = tf.get_variable(\"weights\", shape = (hidden_layers[1]+1, n_summaries), initializer = tf.variance_scaling_initializer())\n output = tf.identity(tf.matmul(tf.concat([output, tf.ones(dtype = tf.float32, shape = (tf.shape(data)[0], 1))], axis = 1), weights, name = \"multiply\"), name = \"output\")\n # NO DROP-OUT in the last layer\n\n\n return output",
"def make_classification_nn(layer_sizes):\n num_weights, compute_hiddens = make_nn_funs(layer_sizes)\n\n def make_predictions(weights, inputs):\n \"\"\"Normalize log-probabilities.\"\"\"\n hiddens = compute_hiddens(weights, inputs)\n return hiddens - logsumexp(hiddens, axis=1, keepdims=True)\n\n def likelihood(weights, inputs, targets):\n return np.sum(make_predictions(weights, inputs) * targets, axis=1)\n\n return num_weights, make_predictions, likelihood",
"def __init__(self, input, n_in, n_out):\r\n\r\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\r\n self.W = theano.shared(value=numpy.zeros((n_in, n_out),\r\n dtype=theano.config.floatX),\r\n name='W', borrow=True)\r\n # initialize the baises b as a vector of n_out 0s\r\n self.b = theano.shared(value=numpy.zeros((n_out,),\r\n dtype=theano.config.floatX),\r\n name='b', borrow=True)\r\n\r\n # compute vector of class-membership probabilities in symbolic form\r\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\r\n\r\n # compute prediction as class whose probability is maximal in\r\n # symbolic form\r\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\r\n\r\n # parameters of the model\r\n self.params = [self.W, self.b]"
]
| [
"0.6198682",
"0.6188579",
"0.5969461",
"0.5948908",
"0.59376526",
"0.59095854",
"0.58389044",
"0.57970214",
"0.57521266",
"0.574874",
"0.5740589",
"0.57374215",
"0.5727985",
"0.5725142",
"0.5716328",
"0.5698211",
"0.5688488",
"0.5681211",
"0.5670693",
"0.5665312",
"0.565391",
"0.56538135",
"0.5640104",
"0.5639738",
"0.5635937",
"0.5624893",
"0.5613334",
"0.55953526",
"0.5586822",
"0.5586349"
]
| 0.72680444 | 0 |
Computes the L2 norm of params by flattening them into a vector. | def l2_norm(params):
flattened, _ = flatten(params)
return np.dot(flattened, flattened) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _l2s(self, params):\n return [np.linalg.norm(param) for param in params]",
"def norm_l2(u):\n return linalg.norm(u.ravel())",
"def l2_norm(v):\n res = 0\n for e in v:\n res += e * e\n return math.sqrt(res)",
"def l2(vec):\n return np.linalg.norm(vec)",
"def norm_l2(v):\n return np.sqrt((v**2).sum())",
"def L2norm(m):\n return np.sqrt(np.sum(m**2))",
"def norm_L2(u):\n return norm_l2(u)/sqrt(float(u.size))",
"def vec_2norm (x):\n return math.sqrt (sum ([x_i**2 for x_i in x]))",
"def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)",
"def l2_norm(self):\n return (self.x**2 + self.y**2 + self.z**2)**0.5",
"def l2_norm(vec_or_matrix):\n if len(vec_or_matrix.shape) == 1:\n # linear vector\n return vec_or_matrix / np.linalg.norm(vec_or_matrix)\n elif len(vec_or_matrix.shape) == 2:\n return vec_or_matrix / np.linalg.norm(vec_or_matrix, axis=1, ord=2)[:, np.newaxis]\n else:\n raise ValueError('Wrong number of dimensions, 1 or 2 is supported, not %i.' % len(vec_or_matrix.shape))",
"def two_norm(v):\n return math.sqrt(dot_product(v, v))",
"def l2norm(X):\n norm = np.linalg.norm(X, axis=1, keepdims=True)\n return 1.0 * X / norm",
"def l2_norm(self, input):\n input_size = input.size()\n buffer = torch.pow(input, 2)\n normp = torch.sum(buffer, 1).add_(1e-10)\n norm = torch.sqrt(normp)\n _output = torch.div(input, norm.view(-1, 1).expand_as(input))\n output = _output.view(input_size)\n\n return output",
"def _l2_normalize(x, axis=None, eps=1e-12):\n return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps)",
"def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))",
"def norm2d(w_in):\n return nn.BatchNorm2d(num_features=w_in, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)",
"def l2_norm(pattern):\n return np.linalg.norm(pattern)",
"def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)",
"def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))",
"def l2norm(array1,array2):\r\n tot = np.sum(np.abs(array1)**2)\r\n return np.sqrt(np.sum(np.abs(array1-array2)**2)/tot)",
"def normalize_l2(x):\n return x / (npla.norm(x))",
"def l2_norm(input_x, epsilon=1e-12):\n input_x_norm = input_x/(tf.reduce_sum(input_x**2)**0.5 + epsilon)\n return input_x_norm",
"def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):\n if filters is None:\n filters = shape_list(x)[-1]\n with tf.variable_scope(name, default_name=\"l2_norm\", values=[x], reuse=reuse):\n scale = tf.get_variable(\n \"l2_norm_scale\", [filters], initializer=tf.ones_initializer())\n bias = tf.get_variable(\n \"l2_norm_bias\", [filters], initializer=tf.zeros_initializer())\n epsilon, scale, bias = [cast_like(t, x)\n for t in [epsilon, scale, bias]]\n mean = tf.reduce_mean(x, axis=[-1], keepdims=True)\n l2norm = tf.reduce_sum(tf.square(x - mean), axis=[-1], keepdims=True)\n norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)\n return norm_x * scale + bias",
"def l2(v, axis=None):\n length = v.shape[0]\n return np.sqrt(np.sum(np.square(v), axis=axis) / length)",
"def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2",
"def L2norm(self, array):\n norm = torch.sqrt(torch.sum(array * array))\n return norm",
"def compute_L2_normalization(xx):\r\n\treturn np.sum(xx ** 2, axis=1)",
"def norm2d(self) -> float:\n\n return self.v2ddict.norm2d()",
"def normF2(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return LA.norm(X, 'fro')**2"
]
| [
"0.82546264",
"0.77542174",
"0.7592876",
"0.7537406",
"0.746447",
"0.7305168",
"0.72727126",
"0.712277",
"0.70562875",
"0.70500493",
"0.69952273",
"0.6988028",
"0.6913676",
"0.6865785",
"0.6841669",
"0.6832344",
"0.68215376",
"0.6781754",
"0.67134154",
"0.6705719",
"0.6691719",
"0.6688912",
"0.6682432",
"0.6670041",
"0.6656863",
"0.66472065",
"0.66322595",
"0.65700644",
"0.656677",
"0.65335786"
]
| 0.8654899 | 0 |
Get all company lists | def company_lists(self):
return self.client.get('company/named-lists') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_companies_and_people():",
"def get_companies(self):\n url = 'companies'\n result = self.get(url)\n return result['companies']",
"def get_companies():\n all_companies = storage.all(Company).values()\n list_companies = []\n for company in all_companies:\n list_companies.append(company.to_dict())\n return jsonify(list_companies)",
"def list_all(request):\n companies = Company.objects.order_by('-created')\n context = dict(companies=companies)\n return render(request, 'companies/all.html', context)",
"def get_companies(self):\n response = self.do_request('/undertaking/list')\n if response:\n return response.json()",
"def companies():\n res = requests.get('http://0.0.0.0:5002/companies')\n return jsonify(res.json())",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def read_companies():\n list_of_companies = data_manager.get_data_from_file(filename=\"company/company_data.csv\")\n return list_of_companies",
"def company_list_by_name(self, list_name):\n return self.client.get(\n 'company/named-lists/{list_name}'.format(\n list_name=list_name\n )\n )",
"def get_companies(self, obj):\n groupCompanies = get_objects_for_group(\n obj, \"view_company\", klass=models.Company)\n return [x.id for x in groupCompanies]",
"def get_companies(self):\n response = self.do_request('/management/companies/export/json')\n if response:\n return response.json()",
"def get_companies(self, **kwargs):\n return self.get('companies.json', **kwargs)",
"def get_sp_list():\n bs = get_soup('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n sp_companies = bs.find_all('a', class_=\"external text\")\n return sp_companies",
"def get_companies(self, obj):\n userCompanies = get_objects_for_user(\n obj, \"view_company\", klass=models.Company)\n return [x.id for x in userCompanies]",
"def get_available_companies(team):",
"def all_companies(login_details):\n output = None\n sql = u'SELECT client_company_ID ' \\\n u'FROM client_company_TBL;'\n\n c, conn = connection(login_details)\n try:\n c.execute(sql)\n values = c.fetchall()\n if values is not None:\n output = values\n finally:\n conn_close(c, conn)\n\n return output",
"def test_get_all_companies(self):\n create_company()\n res = self.client.get(ALL_COMPANIES_LIST)\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def list(self) -> List[Organisation]:\n ...",
"def company(self):\n\n x = 0\n my_company = self.data[\"Company Name\"]\n my_account = self.data[\"Account\"]\n result = []\n for i in my_company:\n my_string = i + \" -- \" + my_account[x]\n x += 1\n result.append(my_string)\n\n return result",
"def get_companies(request):\n try:\n companies = []\n for company in Company.objects.all():\n companies.append(company.dump_to_dict())\n\n return format_ajax_response(True, \"Companies list retrieved successfully.\", {'companies': companies})\n except Exception as ex:\n logging.error(\"failed to get_companies: %s\" % ex)\n return format_ajax_response(False, \"There was a problem retrieving the companies listing.\")",
"def getDomains(self, company):\n return self.db.getDomains(company)",
"def get_available_companies_and_people(team):",
"def pull_companies(cls, soup):\n companies = []\n parents = soup.findAll('div', 'row job-information')\n for parent in parents:\n try:\n temp = parent.find('div', 'columns large-2 medium-3 small-12').find('h4')\n except AttributeError:\n companies.append(None)\n else:\n if temp.a:\n # Company name is sometimes wrapped in anchor tag\n companies.append(temp.find('a').contents[0].strip())\n else:\n companies.append(temp.contents[0].strip())\n return companies",
"def test_all_companies(self, setup_data):\n url = reverse('api-v3:search:basic')\n response = self.api_client.get(\n url,\n data={\n 'term': '',\n 'entity': 'company',\n },\n )\n\n assert response.status_code == status.HTTP_200_OK\n assert response.data['count'] > 0",
"def get_companies_and_people(team):",
"def test_get_all_company_props_using_get(self):\n pass",
"def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)"
]
| [
"0.7882304",
"0.7596987",
"0.70944256",
"0.6964053",
"0.69328797",
"0.691499",
"0.6873857",
"0.6873857",
"0.6873857",
"0.6873857",
"0.686815",
"0.6856728",
"0.6840562",
"0.67586446",
"0.67511743",
"0.6706522",
"0.6700748",
"0.6663124",
"0.6660832",
"0.65631896",
"0.6477927",
"0.64454114",
"0.6416149",
"0.6316102",
"0.63120997",
"0.6294343",
"0.6293985",
"0.62859595",
"0.6262821",
"0.6258967"
]
| 0.86757165 | 0 |
Get a specific company list by name | def company_list_by_name(self, list_name):
return self.client.get(
'company/named-lists/{list_name}'.format(
list_name=list_name
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def company_lists(self):\n return self.client.get('company/named-lists')",
"def get_companies(self, **kwargs):\n return self.get('companies.json', **kwargs)",
"def search_company(cls, name, clause):\n return [('sale.company', ) + tuple(clause[1:])]",
"def get_all_companies_and_people():",
"def get_company(self, name):\n return self.website.company.id",
"def get_companies(self):\n url = 'companies'\n result = self.get(url)\n return result['companies']",
"def get_company(self, name):\n return self.instance.company.id",
"def get_company(self, name):\n return self.store.company.id",
"def get_company(self, cmp):\n if cmp in self.cnames:\n return self.cnames[cmp]\n else:\n return None",
"def get_available_companies(team):",
"def get_company(self, company_referece):\n url = 'companies/{0}'.format(company_referece)\n result = self.get(url)\n return result.get('company', result)",
"def API_companysearch(request):\n company = request.GET.get(\"search\")\n company = str(company).strip()\n results = models.Company.objects.filter(name__icontains = company)\n results = [[company.pk,company.name] for company in results]\n return django.http.JsonResponse({\"success\":True,\"results\":results})",
"def company(self):\n\n x = 0\n my_company = self.data[\"Company Name\"]\n my_account = self.data[\"Account\"]\n result = []\n for i in my_company:\n my_string = i + \" -- \" + my_account[x]\n x += 1\n result.append(my_string)\n\n return result",
"def companies():\n res = requests.get('http://0.0.0.0:5002/companies')\n return jsonify(res.json())",
"def get_companies_and_people(team):",
"def read_companies():\n list_of_companies = data_manager.get_data_from_file(filename=\"company/company_data.csv\")\n return list_of_companies",
"def pull_companies(cls, soup):\n companies = []\n parents = soup.findAll('div', 'row job-information')\n for parent in parents:\n try:\n temp = parent.find('div', 'columns large-2 medium-3 small-12').find('h4')\n except AttributeError:\n companies.append(None)\n else:\n if temp.a:\n # Company name is sometimes wrapped in anchor tag\n companies.append(temp.find('a').contents[0].strip())\n else:\n companies.append(temp.contents[0].strip())\n return companies",
"def get_sp_list():\n bs = get_soup('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n sp_companies = bs.find_all('a', class_=\"external text\")\n return sp_companies",
"def get_available_companies_and_people(team):",
"def get_companies(self):\n response = self.do_request('/undertaking/list')\n if response:\n return response.json()",
"def get_companyName(self, obj):\n groupCompanies = get_objects_for_group(\n obj, \"view_company\", klass=models.Company)\n return [x.name for x in groupCompanies]",
"def get_companies():\n all_companies = storage.all(Company).values()\n list_companies = []\n for company in all_companies:\n list_companies.append(company.to_dict())\n return jsonify(list_companies)",
"def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)",
"def get_customers_by_name(name: str = '') -> List[Customer]:\n customers = []\n for c in get_market().customers.values():\n if not name or c.name == name:\n customers.append(c)\n return customers",
"def get_company_info(company_name):\n\n # Fix formatting of name\n co = company_name.replace(\".\", \"\").replace(\" \", \"%20\")\n\n query = f\"http://d.yimg.com/autoc.finance.yahoo.com/autoc?query={co}\\\n ®ion=1&lang=en&callback=YAHOO.Finance.SymbolSuggest.ssCallback\"\n\n response = requests.get(query)\n\n fdata = response.text.split(\"(\", 1)[1]\n fdata = fdata.rsplit(\")\", 1)[0]\n data = json.loads(fdata)\n yahoo_json = data[\"ResultSet\"][\"Result\"]\n\n return yahoo_json",
"def get_company_info(company_no):\n in_ = 'curl -s -X GET -u yLwgnyHvwlYxkbOBAoLEwsaEfVQ_a7kAuCUTNtSt: https://api.companieshouse.gov.uk/company/{}/officers?q=Officers&items_per_page=100&start_index=0'.format(company_no).split()\n\n out = subprocess.check_output(in_)\n res = json.loads(out.decode('utf8'))\n ret = res['items']\n \n return ret",
"def API_companyPO(request):\n company = request.GET.get(\"po\")\n search = request.GET.get(\"po_search\")\n company = models.Company.objects.get(pk = company)\n if not company: return django.http.HttpResponseBadRequest(\"Invalid company ID\")\n pos = doors.models.Order.objects.filter(customer_po__icontains = company)\n results = [po.customer_po for po in pos]\n return django.http.JsonResponse({\"success\":True,\"results\":results})",
"def get_companies(self):\n response = self.do_request('/management/companies/export/json')\n if response:\n return response.json()",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies"
]
| [
"0.7050499",
"0.70122504",
"0.6967497",
"0.69125587",
"0.68775976",
"0.68144214",
"0.67415774",
"0.6726178",
"0.6497163",
"0.6486741",
"0.6471226",
"0.6468147",
"0.6429776",
"0.6398427",
"0.63927275",
"0.6366573",
"0.6242466",
"0.61506873",
"0.61404985",
"0.6133206",
"0.60923433",
"0.60805273",
"0.607923",
"0.6040344",
"0.60391647",
"0.603229",
"0.60273623",
"0.59489566",
"0.59372395",
"0.59372395"
]
| 0.8003518 | 0 |
Returns a list of all 970-character-long texts from the given book file. | def get_texts(book: TextIO) -> list:
content = book.read()
chars_limit = 970
texts = [content[i:i + chars_limit] for i in range(0, len(content), chars_limit)]
return ["..." + t + "..." if t != texts[0] else t + "..." for t in texts] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_words_in_book(filename):\n f = open(filename, \"r\")\n content = f.read()\n f.close()\n wds = text_to_words(content)\n return wds",
"def get_word_list(file_name):\n\tnew_list = []\n\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\tend_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\twhile lines[end_line].find('End of the Project Gutenberg EBook') == -1:\n\t\tend_line -= 1\n\tlines = lines[curr_line + 1:end_line]\n\n\tlong_lines = ''.join(str(e) for e in lines)\n\tlong_lines = long_lines.lower()\n\tlong_lines = long_lines.translate(None, punctuation)\n\n\twords = long_lines.split()\n\tfor item in words:\n\t\tnew_list.append(item)\n\n\treturn new_list",
"def read_book_words(infile):\t\n\tfin = open(infile) \n\tlines = fin.readlines()\n\twords = []\n\tfor line in lines[25:16263]: #skipping over the header information\n\t\tline = line.replace('-', ' ')\n\t\tt = line.split()\n\t\tfor word in t:\n\t\t\tword = word.strip(string.punctuation + string.whitespace + '\\xe2\\x80\\x9c' + '\\xe2\\x80\\x9d')\n\t\t\tword = word.lower()\n\t\t\twords.append(word)\n\t#words.remove('')\n\treturn words",
"def open_file(file,n,n1=0):\n\tfin = open(file)\n\tbook_lines = []\n\tcount = 0\n\tfor line in fin:\n\t\tword = line.strip()\n\t\tcount += 1\n\t\tif count > n:\n\t\t\tbook_lines.append(word)\n\treturn book_lines",
"def get_longest_diverse_words(file_path: str) -> List[str]:\n with open(file_path, encoding=\"unicode-escape\") as f:\n text = f.read()\n text = text.translate(str.maketrans(dict.fromkeys(string.punctuation)))\n words = list(set(text.replace(\"-\\n\", \"\").replace(\"\\n\", \" \").split(\" \")))\n return sorted(words, key=lambda x: len(set(x)))[-10:]",
"def get_word_list(file_name):\n\tbook = get_file_text(file_name)\n\tbook = strip_header(book)\n\tbook = strip_punctuation(book)\n\tbook = book.lower()\n\twords = re.split(r'\\s+', book)\n\treturn words",
"def read_txt(filename):\n file_object = open(filename, 'r')\n file_as_string = file_object.read()\n return create_word_list(file_as_string)",
"def read_file(filename):\n\n all_documents = []\n document = []\n with tf.gfile.GFile(filename, \"r\") as reader:\n for line in reader:\n line = line.strip()\n line = tokenization.convert_to_unicode(line)\n line = line.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n sents = split_line_by_sentences(line)\n for sent_line in sents:\n if not sent_line or len(sent_line) < 4: # Arbitrary min length for line\n continue\n if sent_line.lower()[:7] == \"chapter\":\n if document:\n all_documents.append(document)\n document = []\n else:\n document.append(sent_line)\n if len(document) == FLAGS.max_para_length:\n all_documents.append(document)\n document = []\n if document:\n all_documents.append(document)\n\n # Remove small documents\n all_documents = [x for x in all_documents if len(x) >= 8]\n\n return all_documents",
"def find_long_words():\n f = open('session09/words.txt')\n \n for line in f:\n word = line.strip()\n if len(word) > 20:\n print(word, len(word))",
"def read_book_words(infile):\t\n\timport string\n\t\n\tfin = open(infile) \n\tlines = fin.readlines()\n\twords = []\n\tfor line in lines[25:]: #skipping over the header information\n\t\tline = line.replace('-', ' ')\n\t\tt = line.split()\n\t\tfor word in t:\n\t\t\tword = word.strip(string.punctuation + string.whitespace + '\\xe2\\x80\\x9c' + '\\xe2\\x80\\x9d')\n\t\t\tword = word.lower()\n\t\t\twords.append(word)\n\t#words.remove('')\n\treturn words",
"def get_word_list(file_name):\n\n\tstoryEdit = []\n\n\t#Reads the file starting after the beginning\t\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\tlines = lines[curr_line+1:]\n\n\n\t#Loops through each row, making everything lowercase and replacing all punctuation\n\tfor row in lines:\n\t \trow = row.lower()\n\t \trow = row.translate(string.maketrans(\"\",\"\"), string.punctuation)\n\t \tstoryEdit += row.split()\n\n\n\t#Returns the final list as \n\treturn storyEdit",
"def get_word_list(file_name, n):\n f = open(file_name, 'r')\n text = f.read()\n words = re.compile('\\w+').findall(text)\n return get_top_n_words(words, n)",
"def get_word_list(file_name):\n file_ = open(file_name, 'r')\n lines = file_.readlines()\n\n start_line = 0\n while lines[start_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n start_line += 1\n\n lines = lines[start_line+1:]\n\n end_line = 0\n while lines[end_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:\n end_line += 1\n\n lines = lines[:end_line-3]\n\n list_ = ' '.join(lines)\n list_ = str.lower(list_)\n list_ = list_.translate(None, string.punctuation)\n list_ = list_.split()\n\n return list_",
"def get_word_list(file_name):\n # Read the file specified\n f = open(file_name,'r')\n lines = f.readlines()\n \n # Remove header text from lines\n curr_line = 0\n while lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line += 1\n lines = lines[curr_line + 1:]\n\n # Remove footer text from lines\n curr_line = -1\n while lines[curr_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line -= 1\n lines = lines[: curr_line]\n\n # Strip lines into words\n words = []\n for i in range(len(lines)):\n # Remove punctuation\n next_line = lines[i].translate(string.maketrans(\"\",\"\"), string.punctuation)\n next_line = next_line.lower()\n words += next_line.split()\n \n return words",
"def read_txt(txtfile):\n with open(txtfile, \"r\", encoding=\"utf8\") as infile: \n text = infile.read()\n #print(text[0:100])\n return text",
"def get_all_text_for_book(book_file_name):\n if not os.path.exists(verse_store):\n os.mkdir(verse_store)\n with open(book_file_name, \"r+\") as rh:\n book = json.load(rh)\n chapter_text = {}\n\n for name, chapters_dictionary in book.items():\n for chap, location in chapters_dictionary.items():\n outfile = \"{}/{}_{}.json\".format(verse_store, normalize_filename(name), chap)\n \n if os.path.exists(outfile):\n continue\n else:\n chapter_text[name + \"__\" + chap] = get_chapter_text(location)\n with open(outfile, \"w+\") as wh:\n json.dump(chapter_text, wh)\n chapter_text = {}",
"def load_words(filename):\n url = codeskulptor.file2url(filename)\n word_file = urllib2.urlopen(url)\n \n all_words = []\n for line in word_file.readlines():\n all_words.append(line.strip())\n \n \n return all_words",
"def read_book_from_file(book):\n with open(book, 'r') as book:\n book_contents = book.read()\n return book_contents",
"def getlistfromtext(self,filename):\n l=[]\n\n if self.encoding:\n f = codecs.open(filename,\"r\",encoding=self.encoding)\n for line in f:\n l.append(line.rstrip())\n f.close()\n\n else:\n f = open(filename,\"r\")\n for line in f:\n l.append(line.rstrip())\n f.close()\n return l",
"def load_words(filename):\n\n url = codeskulptor.file2url(filename)\n netfile = urllib2.urlopen(\n \"http://codeskulptor-assets.commondatastorage.googleapis.com/assets_scrabble_words3.txt\")\n words_file = netfile.readlines()\n\n words = [word[:-2] for word in words_file]\n\n return words",
"def load_valid_words():\n valid_word_list = []\n file = open(TRANSCRIPTION_PATH, 'r')\n words = file.readlines()\n file.close()\n for word in words:\n if int(word[0:3]) >= 300:\n valid_word_list.append(word[0:9])\n return valid_word_list",
"def twentychar2():\n with open('words.txt','r') as fd:\n wordList = fd.read().split() # split the word from space \n print ([word for word in wordList if len(word) > 20])",
"def read_words(filename):\n # load assets\n word_file = urlopen(filename)\n \n # read in files as string\n words = word_file.read()\n \n # template lines and solution lines list of line string\n # if the input value is '\\n' then TypeError: a bytes-like object is required, not 'str'\n word_list = words.split(b'\\n')\n word_list = [word.decode('ascii') for word in word_list]\n print(\"Loaded a dictionary with\", len(word_list), \"words\")\n return word_list",
"def get_doc(filename :str) -> List[List[str]]:\n\tdata = []\n\ttry:\n\t\twith open(filename, 'r', encoding='utf-8') as f:\n\t\t\tcontent = f.read()\n\t\t\t# print(content)\n\t\t\tpattern = re.compile(r\"<doc.*?>(.*?)</doc>\",re.S)\n\t\t\ttexts = re.findall(pattern, content)\n\t\t\t# print(data)\n\n\t\t\tfor text in texts:\n\t\t\t\t# print(text)\n\t\t\t\ttemp = process_doc(text)\n\t\t\t\tdata.extend(temp)\n\t\t\t\t# print(len(temp))\n\n\t\t\treturn data\n\n\texcept IOError as e:\n\t\tprint(\"the file {} cannot open\".format(filename))\n\t\tprint(e)\n\t\traise IOError",
"def read_word_file(self, filename):\n words = []\n try:\n file = open(filename, 'rt', encoding='utf8')\n words = [word[:-1] for word in file.readlines()]\n\n except Exception as e:\n print(f'[-] Error occurred while reading word file: {e}')\n\n return words",
"def load_words(filename):\r\n url = codeskulptor.file2url(filename)\r\n netfile = urllib2.urlopen(url)\r\n ans = []\r\n for line in netfile.readlines():\r\n ans.append(line[:-1])\r\n return ans",
"def read_words(filename):\n with open(filename, encoding=\"utf-8\") as file:\n words = file.read().splitlines()\n return words",
"def read_words(filename):\n # load assets\n word_file = urllib2.urlopen(filename)\n \n # read in files as string\n words = word_file.read()\n \n # template lines and solution lines list of line string\n word_list = words.split('\\n')\n print \"Loaded a dictionary with\", len(word_list), \"words\"\n return word_list",
"def fetch_words(filename):\n data = [] #empty list\n with urlopen(filename) as story:\n for line in story:\n words = line.decode('utf-8').split() #must decode into strings and then separate with spaces\n #print(lists)\n for word in words:\n data.append(word)\n return(data)",
"def load_words(filename):\n url = codeskulptor.file2url(filename)\n netfile = urllib2.urlopen(url)\n \n words = []\n for line in netfile.readlines():\n words.append(line.replace('\\n',''))\n \n return words"
]
| [
"0.700185",
"0.6625363",
"0.64112055",
"0.6399384",
"0.6379729",
"0.6248625",
"0.62249917",
"0.6207146",
"0.61987627",
"0.6133272",
"0.612571",
"0.610539",
"0.61017334",
"0.60968274",
"0.60438025",
"0.6038257",
"0.6001676",
"0.59936243",
"0.5934076",
"0.5932082",
"0.5926508",
"0.5914084",
"0.5911522",
"0.5907851",
"0.58997875",
"0.5895655",
"0.5891936",
"0.58598566",
"0.58433807",
"0.583824"
]
| 0.7710512 | 0 |
Returns the complexity of the given text by adding up the frequencies of all its words. | def complexity(text:str) -> float:
words = text.split(' ')
freqs = [frequency(w) for w in words]
return sum(freqs) / (len(frequency_list) - freqs.count(0)) #sum of the frequencies / all the words that were in the list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))",
"def frequency(text):\n # TODO: change function input to a textfile?\n import collections\n freq = collections.Counter(text)\n # print freq\n return freq",
"def analyze(self, text):\n\n text = tknzr.tokenize(text)\n\n score = 0\n \n for word in text:\n if self.positiveWords.count(word.lower()) > 0:\n score += 1\n elif self.negativeWords.count(word.lower()) > 0:\n score -= 1\n \n return score",
"def analyze(self, text):\n\n score = 0.0;\n\n words = text.split(' ')\n # match each word in either the positives or negatives list adding or subtracting 1 from the score if present\n for word in words:\n for w in self.positives:\n if w == word.lower():\n score += 1.0\n continue\n \n for w in self.negatives:\n if w == word.lower():\n score -= 1.0\n continue\n\n return score",
"def analyze(self, text):\n #analize every word in the text a value -1, 1 or 0 and calculate total score\n #tokens allow us to split words in single tokens we can initialize tokens like this:\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text.lower())\n\n score = 0\n\n if tokens[0] in self.negatives:\n score =- 1\n elif tokens[0] in self.positives:\n score =+ 1\n else:\n score = 0\n\n #print('', text)\n\n return score",
"def analyze(self, text):\n #Check each word in text\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n total_score = 0\n #Sum the total score\n for token in tokens:\n token = token.lower()\n if token in self.positives:\n total_score = total_score + 1\n elif token in self.negatives:\n total_score = total_score - 1\n else:\n total_score = total_score + 0\n \n return total_score",
"def analyze(self, text):\n\n tknzr = nltk.tokenize.TweetTokenizer()\n words = tknzr.tokenize(text)\n \n score = 0\n \n for word in words:\n if word.lower() in self.positives:\n score += 1\n elif word.lower() in self.negatives:\n score -= 1\n else:\n continue\n \n return score",
"def analyze(self, text):\n \n total_words = len(text)\n \n negatives_length = len(self.negatives)\n positives_length = len(self.positives)\n \n posneg_sum = 0\n \n for word in text:\n \n for j in range(0, positives_length):\n if word == self.positives[j][:-1]:\n posneg_sum += 1\n \n for k in range(0, negatives_length):\n if word == self.negatives[k][:-1]:\n posneg_sum -= 1\n\n return posneg_sum",
"def entropy(self, text):\n\n# text = self.myReplacer.replace(text)\n# text = self.tokenizer.tokenize(text)\n new_text = []\n for word in text:\n if word.count('\\'') > 0:\n words = word.split('\\'')\n for w in words:\n new_text.append(w)\n else:\n new_text.append(word)\n text = new_text\n \n e = 0.0\n lenth = len(text)\n if lenth == 0:\n return 0\n elif lenth < self._n:\n current_n = lenth\n else:\n current_n = self._n\n \n for i in range(current_n - 1, len(text)):\n context = tuple(text[(i - current_n + 1) : i])\n token = text[i]\n e += self.logprob(token, context)\n return e",
"def analyze(self, text):\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for token in tokens:\n if token in self.positives_list:\n score += 1\n elif token in self.negatives_list:\n score -= 1\n\n return score",
"def evaluate(self, text):\n text = ' '.join(['<'] * (self.n - 1) + [text.replace(' . ', ' .%s ' % (' <' * (self.n - 1)))])\n tokens = self.split(text)\n sum = 0\n for i in range(len(tokens) - self.n + 1):\n n_gram = self.join(tokens[i: i + self.n])\n prob = self.get_probability(n_gram, True)\n sum += math.log(prob, self.log_base)\n return sum",
"def analyze(self, text):\n\n # TODO\n # tokens = tokenizer.tokenize(tweet)\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for word in tokens:\n # iterate over tokens#str.lower\n\n if word.lower() in self.positives:\n score = score+1\n\n elif word.lower() in self.negatives:\n score = score-1\n\n else:\n continue\n return score",
"def word_frequency(text):\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(text)\n\n stop = set(stopwords.words('english'))\n tokens_without_stop = list(filter(lambda word: word.lower() not in stop, tokens))\n\n counts = Counter(tokens_without_stop)\n return counts",
"def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)",
"def compute_ari(text: str):\n characters = len(text.replace(\" \", \"\").replace(\".\", \"\").replace(\",\", \"\").replace(\";\", \"\"))\n words = text.count(\" \") + 1\n sentences = text.count(\".\")\n\n score = 4.71 * (characters / words) + .5 * (words / sentences) - 21.43\n\n return score",
"def summarize(text):\n # creating a dictionary for the word frequency table\n frequency_table = _create_dictionary_table(text)\n\n # tokenizing the sentences\n sentences = sent_tokenize(text)\n\n # algorithm for scoring a sentence by its words\n sentence_scores = _calculate_sentence_scores(sentences, frequency_table)\n\n # getting the threshold\n threshold = _calculate_average_score(sentence_scores)\n\n # producing the summary\n article_summary = _get_article_summary(\n sentences, sentence_scores, 0.7 * threshold)\n\n return article_summary",
"def get_avg_word_len(text):\r\n words = [len(s.translate(str.maketrans('', '', string.punctuation))) for s in text]\r\n return sum(words) / len(words)",
"def passion_analyzer(text):\n\n\tlower_text = text.lower()\n\n\thashtag_scaling = 0.3\n\texclamation_scaling = 0.5\n\tuppercase_scaling = 0.2\n\n\n\tpassion_index = 0\n\n\tfor x in range(len(positive_words)):\n\t\tpassion_index += (lower_text.count(positive_words[x]))**2\n\tfor x in range(len(negative_words)):\n\t\tpassion_index -= (lower_text.count(negative_words[x]))**2\n\tif '!' in text:\n\t\tpassion_index *= exclamation_scaling * lower_text.count('!') + 1\n\tif '#' in text:\n\t\tpassion_index *= hashtag_scaling * lower_text.count('#') + 1\n\tpassion_index *= uppercase_scaling * sum(1 for c in text if c.isupper())\n\n\n\t\t\n\treturn math.sqrt(passion_index)",
"def word_count(text):\n\n # Tokenize text on whitespace / newline\n words = text.strip().split()\n\n # Create a dictionary from the set of tokens, initializing each count to 0\n counts = dict.fromkeys(words, 0)\n\n # Iterate over the text to count occurences of each token\n for word in words:\n counts[word] += 1\n\n # Return the counts\n return counts",
"def mean_word_len(text):\r\n return np.mean(np.array([len(word) for word in tokenization(text)]))",
"def eval_text(self, text):\n # Pre-process sentence given\n sents = text.split('\\n')\n words = []\n for sent in sents:\n words.extend(list(sent))\n\n for idx, word in enumerate(words):\n if (word, ) not in self.uni_dist:\n words[idx] = TOKENS[\"UNK\"]\n\n # Compute Log-Probablities\n log_prob = 0\n for ngram in nltk.ngrams(words, self.N):\n log_prob += self.eval_ngram(ngram)\n\n # Compute Perplexity\n num_words = len(words)\n perplexity = 2 ** ((-1 / num_words) * log_prob)\n\n return perplexity",
"def analyze(self, text):\n score =0\n token = TweetTokenizer()\n tokens = token.tokenize(text)\n for token in tokens:\n if token.lower() in self.pos_list:\n score+=1\n elif token.lower() in self.neg_list:\n score-=1\n\n return score",
"def getTextStatsFeat(text, stemmRequired = True,\r\n excludeStopwordsRequired = True):\r\n #length = len(text)\r\n sentenceCount = len(re.findall(\"[.?!]\", text))\r\n exclamationMarkCount = len(re.findall(\"[!]\", text))\r\n questionMarkCount = len(re.findall(\"[?]\", text))\r\n digitsCount = len(re.findall(\"[0-9]+\", text))\r\n text = text.replace(\",\", \" \").replace(\".\", \" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n wordCount = 0.0\r\n charCount = 0.0\r\n rusCharCount = 0.0\r\n engCharCount = 0.0\r\n if excludeStopwordsRequired:\r\n for w in cleanText.split():\r\n if len(w)>1 and w not in stopwords:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n else:\r\n for w in cleanText.split():\r\n if len(w)>1:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n # per sentence\r\n wordPerSentence = tryDivide(wordCount, sentenceCount)\r\n charPerSentence = tryDivide(charCount, sentenceCount)\r\n rusCharPerSentence = tryDivide(rusCharCount, sentenceCount)\r\n engCharPerSentence = tryDivide(engCharCount, sentenceCount)\r\n # per word\r\n charPerWord = tryDivide(charCount, wordCount)\r\n rusCharPerWord = tryDivide(rusCharCount, wordCount)\r\n engCharPerWord = tryDivide(engCharCount, wordCount)\r\n # ratio\r\n rusCharRatio = tryDivide(rusCharCount, charCount)\r\n engCharRatio = tryDivide(engCharCount, charCount)\r\n rusCharVsEngChar = tryDivide(rusCharCount, engCharCount)\r\n engCharVsRusChar = tryDivide(engCharCount, rusCharCount)\r\n \r\n stats = [\r\n sentenceCount,\r\n wordCount,\r\n charCount,\r\n rusCharCount,\r\n engCharCount,\r\n digitsCount,\r\n exclamationMarkCount,\r\n questionMarkCount,\r\n wordPerSentence,\r\n charPerSentence,\r\n rusCharPerSentence,\r\n engCharPerSentence,\r\n charPerWord,\r\n rusCharPerWord,\r\n engCharPerWord,\r\n rusCharRatio,\r\n engCharRatio,\r\n rusCharVsEngChar,\r\n engCharVsRusChar,\r\n ]\r\n statsFeat = \"\"\r\n for i,f in enumerate(stats):\r\n if f != 0:\r\n statsFeat += \"%s:%s \" % (i+1, f)\r\n statsFeat = statsFeat[:-1] \r\n return statsFeat",
"def summarize(self, text, n):\n sents = sent_tokenize(text)\n assert n <= len(sents)\n word_sent = [word_tokenize(s.lower()) for s in sents]\n self._freq = self._compute_frequencies(word_sent)\n ranking = defaultdict(int)\n for i,sent in enumerate(word_sent):\n for w in sent:\n if w in self._freq:\n ranking[i] += self._freq[w]\n sents_idx = self._rank(ranking, n)\n return [sents[j] for j in sents_idx]",
"def average_word_length(self, text):\n return np.mean([len(word) for word in text])",
"def counterFrequency(text):\n dictText = {}\n maxN = 0\n mostFrequent = \"\"\n for item in text:\n if (item not in dictText):\n dictText[item] = 1\n else: \n dictText[item] +=1\n \n if (dictText[item] > maxN):\n mostFrequent = item\n maxN = dictText[item]\n return mostFrequent",
"def stats_text_en(text):\n text1 = text.replace(',','').replace('.','').replace('--','').replace('*','').replace('!','') # Remove the non-English characters in the text.\n text2 = text1.split() # Convert the string type to the list type.\n dict = {x: text2.count(x) for x in text2} # Count the times of each word in the list.\n dict1= sorted(dict.items(), key= lambda d:d[1], reverse = True) # Sort the words in the descending order according to the times of words.\n print(dict1) # Return the result.",
"def words(text):\n clean = TextBlob(clean(text))\n sentence_count = len(clean.sentences)\n words = clean.tokenize()\n word_count = len(words)\n avg_len = np.mean([len(word) for word in words])\n words_dict = {'sentence_count': sentence_count, 'word_count': word_count,\n 'avg_len': avg_len}\n return words_dict",
"def analyze(self, text):\n\n # start from 0 for each Analyser variable\n self.positives = 0\n self.negatives = 0\n\n # precise self text value\n self.text = text\n\n # declare a tokenased word\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n\n # indicate the length of list tokens\n size = len(tokens)\n\n # all the word stuff to ckeck\n for word in tokens:\n\n # chaque mots est converti en mot sans majuscule\n word = str.lower(word)\n\n linespos = [line.rstrip('\\n') for line in open('positive-words.txt')]\n linesneg = [line.rstrip('\\n') for line in open('negative-words.txt')]\n\n # check for positive or negative or neutral words\n if word in linespos:\n self.positives += 1\n elif word in linesneg:\n self.negatives += 1\n else:\n continue\n\n # score calculculated and reurned\n score = self.positives - self.negatives\n\n return score",
"def tf(self, term, text):\n return text.count(term) / len(text)"
]
| [
"0.7327232",
"0.6928021",
"0.69135123",
"0.68309027",
"0.6820116",
"0.6802307",
"0.67886895",
"0.6788378",
"0.6680395",
"0.66369337",
"0.6606218",
"0.659399",
"0.6587242",
"0.6502463",
"0.64892",
"0.64823335",
"0.64613557",
"0.64479107",
"0.64078027",
"0.63496876",
"0.63450474",
"0.63321656",
"0.6331747",
"0.63114303",
"0.6299393",
"0.6274808",
"0.62680054",
"0.6254993",
"0.62532085",
"0.62410724"
]
| 0.86525756 | 0 |
Returns a list of 5 keywords from the given text. | def keywords(text:str) -> list:
return sorted(set(text.split(' ')), key=frequency, reverse=True)[0:5] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_keywords(text, max_keywords=10):\n keywords = rake.apply(text)\n return \" ; \".join([item[0] for item in keywords[:max_keywords]]).strip()",
"def __get_keywords(self, text_list):\r\n specialKW = [\r\n 'run keyword',\r\n 'run keyword and continue on failure',\r\n 'run keyword and expect error',\r\n 'run keyword and ignore error',\r\n 'run keyword and return'\r\n 'run keyword and return if',\r\n 'run keyword and return status',\r\n 'run keyword if',\r\n 'run keyword if all critical tests passed',\r\n 'run keyword if all tests passed',\r\n 'run keyword if any critical tests failed',\r\n 'run keyword if any tests failed',\r\n 'run keyword if test failed',\r\n 'run keyword if test passed',\r\n 'run keyword if timeout occurred',\r\n 'run keyword unless',\r\n 'run keywords',\r\n 'wait until keyword succeeds',\r\n 'repeat keyword',\r\n 'else'\r\n ]\r\n specialSettings = [\r\n '[Arguments]',\r\n '[Documentation]'\r\n ]\r\n L = []\r\n if text_list[0] in specialSettings:\r\n return L\r\n for item in text_list:\r\n if self.__is_keyword(item):\r\n L.append(item)\r\n if not item.replace('_', ' ').replace('-', ' ').lower() in specialKW:\r\n break\r\n return L",
"def keywords(text):\r\n from operator import itemgetter # for sorting\r\n text = split_words(text)\r\n numWords = len(text) # of words before removing blacklist words\r\n text = [x for x in text if x not in stopWords]\r\n freq = Counter()\r\n for word in text:\r\n freq[word] += 1\r\n\r\n minSize = min(10, len(freq))\r\n keywords = tuple(freq.most_common(minSize)) # get first 10\r\n keywords = dict((x, y) for x, y in keywords) # recreate a dict\r\n\r\n for k in keywords:\r\n articleScore = keywords[k]*1.0 / numWords\r\n keywords[k] = articleScore * 1.5 + 1\r\n\r\n keywords = sorted(keywords.iteritems(), key=itemgetter(1))\r\n keywords.reverse()\r\n return dict(keywords)",
"def extract_keywords(self):\n keywords = [] \n for keyword in self.watsonLanguageModel['keywords'][:self.entitySizeLimit]: \n keywords.append(keyword['text'])\n return keywords",
"def get_keywords(self, sectioned_text):\n \n keywords = []\n \n if 'full text' in list(sectioned_text.keys()):\n \n for word in self.keyword_list:\n if word in sectioned_text['full text']:\n keywords.append(word)\n \n else: \n fulltext = self.restitch_text(sectioned_text)\n for word in self.keyword_list:\n if word in fulltext:\n keywords.append(word)\n \n return keywords",
"def text_to_keywords(text):\n response = alchemyapi.keywords('text', text, {'sentiment': 1})\n ret_keys = []\n\n if response['status'] == 'OK':\n keywords = response['keywords']\n for keyw_chunk in keywords[:2]:\n # if len(keywords) > 0:\n # keyw_chunk = keywords[0]\n top_keyword = keyw_chunk['text'].encode('utf-8')\n # else:\n # top_keyword = ''\n ret_keys.append(top_keyword)\n\n return ret_keys # top_keyword\n\n # for keyword in response['keywords']:\n # print('text: ', keyword['text'].encode('utf-8'))\n # print('relevance: ', keyword['relevance'])\n # print('sentiment: ', keyword['sentiment']['type'])\n # if 'score' in keyword['sentiment']:\n # print('sentiment score: ' + keyword['sentiment']['score'])\n # print('')\n else:\n print('Error in keyword extaction call: ', response['statusInfo'])\n return [] # ''",
"def get_keywords(text):\n tokens = [word.lower() for word in word_tokenize(text)]\n\n # tag words as verb, noun etc\n tagged_words = pos_tag(tokens)\n\n # retrieve list of boring words from file\n stopwords_file = os.path.join(BASE_DIR, 'data', 'stopwords.txt')\n with open(stopwords_file, 'r', encoding='utf-8') as f:\n stopwords = [line.rstrip(linesep) for line in f]\n \n #We don't want keywords to contain anything in this list\n forbidden = ['.',',',';',':','?','!','+',')','(','[',']','/','<','>','\"','©','1','2','3','4','5','6','7','8','9','0']\n\n # NLTK Chunking - detects noun phrases and phrases of form verb noun or adj noun\n patterns = \"\"\"NP: {<JJ>*<NN><NNS>}\n {<JJR><NNS>}\n {<JJ>*<NNS>}\n {<NN><NNS>} \n {<JJ><NNS>}\n {<JJ>*<NN>*}\n {<NN>*}\n {<NNS>*}\"\"\"\n chunker = RegexpParser(patterns)\n chunks = chunker.parse(tagged_words)\n\n #these are the phrases we want, as lists within a list\n validphrases = []\n for t in chunks.subtrees():\n if t.label() == 'NP':\n validphrases.append([x for x,y in t.leaves()])\n\n #turning lists within lists into actual noun phrases i.e [[radiation], [breast,cancer]] becomes [radiation, breast cancer]\n #sorry for my horrible code\n #trees suck\n lemmatizables = []\n for sublist in validphrases:\n lemmatizables.append(' '.join(sublist))\n\n lemmatizer = WordNetLemmatizer()\n lems = [lemmatizer.lemmatize(x) for x in lemmatizables]\n\n #removing stopwords after lemmatizinga, then removing anything containing punctuation or a number\n lems = filter(lambda lem: lem not in stopwords, lems)\n lems = filter(lambda lem: not any(char in lem for char in forbidden), lems)\n\n return tuple(lems)",
"def words(self, text):\n return re.findall(r'\\w+', text)",
"def get_words(text):\n return re.compile('\\w+').findall(text)",
"def get_keywords(self):\n all_keywords = []\n z_index = 0\n for zettel in self.lemma_tokens:\n keywords = []\n w_index = 0\n cur_zettel_dict = {}\n for word in zettel:\n cur_zettel_dict.setdefault(word[0], 0)\n cur_word_total_score = self.all_scores[z_index][w_index]\n if cur_zettel_dict[word[0]] > cur_word_total_score:\n w_index += 1\n continue\n else:\n cur_zettel_dict[word[0]] = cur_word_total_score\n w_index += 1\n cur_sorted = sorted(cur_zettel_dict.items(), key=lambda kv: kv[1], reverse=True)\n for i in range(self.keyword_n):\n keywords.append(str(cur_sorted[i]))\n z_index += 1\n all_keywords.append(keywords)\n return all_keywords",
"def extract_keywords(text):\n gcloud_response = gcloud_syntax_extraction(text)\n logging.info(\"gcloud syntax response: %s\", gcloud_response)\n\n tokens_shortened = []\n for token in gcloud_response.tokens:\n part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag).name\n if part_of_speech_tag in constants.KEY_PARTS_OF_SPEECH:\n token_data = {'word': token.text.content, 'lemma': token.lemma,\n 'part_of_speech': part_of_speech_tag}\n tokens_shortened.append(token_data)\n\n response = {'lan': gcloud_response.language, 'tokens': tokens_shortened}\n\n return response",
"def generate_keywords(self, target_text):\n logging.debug(\"Start generate keywords\")\n sentences = text_to_sentences(target_text)\n\n base_candidates = self._generate_base_candidates(target_text)\n\n temp_result = []\n for item in zip(base_candidates, base_candidates[1:]):\n temp_result += self._upward_grouping(*item)\n\n # convert str into objects\n temp_result = [Keyword(text=item) for item in temp_result]\n temp_result = self._mark_keyword_attributes(sentences, temp_result)\n\n # check overlapping, could be combined into one word\n return self._merge_keywords(sentences, temp_result)",
"def get_keywords(self, number=10):\n keyword = []\n node_weight = OrderedDict(sorted(self.node_weight.items(), key=lambda t: t[1], reverse=True))\n for i, (key, value) in enumerate(node_weight.items()):\n # print(key + ' - ' + str(value))\n keyword.append(key)\n if i > number:\n break\n return keyword",
"def get_paper_keywords(tree):\n\tpath = '//table/tr/th[text() = \"Keywords:\"]/following-sibling::td/text()'\n\tkeywords = tree.xpath(path)\n\t# xpath returns a list with the keywords as a single string element separated by new lines, commas or semi-colons\n\t# Make this into a list of keywords\n\tif keywords:\n\t\t# Split on new lines, commas and semi-colons\n\t\tkeywords = re.split('[\\\\n,;]', keywords[0])\n\t\t# Remove trailing white space and empty strings\n\t\tkeywords = [kw.strip() for kw in keywords if kw]\n\n\treturn keywords",
"def alchemy_keywords(text):\n if text:\n # TODO Alchemy API breaks if overview text is greater than 150 kbytes\n # First step skip these. If time look at truncating, splitting, or combining\n # by first skipping, it will be easier to update later\n if sys.getsizeof(text) > 150000:\n return {}\n\n # Create an AlchemyAPI object.\n alchemy_obj = AlchemyAPI.AlchemyAPI()\n\n # Load the API key from disk.\n alchemy_obj.loadAPIKey(\"api_key.txt\")\n\n # Extract topic keywords from a text string.\n result = alchemy_obj.TextGetRankedKeywords(text)\n\n # Use xml.etree.ElementTree to process xml returned from AlchemyAPI\n # extract keyword and relevance\n root = ET.fromstring(result)\n\n keyword_dictionary = {}\n\n for node in root.iter(\"keyword\"):\n keyword = node.find(\"text\").text.encode(\"utf-8\")\n relevance = float(node.find(\"relevance\").text)\n keyword_dictionary[keyword] = relevance\n\n return keyword_dictionary\n else:\n print \"No text to analyze\"\n return {}",
"def extract_keywords(article_list, n=10):\n vectorizer = TfidfVectorizer()\n tfidf = vectorizer.fit_transform(article_list)\n words = vectorizer.get_feature_names()\n # check N > total_words_length or not\n maxn = tfidf.shape[1] if tfidf.shape[1] < n else n\n weights = tfidf.toarray()\n # sort by decrease order\n indices = map(lambda w: np.argsort(-w)[:maxn], weights)\n keywords = [list(map(lambda i: words[i], indy)) for indy in indices]\n return keywords",
"def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())",
"def get_keywords(keyword_list: List[Tuple[str, str]], keyword_type: str) -> List[str]:\n keywords = [x[0] for x in keyword_list if x[1].startswith(keyword_type)]\n\n return keywords",
"def _get_word_list(text):\n return re.findall('\\w+', text)",
"def get_word_list(text_string):\n\ttext_no_punc = ''\n\ttext = text_string[600:] #kill the header\n\tfor char in text: #killing punctuation\n\t\tif not is_punct_char(char):\n\t\t\ttext_no_punc = text_no_punc+char #so extend the string everytime we run into a letter\n\ttext_no_punc_lower = string.lower(text_no_punc)\n\tlist_of_words = []\n\tlist_of_words = text_no_punc_lower.split( ) #splitting the string into the list\n\treturn list_of_words",
"def getWords(self, text):\n\t\ttextWithoutPunctuation = self.removePunctuation(text)\n\t\treturn [word for word in textWithoutPunctuation.split() if len(word) >= 1]",
"def get_keywords(self, text, language=None):\n text = unicode(text)\n if language is not None:\n if language not in self._get_available_languages():\n logging.warn(\"User passed an unsupported language: %s. Falling back to: %s.\" % \\\n (language, self.default_lang))\n language = self.default_lang\n else:\n language = self.detect_language(text)\n\n # load stopwords (words common in given language)\n stop_words = stopwords.words(language)\n stop_words = [word.decode(\"utf-8\") for word in stop_words] # fix to a bug in nltk\n\n # split text into words\n words = [word for word in nltk.wordpunct_tokenize(text) if len(word)>2 and\\\n word.lower() not in stop_words]\n\n # find bi-grams (two word collocations)\n bigram_measures = nltk.collocations.BigramAssocMeasures()\n finder = BigramCollocationFinder.from_words(words)\n finder.apply_freq_filter(self.min_bigram_freq)\n bigrams = finder.nbest(bigram_measures.pmi, 5)\n\n # this whole part below could be shortened by a few loops, but readability would suffer\n # convert words to stems, saving original words for stems in a dictionary\n stemmer = PorterStemmer()\n original_words = {}\n stemmed_words = []\n for word in words:\n stem = stemmer.stem(word).lower()\n original_words[stem] = word # TODO: handle most popular word for stem\n stemmed_words.append(stem)\n\n # find most frequent words, substitute stems by saved original words\n top_words = {original_words[word]:freq for word, freq in\\\n nltk.FreqDist(stemmed_words).items()}\n\n # exclude words already in bigrams\n expanded_bigrams = [word for bigram in bigrams for word in bigram]\n top_words = {word: freq for word, freq in top_words.items()\\\n if word not in expanded_bigrams}\n\n return self._filter_keywords(top_words, 0.4) + [\" \".join(bigram) for bigram in bigrams]",
"def GetKeywords(self):\n if wx.VERSION >= (2, 9, 0, 0, ''):\n return [(0, R_KEYWORDS), (1, R_KEYWORDS2), (2, R_KEYWORDS3)]\n else:\n return [(1, KEYWORDS)]",
"def kwiq(word, text, num = 3):\r\n arr = []\r\n dic = {}\r\n key = 0\r\n textspl = text.split()\r\n for part in textspl:\r\n dic[key] = part\r\n key += 1\r\n for q in list(dic.keys()):\r\n new_elem = dic[q].strip(',.?!:;')\r\n if new_elem == word or new_elem.lower() == word:\r\n string = ''\r\n for b in range(q-num, q+num+1):\r\n if b in dic:\r\n if b == q-1 or b == q:\r\n string += dic[b] + ' '\r\n else:\r\n string += dic[b] + ' '\r\n arr.append(string)\r\n return(arr)",
"def extract_phrases_with_keywords(text, keyword):\n sentences = split_text(text)\n phrases = []\n keyword = word_process(keyword)\n for sentence in sentences:\n words = re.findall(r'\\w+', sentence)\n for i, word in enumerate(words):\n if word_process(word) == word_process(keyword): # Both word and keyword have been processed, so we can compare them directly\n start = sentence.index(words[max(0,i-2)])\n end = sentence.index(word) + len(word)\n phrases.append(sentence[start:end])\n return phrases",
"def global_matches(self, text):\n matches = []\n n = len(text)\n for list in [keyword.kwlist,\n __builtin__.__dict__,\n self.namespace]:\n for word in list:\n if word[:n] == text:\n matches.append(word)\n return matches",
"def get_keywords(seq):\r\n if len(seq) = 0:\r\n return None\r\n freqs = {}\r\n for w in seq: \r\n if w not in freqs:\r\n\t freqs[w] = 1\r\n\telse\r\n\t freqs[w] += 1\r\n num_keys = len(freqs)\r\n res = []\r\n \r\n return res",
"def extract_keywords(raw_text,id):\n\n print(\"Extracting keywords for \"+id)\n\n stemmer = nltk.PorterStemmer()\n\n # Construct text\n\n # Tokens\n tokens = nltk.word_tokenize(raw_text)\n # filter undesirable words and format\n words = [w.replace('\\'','') for w in tokens if len(w)>=3]\n text = nltk.Text(words)\n\n tagged_text = nltk.pos_tag(text)\n #nouns = [tg[0] for tg in tagged_text if tg[1]=='NN' or tg[1]=='NNP' ]\n #print(nouns)\n\n # multi-term\n multiterms = set()\n stem_dico = {}\n for i in range(len(tagged_text)) :\n # max length 4 for multi-terms ==> 3\n for l in range(1,4) :\n if i+l < len(tagged_text) :\n tags = [tagged_text[k] for k in range(i,i+l)]\n if potential_multi_term(tags) :\n multistemlist = [str.lower(stemmer.stem(tagged_text[k][0])) for k in range(i,i+l)]\n #multistem.sort(key=str.lower)\n\t\t #python 3 : remove .encode('ascii','ignore')\n multistem = functools.reduce(lambda s1,s2 : s1+' '+s2,multistemlist)\n rawtext = functools.reduce(lambda s1,s2 : s1+' '+s2,[str.lower(tagged_text[k][0]) for k in range(i,i+l)])\n multiterms.add(multistem)\n if multistem in stem_dico :\n stem_dico[multistem].add(rawtext)\n else :\n stem_dico[multistem] = set([rawtext])\n\n return [list(multiterms),stem_dico]",
"def get_keywords(self):\n keys = []\n for post in self:\n keys.extend(post.Keywords)\n return list(sorted(set(keys)))",
"def tokenize(text):\n # YOUR CODE HERE\n t = text.lower()\n words = re.findall(r'[a-z]+',t)\n return words"
]
| [
"0.74684423",
"0.7436489",
"0.714602",
"0.6927766",
"0.68687236",
"0.669178",
"0.65788835",
"0.6512408",
"0.6431235",
"0.6420499",
"0.6381746",
"0.6351155",
"0.6331313",
"0.6329686",
"0.6294298",
"0.62856007",
"0.626946",
"0.6194702",
"0.61862916",
"0.6180569",
"0.6156291",
"0.6114342",
"0.61096174",
"0.60726917",
"0.6021574",
"0.59844834",
"0.5971467",
"0.59527606",
"0.5950997",
"0.58932453"
]
| 0.83092326 | 0 |
Returns a list with all the 100-word-long texts from the given book along with their difficulties and keywords. | def categorize(book: TextIO) -> list:
chunks = get_texts(book)
texts = []
for t in chunks:
level = difficulty(complexity(t))
texts.append((t, level, keywords(t)))
return texts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_texts(book: TextIO) -> list:\n content = book.read()\n chars_limit = 970\n texts = [content[i:i + chars_limit] for i in range(0, len(content), chars_limit)]\n return [\"...\" + t + \"...\" if t != texts[0] else t + \"...\" for t in texts]",
"def get_words_in_book(filename):\n f = open(filename, \"r\")\n content = f.read()\n f.close()\n wds = text_to_words(content)\n return wds",
"def get_word_list(file_name):\n\tnew_list = []\n\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\tend_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\twhile lines[end_line].find('End of the Project Gutenberg EBook') == -1:\n\t\tend_line -= 1\n\tlines = lines[curr_line + 1:end_line]\n\n\tlong_lines = ''.join(str(e) for e in lines)\n\tlong_lines = long_lines.lower()\n\tlong_lines = long_lines.translate(None, punctuation)\n\n\twords = long_lines.split()\n\tfor item in words:\n\t\tnew_list.append(item)\n\n\treturn new_list",
"def read_book_words(infile):\t\n\tfin = open(infile) \n\tlines = fin.readlines()\n\twords = []\n\tfor line in lines[25:16263]: #skipping over the header information\n\t\tline = line.replace('-', ' ')\n\t\tt = line.split()\n\t\tfor word in t:\n\t\t\tword = word.strip(string.punctuation + string.whitespace + '\\xe2\\x80\\x9c' + '\\xe2\\x80\\x9d')\n\t\t\tword = word.lower()\n\t\t\twords.append(word)\n\t#words.remove('')\n\treturn words",
"def get_word_list(file_name):\n\tbook = get_file_text(file_name)\n\tbook = strip_header(book)\n\tbook = strip_punctuation(book)\n\tbook = book.lower()\n\twords = re.split(r'\\s+', book)\n\treturn words",
"def find_long_words():\n f = open('session09/words.txt')\n \n for line in f:\n word = line.strip()\n if len(word) > 20:\n print(word, len(word))",
"def format_textbooks(self, data):\n textbooks = ', '.join([self.input['textbook%s'%i] for i in range(1,4) if self.input.get('textbook%s'%i)])\n return textbooks",
"def get_words():\n # words\n words_list = list()\n for i in range(1, 114+1):\n sura = quran.get_sura(i)\n for aya in sura:\n wordsList = aya.split(' ')\n for word in wordsList:\n words_list.append(word)\n\n return words_list",
"def read_book_words(infile):\t\n\timport string\n\t\n\tfin = open(infile) \n\tlines = fin.readlines()\n\twords = []\n\tfor line in lines[25:]: #skipping over the header information\n\t\tline = line.replace('-', ' ')\n\t\tt = line.split()\n\t\tfor word in t:\n\t\t\tword = word.strip(string.punctuation + string.whitespace + '\\xe2\\x80\\x9c' + '\\xe2\\x80\\x9d')\n\t\t\tword = word.lower()\n\t\t\twords.append(word)\n\t#words.remove('')\n\treturn words",
"def longwords_Li_Comp(strings):\n # write your code here\n return [string for string in strings if len(string)>4]",
"def fetch_words(url):\n\n with urlopen(url) as story:\n story_words = []\n for line in story:\n line_words = line.decode('utf-8').split()\n for word in line_words:\n story_words.append(word)\n\n return story_words",
"def convert_book_to_list_of_words(book):\n book = book.replace('\\n', ' ')\n book = book.split(' ')\n filtered_book = []\n for word in book:\n for c in word:\n word = ''.join([c for c in word if c.isalpha()])\n if word:\n filtered_book.append(word)\n return filtered_book",
"def fetch_words(url):\n\n with urlopen(url) as story:\n storyWords = []\n for line in story:\n words = line.split()\n for word in words:\n storyWords.append(word.decode('utf-8'))\n return storyWords",
"def longwords_Li_Comp(strings):\n return [string for string in strings if len(string)>4 ]",
"def fetch_words(filename):\n data = [] #empty list\n with urlopen(filename) as story:\n for line in story:\n words = line.decode('utf-8').split() #must decode into strings and then separate with spaces\n #print(lists)\n for word in words:\n data.append(word)\n return(data)",
"def fetch_words(url):\n story = urlopen(url)\n\n story_words = []\n\n for line in story:\n line_words = line.decode('utf-8').split()\n\n for word in line_words:\n story_words.append(word)\n\n story.close()\n\n return story_words",
"def get_word_list(file_name):\n # Read the file specified\n f = open(file_name,'r')\n lines = f.readlines()\n \n # Remove header text from lines\n curr_line = 0\n while lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line += 1\n lines = lines[curr_line + 1:]\n\n # Remove footer text from lines\n curr_line = -1\n while lines[curr_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line -= 1\n lines = lines[: curr_line]\n\n # Strip lines into words\n words = []\n for i in range(len(lines)):\n # Remove punctuation\n next_line = lines[i].translate(string.maketrans(\"\",\"\"), string.punctuation)\n next_line = next_line.lower()\n words += next_line.split()\n \n return words",
"def find_long_words(tokens):\n\n return sorted([word for word in set(tokens) if len(word) > 15])",
"def get_word_list(file_name, n):\n f = open(file_name, 'r')\n text = f.read()\n words = re.compile('\\w+').findall(text)\n return get_top_n_words(words, n)",
"def read_book(url,book_num):\n\t#calls open_url function to open the url\n\tbook_contents = open_url(url)\n\tif book_contents != None:\n\t\t#calls filter data function to clean the data\n\t\tclean_data = filter_data(book_contents)\n\t\t#create dictionary for all the words in this book with 0's filling for count in all the books\n\t\tcreate_dict(clean_data)\n\t\treturn clean_data\n\telse:\n\t\treturn []",
"def get_wordlists():\n\n\tCS = {'ACM', 'IEEE', 'Computer Science', 'Artificial Intelligence',\n\t\t'Pattern Recognition', 'Computer Vision', 'Machine Learning',\n\t\t'Signal Processing', 'Electrical Engineering', 'Image Processing',\n\t\t'Data Mining', 'Neural Networks', 'Computer Graphics', 'Graphics',\n\t\t'Language Processing', 'Internet', 'Intelligent Systems',\n\t\t'Robotic','Data','Software', 'Machine Vision', 'Image Analysis',\n\t\t'Scientific Computing', 'SIAM', 'Malware','World Wide Web', \n\t\t'Computational Intelligence', 'Computational Linguistics',\n\t\t'Computational linguistics','Algorithm','Computer','ITiCSE',\n\t\t'ITICSE','Machine learning','Learning','learning',\n\t\t'Artificial intelligence','CIVR','Document Analysis'}\n\n\tbio = {'Biology', 'Microbiology', 'Molecular', 'Medical', 'Biological',\n\t\t'Cancer', 'Genome', 'Bioinformatics', 'Protein', 'Biocomputing',\n\t\t'Biomedical', 'biology', 'Medicine', 'Biosystems', 'Virology',\n\t\t'Brain', 'Psychology', 'Genetics', 'Bioengineering', 'Cell',\n\t\t'Cardiology', 'Metabolic', 'Biotechnology', 'Pathogens',\n\t\t'Pathology', 'Plant', 'PLANT', 'Virus', 'Drug','Medicinal',\n\t\t'Neuro','Psych',\n\t\t'Genomic','Diseases','Endocrinology', 'Epidemiology',\n\t\t'Proteom','Biochem', 'DNA', 'Pharma', 'Biomedic', 'biomedica',\n\t\t'Neurobiological'}\n\n\tmath = {'Mathemati','Markov','Probability','Algebra','Network',\n\t\t'Topology','Optimization', 'Geometr','Statistic','Algorithm',\n\t\t'Graph ','Graphs','Combinatori','Riemann Surfaces','Permutation Groups',\n\t\t'Functional Analysis', 'SIAM','Fixed Point','Wavelet','Statistics',\n\t\t'Linear Regression','Fractal','geometry','Multivariate','Chaos',\n\t\t'mathemati','Kernel'}\n\n\tlinguistics = {}\n\n\tcomputer_vision = {}\n\n\tchemistry = {}\n\n\tphysics = {}\n\n\t# Rename \"Computer Vision\" to \"Image Processing\"?\n\ttopic_names = ['Computer Science','Biology','Mathematics','Chemistry',\n\t\t'Physics','Computer Vision','Natural Language Processing']\n\ttopics = [CS, bio, math]#, linguistics, computer_vision, chemistry, physics]\n\n\treturn {topic_names[i]:topics[i] for i in range(len(topics))}",
"def words_from_text(file_with_text):\n import string\n\n text = open(file_with_text, 'r')\n\n words = []\n amount_of_words = 0\n number_different_words = 0\n\n for line in text:\n line = line.replace('-',' ')\n for word in line.split():\n word = word.strip(string.punctuation + string.whitespace)\n word = word.lower()\n if word not in words:\n number_different_words +=1\n words.append(word)\n amount_of_words += 1\n\n\n return (\" This book has a total of %s words. It has %s different words !\") % (amount_of_words, number_different_words)",
"def get_words(self):\n words = self.wiki.get_words(cleaner=self.cleaner)\n df = pd.DataFrame({\"word\": words})\n df = df.drop_duplicates(\"word\")\n df = df.head(100)\n mask = df[\"word\"].isin(self.common[\"word\"])\n mask |= df[\"word\"].str.lower().isin(self.common[\"word\"])\n\n words = [ Word(word) for word in df[~mask][\"word\"] ]\n for word in words:\n word.get_definition(definer=self.definer)",
"def get_all_text_for_book(book_file_name):\n if not os.path.exists(verse_store):\n os.mkdir(verse_store)\n with open(book_file_name, \"r+\") as rh:\n book = json.load(rh)\n chapter_text = {}\n\n for name, chapters_dictionary in book.items():\n for chap, location in chapters_dictionary.items():\n outfile = \"{}/{}_{}.json\".format(verse_store, normalize_filename(name), chap)\n \n if os.path.exists(outfile):\n continue\n else:\n chapter_text[name + \"__\" + chap] = get_chapter_text(location)\n with open(outfile, \"w+\") as wh:\n json.dump(chapter_text, wh)\n chapter_text = {}",
"def fetch_words(url):\n # story = urlopen('http://sixty-north.com/c/t.txt')\n story = urlopen(url)\n story_words = []\n for line in story:\n # line_words = line.split()\n line_words = line.decode('utf-8').split()\n for word in line_words:\n story_words.append(word)\n\n story.close()\n\n # for word in story_words:\n # print(word)\n return story_words",
"def get_word_list(file_name):\n\n\tstoryEdit = []\n\n\t#Reads the file starting after the beginning\t\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\tlines = lines[curr_line+1:]\n\n\n\t#Loops through each row, making everything lowercase and replacing all punctuation\n\tfor row in lines:\n\t \trow = row.lower()\n\t \trow = row.translate(string.maketrans(\"\",\"\"), string.punctuation)\n\t \tstoryEdit += row.split()\n\n\n\t#Returns the final list as \n\treturn storyEdit",
"def n_long_words(words, n):\n words_longer_than_n = []\n for word in words:\n if len(word) > n:\n words_longer_than_n.append(word)\n\n return words_longer_than_n",
"def get_words():\n\tprompts = []\n\tfor prompt in story.prompts:\n\t\tprompts.append(prompt.replace('_', ' '))\n\n\treturn render_template(\"get-words.html\", prompts = prompts, key_prompts = story.prompts, num_of_prompts = len(prompts))",
"def shakespeare_words():\n return itertools.chain.from_iterable(shakespeare.words(fileid) for fileid in shakespeare.fileids())",
"def get_introduction(length=128, words=None):"
]
| [
"0.7427155",
"0.6878758",
"0.607292",
"0.5961577",
"0.5925289",
"0.5917224",
"0.59168714",
"0.5914988",
"0.5853547",
"0.5837389",
"0.5834911",
"0.58301675",
"0.5810416",
"0.5801972",
"0.5788824",
"0.5769443",
"0.57662934",
"0.5717366",
"0.57149994",
"0.57097614",
"0.57049674",
"0.56738186",
"0.56574166",
"0.5639611",
"0.56388706",
"0.5632881",
"0.55939996",
"0.55916697",
"0.5588209",
"0.5566055"
]
| 0.69261056 | 1 |
Stores the given frequency list in a file ('freq_list'). | def save_frequencies(freqs: dict) -> None:
with open("freq_list", 'w') as stored_freq_list:
json.dump(freqs, stored_freq_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_frequency(count_table, input_file):\n # Opens new file to output to\n with open(f\"{input_file}.out\", \"w\") as text:\n # Total sum of every word's occurrence in the file.\n totalCount = sum(count_table.values())\n\n # Loop through each key and corresponding value in the dictionary\n for word, count in count_table.items():\n # Output the word, the count and the relative frequency of the word.\n text.write(f\"{word} {count} {round(count / totalCount, 3)}\\n\")",
"def dump(self, f):\n pickle.dump(self.prod_freq, f)",
"def get_frequency_list():\n freqs = {}\n file = get_or_download_wordlist_file()\n for line in file:\n word, freq = line.split()\n freqs[word] = int(freq)\n return freqs",
"def set_frequency(self, f=1e9):\r\n self.write('FREQ '+str(f))",
"def cache_counts(counts, sorted=False, file_ingredients='../data/ingredients-teste.txt',\n frequency_threshold=30): #frequency_threshold=750\n if sorted:\n # another save procedure. counts is not a dictionary\n with open(file_ingredients, 'w') as f:\n for index in range(0, len(counts)):\n name = counts[index][0]\n frequency = counts[index][1]\n if frequency > frequency_threshold:\n f.write(name + ';' + str(frequency) + '\\n')\n else:\n # Not sorted\n with open(file_ingredients, 'w') as f:\n for more_freq in counts.keys():\n if counts[more_freq] > frequency_threshold:\n f.write(more_freq + ';' + str(counts[more_freq]) + '\\n')",
"def frequency(self, freq):\n self.load.write(f'TRAN:FREQ {freq}')",
"def list_to_file(sorted_list, filename):\n doc = Document()\n table = doc.add_table(rows=1, cols=2)\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Word'\n hdr_cells[1].text = 'Occurrence'\n\n for key, value in sorted_list:\n row_cells = table.add_row().cells\n row_cells[0].text = key\n row_cells[1].text = str(value)\n\n doc.save(\"sorted - \" + filename)",
"def save_list_to_file(the_list, filepath):\n with open(filepath, 'w') as file_handler:\n for item in the_list:\n file_handler.write(\"{}\\n\".format(item))",
"def writeFloatListToFile(ldata, prec, filePath):\n\twith open(filePath, \"w\") as fh:\n\t\tfor d in ldata:\n\t\t\tfh.write(formatFloat(prec, d) + \"\\n\")",
"def write_file(file_path, payload_list):\n\n with open(file_path, \"wb\") as f:\n count = 0\n while count < len(payload_list):\n f.write(payload_list[count])\n count = count + 1",
"def load_frequencies() -> dict:\n with open(\"freq_list\", 'r') as stored_freq_list:\n return json.load(stored_freq_list)",
"def setfrequency(self, value):\n self.instrument.write('FREQ {0}'.format(value))",
"def output_char_freqs(char_freqs):\n total_characters = sum(char_freqs.values())\n with open(\"char_freqs.txt\", 'w') as f:\n for c in sorted(char_freqs.iterkeys()):\n freq = (char_freqs[c] * 100.0) / total_characters\n f.write(\"{} = {:.02f}\\n\".format(c, freq))\n return",
"def update_freq_dist(filename):\r\n pass",
"def write_list(l, fname):\n thefile = open(fname, \"w\")\n for line in l:\n thefile.write(\"%s\\n\" % line)\n thefile.close()",
"def frequencies(self, frequencies):\n\n self._frequencies = frequencies",
"def frequencies(filename):\n\n hashtable = QuadraticProbeTable()\n file = open(filename,'r')\n words = []\n\n for item in file:\n item = item.strip('\\n')\n if item not in hashtable:\n hashtable[item] = 1\n words.append(item)\n else: hashtable[item] = hashtable[item] + 1\n\n file = open('FREQUENCY.txt', 'w')\n words = heap_sort(words)\n for item in words: file.write(item + ' ' + str(hashtable[item]) + '\\n')",
"def read_list(f, nb_freqs):\n alist = []\n while len(alist) < nb_freqs:\n line = f.readline()\n splitted = line.split()\n well_splitted = True\n for entry in splitted:\n well_splitted = well_splitted and entry.count('.') <= 1\n if well_splitted:\n entries = splitted\n else:\n if line.count('-') > 0:\n # Probably coming from an SDSS spectrum.\n entries = [line[i:i+12] for i in range(0, len(line) - 1, 12)]\n else:\n entries = [line[i:i+8] for i in range(0, len(line) - 1, 8)]\n for entry in entries:\n try:\n alist.append(float(entry))\n except ValueError:\n # If conversion to float fails, put 0 instead.\n alist.append(0)\n return numpy.array(alist)",
"def to_file(\n self,\n filename: str,\n freqs: \"np.array\",\n *,\n formatter: Optional[ModelFormatter] = None,\n ) -> None:\n # change the cwd to the the directory containing the file\n filename = os.path.abspath(filename)\n cwd = os.getcwd()\n dir, _ = os.path.split(filename)\n os.chdir(dir)\n\n # format the file\n with open(filename, \"w\") as file:\n file.write(self.to_string(freqs, formatter=formatter))\n file.close()\n\n # restore the cwd\n os.chdir(cwd)",
"def save_data(self, name, minfreq=0):\n if self._canOperate:\n with open(name, \"w\", encoding=\"utf-8\") as f:\n for word in self._data:\n if self._data[word].freq > minfreq:\n f.write(\"{}\\n{}\\n\".format(self._data[word].str, \",\".join(map(str, self._data[word].pos))))",
"def printFrequencyUpdate(frequencies_list: list):\n\n if len(frequencies_list) == 0:\n frequencies_string = '(no numbers listed yet)'\n else:\n frequencies_string = ', '.join(f'{k}:{v}' for [k, v] in frequencies_list)\n\n if '--inline' in sys.argv or '-i' in sys.argv:\n printAboveInput('(number:frequency)', frequencies_string,\n 'updated at', time.strftime('%H:%M:%S'))\n else:\n print('(number:frequency)', frequencies_string)",
"def set_frequency(self, f=1e9):\r\n self.write('SOUR:FREQ:CW '+str(f))",
"def set_frequency(self, f=1e9):\r\n self.write('SOUR:FREQ:CW '+str(f))",
"def print_key_freq(self,\r\n freq_list):\r\n\r\n for key, freq in freq_list:\r\n\r\n display.noteprint((EMPTYCHAR,key + alerts.APPEARS_BEG\\\r\n +len(self.get_indexes_for_key(key)\\\r\n +alerts.APPEARS_END+freq)))",
"def savealist(alist, filename):\n out = open(filename, \"w\")\n for i in alist:\n out.write(str(i) + \"\\n\") # if i is numeric\n out.close()",
"def write_list_to_file(myList, filename):\r\n\r\n with open(filename, \"w\") as outfile:\r\n for entries in myList:\r\n outfile.write(entries)\r\n\t\t\t# add a return after each line\r\n outfile.write(\"\\n\")",
"def add_file_char_freqs(file_name, char_freqs):\n with open(file_name, 'r') as f:\n for line in f:\n add_string_char_freqs(line, char_freqs)",
"def write_list(args, file_list):\n if not args.listfile.endswith(\".txt\"):\n args.listfile += \".txt\"\n outputfile = open(args.listfile, 'w')\n for name in file_list:\n outputfile.write(name)\n outputfile.write(\"\\n\")\n outputfile.close()",
"def set_frequency(self, f=1e9):\r\n _debug('simq03b_api.set_frequency')\r\n \r\n self.write('SOUR:FREQ:CW '+str(f))",
"def write_list_to_file(file_name: str, list_name: List[str]):\n # Write to a file, overwriting the old contents\n file = open(file_name, 'w')\n\n # Loop through the list, append a newline character to each line\n for item in list_name:\n file.writelines(item + '\\n')\n\n # Close the file\n file.close()"
]
| [
"0.65452045",
"0.6469228",
"0.6206968",
"0.61666906",
"0.61665314",
"0.6154107",
"0.6147396",
"0.61170024",
"0.6072871",
"0.60628444",
"0.6055825",
"0.5916679",
"0.59073734",
"0.5903439",
"0.5807971",
"0.5796866",
"0.57708037",
"0.5752463",
"0.57255787",
"0.5714908",
"0.57087857",
"0.5691167",
"0.5691167",
"0.56610173",
"0.55999076",
"0.55884373",
"0.5585628",
"0.5561214",
"0.5541773",
"0.5525826"
]
| 0.78073126 | 0 |
Loads the frequency list stored in a file ('freq_list') and returns it. | def load_frequencies() -> dict:
with open("freq_list", 'r') as stored_freq_list:
return json.load(stored_freq_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_frequency_list():\n freqs = {}\n file = get_or_download_wordlist_file()\n for line in file:\n word, freq = line.split()\n freqs[word] = int(freq)\n return freqs",
"def read_list(f, nb_freqs):\n alist = []\n while len(alist) < nb_freqs:\n line = f.readline()\n splitted = line.split()\n well_splitted = True\n for entry in splitted:\n well_splitted = well_splitted and entry.count('.') <= 1\n if well_splitted:\n entries = splitted\n else:\n if line.count('-') > 0:\n # Probably coming from an SDSS spectrum.\n entries = [line[i:i+12] for i in range(0, len(line) - 1, 12)]\n else:\n entries = [line[i:i+8] for i in range(0, len(line) - 1, 8)]\n for entry in entries:\n try:\n alist.append(float(entry))\n except ValueError:\n # If conversion to float fails, put 0 instead.\n alist.append(0)\n return numpy.array(alist)",
"def load(cls, f):\n prod_freq = pickle.load(f)\n return cls(prod_freq)",
"def load_word_freq(self, file_path):\n\n # initialise frequency dict\n # freq = {i: {} for i in range(1,9)}\n freq = {1: {}}\n\n # read csv file\n with open(file_path, 'r', encoding=co.OUT_ENCODING[self.language]) as f:\n reader = csv.reader(f, delimiter=',')\n\n # skip header\n next(reader)\n\n # iterate through csv and add values to dictionary\n for row in reader:\n try:\n # freq[int(row[3])][row[0]] = int(row[1])\n freq[1][row[0]] = int(row[1])\n except ValueError:\n pass\n\n return freq",
"def get_list_frequencies(self):\r\n _debug('simq03b_api.get_list_frequencies')\r\n \r\n s = self.query('SOUR:LIST:FREQ?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_frequencies(): non-float in list ', n, x)\r\n n += 1\r\n return a",
"def get_list_frequencies(self):\r\n s = self.query('SOUR1:LIST:FREQ?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_frequencies(): non-float in list ', n, x)\r\n n += 1\r\n return a",
"def get_list_frequencies(self):\r\n s = self.query('SOUR1:LIST:FREQ?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_frequencies(): non-float in list ', n, x)\r\n n += 1\r\n return a",
"def get_list_frequencies(self):\r\n s = self.query('LIST:FREQ?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_frequencies(): non-float in list ', n, x)\r\n n += 1\r\n return a",
"def create_most_freq_word_list(filename):\n global most_frequent_words\n\n if not most_frequent_words:\n with open(filename) as fp:\n for line in fp:\n most_frequent_words.append(re.sub(r'\\s+', '', line))",
"def frequency():\n\n return make_simple_tsv_get_response(FREQ_FILE, 'frequency')",
"def load_dictionary(filename):\n\n word_list = []\n freq_sum = 0\n\n # nacitanie zo suboru\n with open(filename) as f:\n for line in f:\n freq, val = line.split()\n word_list.append(Word(int(freq), val))\n freq_sum += int(freq)\n\n # lexikograficke usporiadanie slov\n word_list_sorted = sorted(word_list, key=operator.attrgetter('value'))\n\n return word_list_sorted, freq_sum",
"def get_freqs(filename):\n full = io.read_file(filename)\n full = full.strip('\\n')\n full = full.split('[1/cm]')[1].split('Zero')[0]\n full = full.split()\n nfreqs = full[0]\n freqs = full[1:]\n # [freq=float(freq) for freq in freqs]\n freqs = np.array(list(map(float, freqs)))\n a = freqs.argsort()[::-1]\n freqs = np.sort(freqs)[::-1]\n return freqs.tolist(), a.tolist()",
"def frequencies(self):\n if self.getn(\"frequency/type\") == \"custom\":\n # The value is validated to be a float list\n frequencies = self.getn(\"frequency/frequencies\")\n else:\n # Calculate the frequency values\n start = self.getn(\"frequency/start\")\n stop = self.getn(\"frequency/stop\")\n step = self.getn(\"frequency/step\")\n num = int((stop - start) / step + 1)\n frequencies = [start + step*i for i in range(num)]\n return frequencies",
"def save_frequencies(freqs: dict) -> None:\n with open(\"freq_list\", 'w') as stored_freq_list:\n json.dump(freqs, stored_freq_list)",
"def get_frequency(self,):\n\n # TODO: Find way to appropriately reconvert the frequency to its initial\n # TODO: Value or alert that the value is APPROXIMATE\n FTW = int (0)\n freq = int(0)\n\n FTW_bytes = self._read('CFTW0')\n FTW = FTW.from_bytes(FTW_bytes,'big')\n freq = FTW*self.clock_freq/2**32\n\n print('Latest frequency set: ', \"{:.2e}\".format(freq), 'Hz')\n print(['%.2e' % elem for elem in self.frequencies])\n\n return self.frequencies",
"def load_dic():\r\n f = open('../data/phrases-freq.txt')\r\n d = {}\r\n for line in f:\r\n line = line.strip().decode('utf-8')\r\n if line == '' or line[0] == '#':\r\n continue\r\n else:\r\n k, v = line.split()\r\n d[k] = int(v)\r\n return d",
"def load_wordlist(self, filename):\n reg1 = re.compile(\"^([1-6]{5})[ \\t]+(.*)$\")\n f = open(filename, 'r')\n \n if(self.generate):\n wordlist = []\n reg2 = re.compile(\"^(\\S*)$\")\n for line in f:\n m1 = reg1.match(line)\n m2 = reg2.match(line)\n \n if(m1):\n wordlist.append(m1.group(2))\n elif(m2):\n wordlist.append(m2.group(1))\n \n else:\n wordlist = {}\n for line in f:\n m = reg1.match(line)\n if(m):\n wordlist[int(m.group(1))] = m.group(2)\n \n if((not self.generate and len(wordlist) < 7776) or \n (self.generate and len(wordlist) < 2**13)):\n stderr.write(\"Word list is too short\\n\")\n exit(5)\n \n self.wordlist = wordlist",
"def load_wordlist(filename):\n # YOUR CODE HERE\n words = {}\n f = open(filename, 'rU')\n text = f.read()\n text = text.split('\\n')\n for line in text:\n words[line] = 1\n f.close()\n return words",
"def get_list_frequencies(self):\r\n return self.fs",
"def load_wordlist(filename):\n with open(filename) as f:\n \tdata = f.read().splitlines()\n return data",
"def initialize_from_file(fname: Path) -> Tuple[List, List]:\n item_set_list = []\n frequency = []\n\n with open(fname, \"r\") as file:\n csv_reader = reader(file)\n for line in csv_reader:\n line = list(filter(None, line))\n item_set_list.append(line)\n frequency.append(1)\n\n return item_set_list, frequency",
"def parse_file(input_file):\n # Automatically close the file after being used\n with open(input_file) as text:\n # Read file and split each word into an element in a list\n data = text.read().split()\n\n # Sort the list\n # Python sort automatically does lexical sorting\n data.sort()\n\n # For each word, use as Dictionary key and count the occurrences of the word and use as value\n frequency_table = {word: data.count(word) for word in data}\n\n # Return the frequency table\n return frequency_table",
"def load_words(silent=False):\n if(not silent):\n print(\"Loading word list from file...\")\n global WORDLIST_FILENAME\n # inFile: filek\n if os.path.exists(\"psets/4/words.txt\"):\n WORDLIST_FILENAME = \"psets/4/\"+WORDLIST_FILENAME\n inFile = open(WORDLIST_FILENAME, 'r')\n\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist",
"def collect_frequencies(nameoffile):\n with open(nameoffile) as text:\n list_of_words = []\n for line in text:\n words = line.split()\n list_of_words = list_of_words + words\n list_of_words = [word.lower() for word in list_of_words]\n\n dict = Counter(list_of_words)\n print(dict)\n return dict",
"def load_vocab(path, encoding=\"UTF-9\"):\n vocab = []\n\n if not os.path.exists(path):\n return vocab\n\n with open(path, encoding=encoding) as fin:\n for line in fin.readlines():\n line = line.strip()\n word, freq = line.split(\"\\t\")\n vocab.append((word,int(freq)))\n\n return vocab",
"def load_words():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist",
"def load_words():\n print(\"Loading word list from file...\")\n inFile = open(WORDLIST_FILENAME, 'r')\n line = inFile.readline()\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist",
"def load_words():\n print(\"Loading word list from file...\")\n inFile = open(WORDLIST_FILENAME, 'r')\n line = inFile.readline()\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist",
"def get_frequencies(filename):\n freq_dict = {}\n _,long_name = filename.split(\"\\\\\")\n name,_ = long_name.split(\"_gold_\")\n f = os.path.join(PARSED, name + \".fix.xml\")\n #soup = bs(open(f, 'r'))\n soup = bs(codecs.open(f, 'r', encoding='utf-8'))\n for sent in soup.findAll('sentence'):\n for token in sent.findAll('token'):\n try:\n w = token.word.string\n if w in freq_dict:\n freq_dict[w] += 1\n else:\n freq_dict[w] = 1\n except AttributeError:\n pass\n return freq_dict",
"def load_words():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist"
]
| [
"0.74670994",
"0.6564719",
"0.65079457",
"0.65046173",
"0.635506",
"0.63179034",
"0.63179034",
"0.6299468",
"0.6258039",
"0.6222253",
"0.61386144",
"0.6090718",
"0.5913977",
"0.59138477",
"0.5908088",
"0.5902046",
"0.589864",
"0.5881651",
"0.5880706",
"0.5848204",
"0.5814777",
"0.57940954",
"0.5786885",
"0.5783397",
"0.5778749",
"0.57781446",
"0.57658994",
"0.57658994",
"0.5752553",
"0.57318914"
]
| 0.78461945 | 0 |
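A minimal round-trip sketch for the loader in the row above, pairing it with the save_frequencies counterpart that appears among the negatives; the flat word-to-count dict and the relative 'freq_list' path are assumptions for illustration, not taken from the dataset.

import json

def save_frequencies(freqs: dict) -> None:
    # Write the word -> count mapping to the same file the loader reads.
    with open("freq_list", 'w') as stored_freq_list:
        json.dump(freqs, stored_freq_list)

def load_frequencies() -> dict:
    # Read the mapping back; json.load returns a plain dict.
    with open("freq_list", 'r') as stored_freq_list:
        return json.load(stored_freq_list)

if __name__ == "__main__":
    save_frequencies({"the": 120, "of": 75})
    assert load_frequencies() == {"the": 120, "of": 75}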
After each experiment has been run, the screener needs to determine which worker will finish next. The model then updates its internal records of what has been tested and how, and the screening history is updated. Finally, the index of the worker that has just finished is returned so that more work can be assigned. If the `final` parameter is `True`, there is no need to assign further work, and so the worker's job is killed. | def _record_experiment(self, final):
i = np.argmin(self.finish_time) # get the worker which is closest to finishing
idone = self.workers[i][0]
if self.workers[i][1] == 'z':
self.model.tz.remove(idone)
self.model.z[idone] = self.z[idone]
self.model.uu.remove(idone)
self.model.tu.append(idone)
self.history.append((idone, 'z'))
else:
self.model.ty.remove(idone)
self.model.y[idone] = self.y[idone]
self.model.tu.remove(idone)
self.model.tt.append(idone)
self.history.append((idone, 'y'))
if final:
self.workers[i] = None
self.finish_time[i] = np.inf
else:
return i | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def worker(worker_idx: int, work_queue: Queue, result_queue: Queue):\n game = self.get_env()\n predictor = self.get_model(game)\n msg.good(f\"Worker {worker_idx} started.\")\n\n while (\n ParallelPracticeRunner.request_quit is False\n and work_queue.empty() is False\n ):\n episode, args = work_queue.get()\n start = time.time()\n try:\n (\n episode_examples,\n episode_reward,\n is_win,\n problem,\n ) = self.execute_episode(\n episode,\n game,\n predictor,\n is_verbose_worker=worker_idx == 0,\n **args,\n )\n except KeyboardInterrupt:\n break\n except Exception as e:\n err = print_error(e, f\"Self-practice episode threw\")\n result_queue.put((i, [], {\"error\": err}))\n continue\n duration = time.time() - start\n episode_summary = EpisodeSummary(\n complexity=problem.complexity,\n text=problem.text,\n reward=episode_reward,\n solved=bool(is_win),\n duration=duration,\n )\n result_queue.put((i, episode_examples, episode_summary))\n return 0",
"def run(self):\n\n self._logger.debug(\"Starting Dummy Model: modelID=%s;\" % (self._modelID))\n\n # =========================================================================\n # Initialize periodic activities (e.g., for model result updates)\n # =========================================================================\n periodic = self._initPeriodicActivities()\n\n self._optimizedMetricLabel = self._optimizeKeyPattern\n self._reportMetricLabels = [self._optimizeKeyPattern]\n\n # =========================================================================\n # Create our top-level loop-control iterator\n # =========================================================================\n if self._iterations >= 0:\n iterTracker = iter(xrange(self._iterations))\n else:\n iterTracker = iter(itertools.count())\n\n # =========================================================================\n # This gets set in the unit tests. It tells the worker to sys exit\n # the first N models. This is how we generate orphaned models\n doSysExit = False\n if self._sysExitModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._sysExitModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n doSysExit = True\n\n if self._delayModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._delayModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n time.sleep(10)\n \n # DEBUG!!!! infinite wait if we have 50 models\n #if len(modelIDs) >= 50:\n # jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]\n # while not jobCancel:\n # time.sleep(1)\n # jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]\n\n if self._errModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._errModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n raise RuntimeError(\"Exiting with error due to errModelRange parameter\")\n\n # =========================================================================\n # Delay, if necessary\n if self._delay is not None:\n time.sleep(self._delay)\n\n # =========================================================================\n # Run it!\n # =========================================================================\n self._currentRecordIndex = 0\n while True:\n\n # =========================================================================\n # Check if the model should be stopped\n # =========================================================================\n\n # If killed by a terminator, stop running\n if self._isKilled:\n break\n\n # If job stops or hypersearch ends, stop running\n if self._isCanceled:\n break\n\n # If model is mature, stop running ONLY IF we are not the best model\n # for the job. 
Otherwise, keep running so we can keep returning\n # predictions to the user\n if self._isMature:\n if not self._isBestModel:\n self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED\n break\n else:\n self._cmpReason = self._jobsDAO.CMPL_REASON_EOF\n\n # =========================================================================\n # Get the the next record, and \"write it\"\n # =========================================================================\n try:\n self._currentRecordIndex = next(iterTracker)\n except StopIteration:\n break\n\n # \"Write\" a dummy output value. This is used to test that the batched\n # writing works properly\n\n self._writePrediction(ModelResult(None, None, None, None))\n\n periodic.tick()\n\n # =========================================================================\n # Compute wait times. See if model should exit\n # =========================================================================\n\n if self.__shouldSysExit(self._currentRecordIndex):\n sys.exit(1)\n\n # Simulate computation time\n if self._busyWaitTime is not None:\n time.sleep(self._busyWaitTime)\n self.__computeWaitTime()\n\n # Asked to abort after so many iterations?\n if doSysExit:\n sys.exit(1)\n\n # Asked to raise a jobFailException?\n if self._jobFailErr:\n raise utils.JobFailException(\"E10000\",\n \"dummyModel's jobFailErr was True.\")\n\n # =========================================================================\n # Handle final operations\n # =========================================================================\n if self._doFinalize:\n if not self._makeCheckpoint:\n self._model = None\n\n # Delay finalization operation\n if self._finalDelay is not None:\n time.sleep(self._finalDelay)\n\n self._finalize()\n\n self._logger.info(\"Finished: modelID=%r \"% (self._modelID))\n\n return (self._cmpReason, None)",
"def __execute_experiment__(self, *args, **kwargs):\n from klibs.KLGraphics import clear\n\n if self.blocks == None:\n self.blocks = self.trial_factory.export_trials()\n\n P.block_number = 0\n P.trial_id = 0\n for block in self.blocks:\n P.recycle_count = 0\n P.block_number += 1\n P.practicing = block.practice\n self.block()\n P.trial_number = 1\n for trial in block: # ie. list of trials\n try:\n P.trial_id += 1 # Increments regardless of recycling\n self.__trial__(trial, block.practice)\n P.trial_number += 1\n except TrialException:\n block.recycle()\n P.recycle_count += 1\n clear() # NOTE: is this actually wanted?\n self.rc.reset()\n self.clean_up()\n\n self.incomplete = False\n if 'session_info' in self.database.tables:\n where = {'session_number': P.session_number}\n self.database.update('session_info', {'complete': True}, where)",
"def _select_and_run_experiment(self, i):\n ipick = self.model.pick()\n if ipick in self.model.uu:\n self.workers[i] = (ipick, 'z')\n self.model.tz.append(ipick)\n self.model.b -= self.cz\n self.finish_time[i] += np.random.uniform(self.cz, self.cz * 2)\n else:\n self.workers[i] = (ipick, 'y')\n self.model.ty.append(ipick)\n self.model.b -= self.cy\n self.finish_time[i] += np.random.uniform(self.cy, self.cy * 2)",
"def algorithm(self):\n self.logger.debug(\"Starting\")\n while(True):\n for status, worktype in states():\n limit = self.slaves.queueableTasks()\n if not self._lockWork(limit=limit, getstatus=status, setstatus='HOLDING'):\n continue\n pendingwork = self._getWork(limit=limit, getstatus='HOLDING')\n self.logger.info(\"Retrieved a total of %d %s works\" %(len(pendingwork), worktype))\n self.logger.debug(\"Retrieved the following works: \\n%s\" %(str(pendingwork)))\n self.slaves.injectWorks([(worktype, work, None) for work in pendingwork])\n for task in pendingwork:\n self.updateWork(task['tm_taskname'], 'QUEUED')\n self.logger.info('Worker status:')\n self.logger.info(' - free slaves: %d' % self.slaves.freeSlaves())\n self.logger.info(' - acquired tasks: %d' % self.slaves.queuedTasks())\n self.logger.info(' - tasks pending in queue: %d' % self.slaves.pendingTasks())\n\n finished = self.slaves.checkFinished()\n self.updateFinished(finished)\n if self.TEST:\n #if we are testing we just do one cycle\n break\n\n time.sleep(self.config.TaskWorker.polling)\n\n self.logger.debug(\"Stopping\")",
"def _step(self):\n title()\n self.runCount = 1\n self.experiment.pause = False\n self._runExperiment()\n self.pause = True",
"def final(self, **kwargs):\n epoch = kwargs[\"epoch\"] + 1\n if epoch >= self.ignore_before:\n name = self.prepend + \"training_epoch_{}_FINAL.h5\".format(epoch)\n full_path = os.path.join(self.path, name)\n self.save_model(kwargs[\"trainer\"], full_path)\n else:\n print(\"Minimum iterations to store model not reached.\")\n\n if self.best_model is not None:\n best_model = deepcopy(self.best_model)\n best_res = self.best_res\n if self.window is not None:\n print(\"Best result during training: {:.2f}.\\n In a window of size {} \"\n \"starting in epoch {} with best mean value of {} \\n Saving model..\".format(best_res,\n self.window,\n self.best_window_start,\n self.best_mean_res))\n else:\n print(\n \"Best result during training: {:.2f}. Saving model..\".format(\n best_res\n )\n )\n name = self.prepend + \"BEST_ITERATION.h5\"\n torch.save(best_model, os.path.join(self.path, name))\n self.reset()",
"def work(workerid: int, problem_import: str, problem_func: str, savepath: str,\r\n jobqueue: Queue, resultqueue: Queue):\r\n import random\r\n seedmax = 2**30\r\n for _ in range(100000):\r\n random.randrange(seedmax)\r\n random.seed(time.time())\r\n for _ in range(2000):\r\n random.randrange(seedmax)\r\n\r\n seed1 = random.randrange(seedmax)\r\n seed2 = random.randrange(seedmax)\r\n np.random.seed(seed1)\r\n torch.manual_seed(seed2)\r\n\r\n\r\n mod = importlib.import_module(problem_import)\r\n problem: CompleteEvolutionProblem = getattr(mod, problem_func)()\r\n\r\n logger = logging.getLogger(f'{__name__}_worker_{workerid}')\r\n logger.setLevel(logging.DEBUG)\r\n\r\n handler = logging.handlers.RotatingFileHandler(\r\n os.path.join(savepath, f'worker_{workerid}.log'),\r\n maxBytes=1024*256, backupCount=5)\r\n handler.setLevel(logging.DEBUG)\r\n handler.setFormatter(logging.Formatter(\r\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p'))\r\n logger.addHandler(handler)\r\n\r\n\r\n signal.signal(signal.SIGINT, lambda *args: logger.critical('Ignoring SIGINT'))\r\n\r\n try:\r\n logger.info('Worker spawned: numpy seed=%s, torch seed=%s', seed1, seed2)\r\n while True:\r\n while True:\r\n try:\r\n job = jobqueue.get()\r\n break\r\n except InterruptedError:\r\n logger.exception('jobqueue.get() interrupted - ignoring')\r\n\r\n if not job:\r\n logger.info('got shutdown message')\r\n break\r\n\r\n logger.info('got new job: %s', str(job))\r\n approach: EvolutionProblem = problem.approaches[job[0]]\r\n trainer, network = approach.realize(job[1], **job[2])\r\n result = trainer.train(network, logger=logger)\r\n #result = {'accuracy': np.random.uniform(0, 1) + np.sin(job[2]['learning_rate'] * ((2*np.pi) / 0.1))} # for testing pylint: disable=line-too-long\r\n logger.info('finished job with result: %s', str(result))\r\n resultqueue.put(result)\r\n except: #pylint: disable=bare-except\r\n logger.exception('Fatal exception')\r\n\r\n while not resultqueue.empty():\r\n time.sleep(0.1)\r\n while not jobqueue.empty():\r\n try:\r\n jobqueue.get_nowait()\r\n except Empty:\r\n pass\r\n\r\n while not jobqueue.empty():\r\n try:\r\n jobqueue.get_nowait()\r\n except Empty:\r\n pass\r\n\r\n logger.info('shutdown')\r\n logging.shutdown()",
"def finished_tests(self):\n self.testing = 0",
"def next_parameters(self):\n\n if self.number_of_runs == -1 or self.runs_performed < self.number_of_runs:\n self.runs_performed += 1\n _log.debug('%d runs performed (calls to `next_parameters()`)' % self.runs_performed)\n else:\n _log.info('No more parameters to test in the database.')\n return None\n \n records = self.db.get_table()\n _log.debug('Retrieved %d parameters' % len(records))\n\n # Do we have a last-test in the config file\n if self.config and \"last-test\" in self.config and self.config[\"last-test\"]:\n _log.info('Using `last-test` with id=\"%s\" from config.txt' %\n str(self.config[\"last-test\"]))\n for i in range(0, len(records)):\n if (\n str(self.config.config[\"last-test\"]) == str(records[i][\"id\"]) and\n records[i][\"status\"] != \"successful\"\n ):\n records[i][\"status\"] = \"in progress\"\n if \"start-time\" in records[i]:\n records[i][\"start-time\"] = \\\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if \"performed-by\" in records[i]:\n records[i][\"performed-by\"] = self.performed_by\n\n self.db.update_row(i, records[i])\n\n return records[i]\n\n for i in range(0, len(records)):\n if not len(records[i][\"status\"]):\n if (\n 'computer-strength' in records[i] and \n self.computer_strength < int(records[i][\"computer-strength\"])\n ):\n continue\n \n records[i][\"status\"] = \"in progress\"\n if \"start-time\" in records[i]:\n records[i][\"start-time\"] = \\\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if \"performed-by\" in records[i]:\n records[i][\"performed-by\"] = self.performed_by\n self.db.update_row(i, records[i])\n\n # Save id to local cache\n if self.config:\n self.config.update(key='last-test', value=str(records[i][\"id\"]))\n\n return records[i]\n\n return None",
"def train_workers(self):\n args = dict(actor=self.actor,\n critic=self.critic,\n gamma=self.gamma,\n lamda=self.lamda or self.gamma / 1.005,\n device=self.device,\n optimizers=[self.actor_optimizer, self.critic_optimizer])\n workers = [Worker(i, self.action_size, self.state_size, **args)\n for i in range(self.n_workers)\n ]\n\n print(f'Worker count: {len(workers)}')\n\n for worker in workers:\n worker.start()\n\n while len(constants.scores) < self.n_steps:\n time.sleep(400) # save checkpoint every 400 ms\n\n print(f'\\nCurrent scores: {constants.scores}')\n\n self.save(constants.episode)\n print(f'\\nCheckpoint saved at episode: {constants.episode}\\n')",
"def work(self, rank):\n print(f'Worker PID: {os.getpid()}')\n self.stats['n_active_workers'].add_(1)\n\n # Local environment\n env = gym.make(ENV)\n nS = env.observation_space.shape[0]\n nA = env.action_space.n\n\n # Local seed\n local_seed = self.seed + rank\n env.seed(local_seed)\n torch.manual_seed(local_seed)\n np.random.seed(local_seed)\n random.seed(local_seed)\n\n # Policy\n local_policy = self.policy_class(output_dim=self.output_dim)\n local_policy.load_state_dict(self.shared_policy.state_dict())\n # print(f'Hashcode of local policy: {hash(local_policy)}')\n\n # Value function\n local_state_value_function = self.state_value_function_class()\n local_state_value_function.load_state_dict(self.shared_state_value_function.state_dict())\n # print(f'Hashcode of local state value function: {hash(local_state_value_function)}')\n\n # Update stats episode in place, and get the previous episode number before updating\n global_episode_idx = self.stats['episode'].add_(1).item() - 1\n\n while not self.get_out_signal:\n\n # Initialize for each episode\n state = env.reset()\n # ?\n total_episode_rewards = 0\n # ?\n logprobs = []\n entropies = []\n rewards = []\n values = []\n\n # count() generate values starting from start and default interval 1\n # This for loop breaks with break by if statement with done\n for step in count(start=1):\n state, reward, done = self.interaction_step(\n state, env, local_policy, local_state_value_function,\n logprobs, entropies, rewards, values\n )\n\n total_episode_rewards += reward\n\n if done:\n # TODO: Process state\n # TODO: Stack state\n next_value = local_state_value_function(state).detach().item()\n rewards.append(next_value)\n\n # Update policy and state value function at the end of each episode with collected experience\n self.optimize_model(\n logprobs, entropies, rewards, values, local_policy, local_state_value_function\n )\n\n # Clear experiences for next episode\n logprobs = []\n entropies = []\n rewards = []\n values = []\n\n if done:\n # Trigger a manual garbage collection process to clean up objects\n gc.collect()\n # Break for step in count(start=1)\n break\n\n # Get stats of each episode\n # Value of 'episode_reward' is torch tensor size max_episodes, recording total rewards in each element\n self.stats['episode_reward'][global_episode_idx].add_(total_episode_rewards)\n\n # Save model\n torch.save(local_policy.state_dict(), self.path_save_policy)\n\n with self.get_out_lock:\n potential_next_global_episode_idx = self.stats['episode'].item()\n if something:\n self.get_out_signal.add_(1)\n # Break for while not self.get_out_signal\n break\n\n # Else go to another episode\n global_episode_idx = self.stats['episode'].add_(1).item() - 1\n\n\n return None",
"def afterWork(self):\n pass",
"def run(self):\n for i in range(self.exploration_steps):\n self.single_step(i)\n if self.save_checkpoints:\n self.save_results()\n self.save_results()\n if self.verbose:\n print(\"\\nExploration completed\")\n return",
"def pytest_finished_handling_group(session, worker):",
"def work(self, job):\n pass",
"def submit_experiment_run(self, wait_for_completion: bool = True):\n raise NotImplementedError",
"def run_experiment():\n pass",
"def experiment_callback(self, args):\n # If args is None, that means that an exception was raised during the\n # execution of the experiment. In such case, ignore it\n if not args:\n self.n_fail += 1\n return\n # Extract parameters\n params, results, duration = args\n self.n_success += 1\n # Store results\n self.results.add(params, results)\n self.exp_durations.append(duration)\n if self.n_success % self.summary_freq == 0:\n # Number of experiments scheduled to be executed\n n_scheduled = self.n_exp - (self.n_fail + self.n_success)\n # Compute ETA\n n_cores = min(mp.cpu_count(), self.n_proc)\n mean_duration = sum(self.exp_durations) / len(self.exp_durations)\n eta = timestr(n_scheduled * mean_duration / n_cores, False)\n # Print summary\n logger.info('SUMMARY | Completed: %d, Failed: %d, Scheduled: %d, ETA: %s',\n self.n_success, self.n_fail, n_scheduled, eta)",
"def test(args, worker_id: int, global_model: torch.nn.Module, T: Value, global_reward: Value = None,\n optimizer: torch.optim.Optimizer = None, global_model_critic: CriticNetwork = None,\n optimizer_critic: torch.optim.Optimizer = None):\n\n logging.info(\"test worker started.\")\n torch.manual_seed(args.seed + worker_id)\n\n if \"RR\" in args.env_name:\n env = quanser_robots.GentlyTerminating(gym.make(args.env_name))\n else:\n if args.monitor:\n env = Monitor(gym.make(args.env_name), '100_test_runs', video_callable=lambda count: count % 100 == 0,\n force=True)\n else:\n env = gym.make(args.env_name)\n\n env.seed(args.seed + worker_id)\n\n normalizer = get_normalizer(args.normalizer, env)\n\n # get an instance of the current global model state\n model = copy.deepcopy(global_model)\n model.eval()\n\n model_critic = None\n if global_model_critic:\n model_critic = copy.deepcopy(global_model_critic)\n model_critic.eval()\n\n state = torch.from_numpy(env.reset())\n\n writer = SummaryWriter(comment='_test', log_dir='experiments/runs/')\n start_time = time.time()\n\n t = 0\n episode_reward = 0\n\n done = False\n global_iter = 0\n best_global_reward = -np.inf\n best_test_reward = -np.inf\n\n while True:\n\n # Get params from shared global model\n model.load_state_dict(global_model.state_dict())\n if not args.shared_model:\n model_critic.load_state_dict(global_model_critic.state_dict())\n\n rewards = []\n eps_len = []\n\n sleep = True\n\n # make 10 runs to get current avg performance\n for i in range(args.test_runs):\n while not done:\n t += 1\n\n if not args.no_render:\n if i == 0 and t % 1 == 0 and \"RR\" not in args.env_name:\n env.render()\n if args.monitor and sleep: # add a small delay to do a screen capture of the test run if needed\n time.sleep(1)\n sleep = False\n\n # apply min/max scaling on the environment\n\n with torch.no_grad():\n\n # select mean of normal dist as action --> Expectation\n if args.shared_model:\n _, mu, _ = model(normalizer(state))\n else:\n mu, _ = model(normalizer(state))\n\n action = mu.detach()\n\n state, reward, done, _ = env.step(np.clip(action.numpy(), -args.max_action, args.max_action))\n\n done = done or t >= args.max_episode_length\n episode_reward += reward\n\n if done:\n # reset current cumulated reward and episode counter as well as env\n rewards.append(episode_reward)\n episode_reward = 0\n\n eps_len.append(t)\n t = 0\n\n state = env.reset()\n\n state = torch.from_numpy(state)\n\n # necessary to make more than one run\n done = False\n\n time_print = time.strftime(\"%Hh %Mm %Ss\", time.gmtime(time.time() - start_time))\n\n std_reward = np.std(rewards)\n rewards = np.mean(rewards)\n\n new_best = rewards > best_test_reward\n writer.add_scalar(\"reward/test\", rewards, int(T.value))\n writer.add_scalar(\"episode/length\", np.mean(eps_len), int(T.value))\n\n log_string = f\"Time: {time_print}, T={T.value} -- n_runs={args.test_runs} -- mean total reward={rewards:.5f} \" \\\n f\" +/- {std_reward:.5f} -- mean episode length={np.mean(eps_len):.5f}\" \\\n f\" +/- {np.std(eps_len):.5f} -- global reward={global_reward.value:.5f}\"\n\n if new_best:\n # highlight messages if progress was done\n logging.info(log_string)\n\n best_global_reward = global_reward.value if global_reward.value > best_global_reward else best_global_reward\n best_test_reward = rewards if rewards > best_test_reward else best_test_reward\n model_type = 'shared' if args.shared_model else 'split'\n\n save_checkpoint({\n 'epoch': T.value,\n 'model': model.state_dict(),\n 'model_critic': 
model_critic.state_dict() if model_critic is not None else None,\n 'global_reward': global_reward.value,\n # only save optimizers if shared ones are used\n 'optimizer': optimizer.state_dict() if optimizer else None,\n 'optimizer_critic': optimizer_critic.state_dict() if optimizer_critic else None,\n },\n path=f\"./experiments/checkpoints/model_{model_type}_T-{T.value}_global-{global_reward.value:.5f}_test-{rewards:.5f}.pth.tar\")\n else:\n # use by default only debug messages if no progress was reached\n logging.debug(log_string)\n\n global_iter += 1\n\n # run evaluation only once in test mode\n if args.test:\n break",
"def step(self):\n # Fast learning\n task_embedding = self._ilp.infer_task()\n\n # Posterior update\n #self._skip_flag = self._is_graph_same(task_embedding, self._prev_task_embedding)\n self._skip_flag = False # XXX do not skip test\n if not self._skip_flag:\n self._grprop.observe_task(task_embedding)\n self._prev_task_embedding = task_embedding\n else:\n print(\"skipping!\")",
"def test_run_experiment_locally(self) -> None:\n\n experiment = Experiment(\n name=\"torchx_booth_sequential_demo\",\n search_space=SearchSpace(parameters=self._parameters),\n optimization_config=OptimizationConfig(objective=self._objective),\n runner=self._runner,\n is_test=True,\n properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},\n )\n\n scheduler = Scheduler(\n experiment=experiment,\n generation_strategy=(\n choose_generation_strategy(\n search_space=experiment.search_space,\n )\n ),\n options=SchedulerOptions(),\n )\n\n try:\n for _ in range(3):\n scheduler.run_n_trials(max_trials=2)\n\n # TorchXMetric always returns trial index; hence the best experiment\n # for min objective will be the params for trial 0.\n scheduler.report_results()\n except FailureRateExceededError:\n pass # TODO(ehotaj): Figure out why this test fails in OSS.\n # Nothing to assert, just make sure experiment runs.",
"def Finish(experiment, ln):\n # Move to the \"results\" folder within the experiment's home directory\n os.chdir(experiment[\"Folder\"] + \"results/\")\n # Make a folder of the best structures in each library\n list = os.listdir(\"./\")\n # If a \"best\" folder is not already in the \"results\" folder, make it\n if \"best\" not in list:\n os.mkdir(\"best\")\n # Move to the \"best\" folder\n os.chdir(\"best\")\n # Make a folder for the library\n os.mkdir(\"library\" + str(ln))\n os.chdir(\"library\" + str(ln))\n # Find the best iteration in the library's results folder\n folder = experiment[\"Folder\"] + \"library\" + str(ln) + \"/results/\"\n list = os.listdir(folder)\n best = 0\n # Go through the information in the \"results\" folder\n for name in list:\n if name.startswith(\"iteration\"):\n # Get the iteration number\n iteration = int(name[9:])\n # If it is higher than \"best\", then store its value\n if iteration > best:\n best = iteration\n # Copy the information from the \"best\" in that folder into the experiment's\n # home results folder\n folder += \"iteration\" + str(best) + \"/\"\n # List the files within this folder\n files = os.listdir(folder)\n # Copy each file to the experiment's results \"best\" folder\n for file in files:\n os.system(\"cp \" + folder + file + \" ./\") \n # List the sequence information and energy information in the summary file\n text = \"LIBRARY \" + str(ln) + \" RESULTS\\n\"\n # Gather the total number of groups to have their information output\n groups = len(experiment)\n # Create a list of all Target Molecules in the experiment\n target_molecules = []\n # Go through all of the Molecules in the experiment\n for molecule in experiment[0]:\n # If it is a Target Molecule\n if not molecule.design:\n # Then store it\n target_molecules.append(molecule.name)\n # Now gather all of the Design Molecules\n molecules = []\n # Go through the files\n for file in files:\n # If it is a Molecule File, get the name of the Molecule\n name = file.split(\".\")[0][-1]\n # If it is in the 1st Binding Assembly (to avoid redundancy), store it\n # if it is not in the list of Target Molecules, meaning it is a Design\n # Molecule\n if file.startswith(\"Group1_Molecule\") and name not in target_molecules:\n molecules.append(name)\n molecules.sort()\n # Create a Summary of the amino acids used within each CDR, as well as the\n # canonical structures used to make the CDRs\n # List the canonical structure information\n # Get the optimal set of canonical structures\n solution = experiment[\"Scores\"][ln-1][1]\n # Output the score\n canonical = \"The score for the set of canonical structures used is \"\n canonical += str(solution[\"Score\"]) + \"\\n\"\n # Store the position information for each of the CDRs\n ranges = {1: range(27, 39), 2: range(56, 66), 3: range(105, 118)}\n # Go thorugh each of the CDRs and output the canonical structure used\n associations = molecule_name_association(experiment, molecules)\n cdrs = list(associations.keys())\n cdrs.sort()\n # Store the sequence information in this string\n sequence = \"\"\n for num, cdr in enumerate(cdrs):\n # Add the canonical structure information\n canonical += \"The \" + cdr + \" CDR used canonical structure #\"\n canonical += str(solution[num+1]) + \"\\n\" \n # Get the appropriate Molecule for the CDR\n name = \"Group1_Molecule\" + associations[cdr] + \".pdb\"\n mol = MOLECULES.MoleculeFile(name)[0]\n # Go through all of the residues\n for res in mol:\n # Get its name so that its position may be extracted\n rName = res.name\n # If 
the name is composed of only digits\n if rName.isdigit():\n pass\n # If the last character is a letter\n elif rName[:-1].isdigit() and rName[-1].isalpha():\n rName = rName[:-1] \n # Convert the name to an integer\n rName = int(rName)\n # If this position lies within the CDR position, add its sequence\n # information\n if rName in ranges[int(cdr[-1])]:\n sequence += cdr + \" Residue \" + str(rName) + \" in Molecule \"\n sequence += mol.name + \": \" + res.kind + \"\\n\"\n # Store the Energy information\n energy = \"\"\n # Go through the Binding Assemblies\n for gn in range(1, groups + 1):\n # Open the Energy file\n name = \"Group\" + str(gn) + \"_Energies.txt\"\n f = open(name, \"r\")\n # Go through the file\n for line in f:\n # Split the line on white space\n items = line.split()\n # Add the text to the energy string\n energy += \"The \" + items[0] + \" \" + items[1][:-1] + \" of Design \"\n energy += \"Group \" + str(gn) + \" is \" + items[2] + \" kcal / mol\\n\" \n # Close the file\n f.close()\n # Change back to the Experiment's home directory\n os.chdir(experiment[\"Folder\"])\n # Add all of this information to the Summary file\n experiment[\"Summary\"] += canonical + sequence + energy + \"\\n\\n\"\n name = SHARING.summary_name(SHARING.get_current())\n f = open(name, \"a\")\n f.write(experiment[\"Summary\"])\n f.close() \n # Move the library to the results folder\n command = \"mv library\" + str(ln) + \" results/\" \n os.system(command)\n # If this is the final library, delete the SCORES.txt file\n if ln == experiment['Optcdr Libraries']:\n os.system(\"rm SCORES.txt\")",
"def timesteps_experiment():\n\n print(\"TIMESTEPS EXPERIMENT\")\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'timestep_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'time_steps'\n changing_param_value = [1, 2, 4, 8, 16, 32, 64, 128, 256]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n # set constant parameters\n set_params(epochs=20)\n set_params(dropout=0.3)\n set_params(use_word_emb=1)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(use_word_emb = value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id = new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n if value == changing_param_value[0]:\n set_params(preproc_data_id=new_model_id)",
"def do_work(self):",
"def sync_experiment_state_with_ddb(self):\n record = self.exp_db_client.get_experiment_record(self.experiment_id)\n\n # sync records to experiment states\n self.experiment_manager.experiment_record = ExperimentRecord.load_from_ddb_record(record)\n\n # update training workflow if needed\n training_workflow_metadata = record.get(\"training_workflow_metadata\", None)\n # first update any in-progress next_model_to_train\n next_model_to_train_id = self.experiment_manager.experiment_record._next_model_to_train_id\n training_state = self.experiment_manager.experiment_record._training_state\n if next_model_to_train_id is not None and training_state.endswith(\"ING\"):\n if self.experiment_manager.next_model_to_train is not None:\n self.experiment_manager.next_model_to_train.update_model_training_state()\n else:\n # only init the ModelManager() if the training job record already exists\n if (\n self.model_db_client.get_model_record(\n self.experiment_id, next_model_to_train_id\n )\n is not None\n ):\n next_model_to_train = ModelManager(\n model_db_client=self.model_db_client,\n experiment_id=self.experiment_id,\n model_id=next_model_to_train_id,\n )\n next_model_to_train.update_model_training_state()\n time.sleep(1)\n self._update_experiment_db_training_workflow_metadata(training_workflow_metadata)\n\n # update evaluation workflow if needed\n evaluation_workflow_metadata = record.get(\"evaluation_workflow_metadata\", None)\n # first update any in-progress next_evaluation_job\n next_evaluation_job_id = self.experiment_manager.experiment_record._next_evaluation_job_id\n evaluation_state = self.experiment_manager.experiment_record._evaluation_state\n if next_evaluation_job_id is not None and evaluation_state.endswith(\"ING\"):\n if self.experiment_manager.next_model_to_evaluate is not None:\n self.experiment_manager.next_model_to_evaluate.update_model_evaluation_state()\n else:\n # only init the ModelManager() if the evaluation job record already exists\n if (\n self.model_db_client.get_model_record(\n self.experiment_id, next_evaluation_job_id.split(\"-eval-\")[0]\n )\n is not None\n ):\n next_model_to_evaluate = ModelManager(\n model_db_client=self.model_db_client,\n experiment_id=self.experiment_id,\n model_id=next_evaluation_job_id.split(\"-eval-\")[0],\n )\n next_model_to_evaluate.update_model_evaluation_state()\n time.sleep(1)\n self._update_experiment_db_evaluation_workflow_metadata(evaluation_workflow_metadata)\n\n # update hosting workflow if needed\n hosting_workflow_metadata = record.get(\"hosting_workflow_metadata\", None)\n self._update_experiment_db_hosting_workflow_metadata(hosting_workflow_metadata)\n\n # update joining workflow if needed\n joining_workflow_metadata = record.get(\"joining_workflow_metadata\", None)\n # first update any in-progress next_join_job\n next_join_job_id = self.experiment_manager.experiment_record._next_join_job_id\n joining_state = self.experiment_manager.experiment_record._joining_state\n if next_join_job_id is not None and joining_state.endswith(\"ING\"):\n if self.experiment_manager.next_join_job is not None:\n self.experiment_manager.next_join_job.update_join_job_state()\n else:\n # only init the JoinManager() if the join job record already exists\n if (\n self.join_db_client.get_join_job_record(self.experiment_id, next_join_job_id)\n is not None\n ):\n next_join_job = JoinManager(\n join_db_client=self.join_db_client,\n experiment_id=self.experiment_id,\n join_job_id=next_join_job_id,\n )\n next_join_job.update_join_job_state()\n time.sleep(1)\n 
self._update_experiment_db_joining_workflow_metadata(joining_workflow_metadata)\n\n self.emit_cloudwatch_metrics_for_training_and_hosting()",
"def test_workon(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n experiment = workon(foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5)\n assert len(experiment.fetch_trials()) == 5\n assert experiment.name == \"loop\"\n assert isinstance(experiment._experiment._storage, Legacy)\n assert isinstance(experiment._experiment._storage._db, EphemeralDB)",
"def testSearchAlgFinishes(self):\n\n class FinishFastAlg(SuggestionAlgorithm):\n _index = 0\n\n def next_trials(self):\n trials = []\n self._index += 1\n\n for trial in self._trial_generator:\n trials += [trial]\n break\n\n if self._index > 4:\n self._finished = True\n return trials\n\n def _suggest(self, trial_id):\n return {}\n\n ray.init(num_cpus=2)\n experiment_spec = {\n \"run\": \"__fake\",\n \"num_samples\": 2,\n \"stop\": {\n \"training_iteration\": 1\n }\n }\n searcher = FinishFastAlg()\n experiments = [Experiment.from_json(\"test\", experiment_spec)]\n searcher.add_configurations(experiments)\n\n runner = TrialRunner(search_alg=searcher)\n self.assertFalse(runner.is_finished())\n runner.step() # This launches a new run\n runner.step() # This launches a 2nd run\n self.assertFalse(searcher.is_finished())\n self.assertFalse(runner.is_finished())\n runner.step() # This kills the first run\n self.assertFalse(searcher.is_finished())\n self.assertFalse(runner.is_finished())\n runner.step() # This kills the 2nd run\n self.assertFalse(searcher.is_finished())\n self.assertFalse(runner.is_finished())\n runner.step() # this converts self._finished to True\n self.assertTrue(searcher.is_finished())\n self.assertRaises(TuneError, runner.step)",
"def run_experiment(self):\n\n start_time = time.time()\n\n strategy_instance = None\n if (self.strategy == 'ccegp'):\n strategy_instance = CCEGPStrategy(self)\n else:\n print('strategy unknown:', self.strategy)\n sys.exit(1)\n\n # For each run...\n for curr_run in range(1, self.num_runs_per_experiment + 1):\n\n # Update log\n self.curr_run = curr_run\n print('\\nRun', curr_run)\n self.log_file.write('\\nRun ' + str(curr_run) + '\\n')\n\n # Execute one run and get best values.\n attacker_run_high_fitness, attacker_run_best_world_data, attacker_run_best_solution, \\\n defender_run_high_fitness, defender_run_best_solution, attacker_dot, defender_dot \\\n = strategy_instance.execute_one_run()\n\n print('\\nBest attacker tree of run:\\n' + attacker_run_best_solution)\n if (self.print_dots):\n print('\\nBest attacker dot of run:\\n' + str(attacker_dot))\n print('\\nBest defender tree of run:\\n' + defender_run_best_solution)\n if (self.print_dots):\n print('\\nBest defender dot of run:\\n' + str(defender_dot))\n\n # If best of run is best overall, update appropriate values\n if (self.strategy != 'ccegp'):\n if (attacker_run_high_fitness > self.attacker_exp_high_fitness):\n self.attacker_exp_high_fitness = attacker_run_high_fitness\n print('New exp Attacker high fitness: ', self.attacker_exp_high_fitness)\n self.attacker_exp_best_world_data = attacker_run_best_world_data\n self.attacker_exp_best_solution = attacker_run_best_solution\n self.attacker_exp_best_dot = attacker_dot\n # If Competitive Co-evolution, add fitnesses (use Attacker to store most data)\n else:\n if ((attacker_run_high_fitness + defender_run_high_fitness) > self.attacker_exp_high_fitness):\n self.attacker_exp_high_fitness = (attacker_run_high_fitness + defender_run_high_fitness)\n print('New exp Attacker+Defender high fitness: ', self.attacker_exp_high_fitness)\n self.attacker_exp_best_world_data = attacker_run_best_world_data\n self.attacker_exp_best_solution = attacker_run_best_solution\n self.defender_exp_best_solution = defender_run_best_solution\n self.attacker_exp_best_dot = attacker_dot\n self.defender_exp_best_dot = defender_dot\n\n # Dump best world to file\n the_file = open(self.high_score_world_file_path, 'w')\n for line in self.attacker_exp_best_world_data:\n the_file.write(line)\n the_file.close()\n\n # Dump best Attacker solution (text) to file\n the_file = open(self.attacker_solution_file_path, 'w')\n the_file.write(self.attacker_exp_best_solution)\n the_file.close()\n\n # Dump best Defender solution (text) to file\n if (self.strategy == 'ccegp'):\n the_file = open(self.defender_solution_file_path, 'w')\n the_file.write(self.defender_exp_best_solution)\n the_file.close()\n\n # Dump best Attacker solution (dot) to file\n the_file = open(self.attacker_solution_dot_path, 'w')\n the_file.write(str(self.attacker_exp_best_dot))\n the_file.close()\n\n # Dump best Defender solution (dot) to file\n if (self.strategy == 'ccegp'):\n the_file = open(self.defender_solution_dot_path, 'w')\n the_file.write(str(self.defender_exp_best_dot))\n the_file.close()\n\n # Dump and display best Attacker solution\n if (self.render_solutions):\n self.attacker_exp_best_dot.render(filename=self.attacker_solution_png_path,\n view=self.attacker_open_png,\n format='png')\n\n # Dump and display best Defender solution\n if (self.render_solutions and self.strategy == 'ccegp'):\n self.defender_exp_best_dot.render(filename=self.defender_solution_png_path,\n view=self.defender_open_png,\n format='png')\n\n # Close out the log file\n if 
(not(self.log_file is None)):\n self.log_file.close()\n\n print(time.time() - start_time, 'seconds')",
"def watch_worker():\n global isFinished, ComputationTime, UsersOnline, N, CurrentIndex, Count\n received_data = request.json\n Count += received_data\n if CurrentIndex >= N:\n print 'Second text got ', Count, ' entries of given row.'\n print '--- %s seconds ---' % (time.time() - ComputationTime)\n isFinished = True\n return jsonify(current_row='', current_part='')\n else:\n print 'Current row in second text: ', CurrentIndex / 256\n part = SecondText[CurrentIndex:CurrentIndex+1023]\n CurrentIndex += 1024\n return jsonify(current_row=Row, current_part=part)"
]
| [
"0.63682723",
"0.6095888",
"0.608978",
"0.604774",
"0.5840345",
"0.5793326",
"0.57848585",
"0.57784337",
"0.57582265",
"0.57426524",
"0.5739973",
"0.57374585",
"0.57232004",
"0.5715764",
"0.5707506",
"0.56954587",
"0.56887776",
"0.5670632",
"0.5664259",
"0.5647323",
"0.56454694",
"0.5640493",
"0.56397253",
"0.5636408",
"0.5631453",
"0.5624987",
"0.56216687",
"0.5621364",
"0.56174046",
"0.560993"
]
| 0.769474 | 0 |
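A standalone sketch (not the repository's screener class) of the scheduling pattern described in the row above: the worker with the smallest projected finish time is the one that completes next, its result is recorded, and its index is handed back for reassignment unless `final` is True. The concrete finish times and the two-step driver at the bottom are illustrative assumptions.

import numpy as np

finish_time = np.array([3.2, 1.7, 4.5])   # projected completion time per worker
history = []                              # stand-in for the model's bookkeeping

def record_experiment(final: bool):
    i = int(np.argmin(finish_time))       # worker closest to finishing completes next
    history.append(i)                     # record which worker's result was absorbed
    if final:
        finish_time[i] = np.inf           # retire the worker: it is never picked again
    else:
        return i                          # caller assigns this worker its next experiment

i = record_experiment(final=False)        # worker 1 (t=1.7) finishes first
finish_time[i] += 2.0                     # new work pushes its finish time out to 3.7
record_experiment(final=True)             # worker 0 (t=3.2) finishes next and is retired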
Performs the full automated screening with multiple workers. First, each worker (one per thread) is assigned a material to investigate. After this initialisation, the screener alternates between selecting and recording experiments, recording the history of the work as it goes, until the budget can no longer cover an expensive test. The remaining jobs are then allowed to finish. | def full_screen(self,ploton=False):
self._screener_init() # initialise the model with a single expensive test
for i in range(self.nthreads): # at the start, give the workers a job to do each
self._select_and_run_experiment(i)
while self.model.b >= self.cy: # spend budget till we can't afford any more expensive tests
i = self._record_experiment(final=False)
self._select_and_run_experiment(i)
if ploton:
self.model.plot(self.model.x,self.y,self.z)
for i in range(self.nthreads): # finish up any remaining jobs and record their results
self._record_experiment(final=True)
return self.history | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train_workers(self):\n args = dict(actor=self.actor,\n critic=self.critic,\n gamma=self.gamma,\n lamda=self.lamda or self.gamma / 1.005,\n device=self.device,\n optimizers=[self.actor_optimizer, self.critic_optimizer])\n workers = [Worker(i, self.action_size, self.state_size, **args)\n for i in range(self.n_workers)\n ]\n\n print(f'Worker count: {len(workers)}')\n\n for worker in workers:\n worker.start()\n\n while len(constants.scores) < self.n_steps:\n time.sleep(400) # save checkpoint every 400 ms\n\n print(f'\\nCurrent scores: {constants.scores}')\n\n self.save(constants.episode)\n print(f'\\nCheckpoint saved at episode: {constants.episode}\\n')",
"def run_grid_experiment(self):\n # Ask for confirmation - optional.\n if self.flags.confirm:\n input('Press any key to continue')\n\n # Check max number of child processes. \n if self.max_concurrent_runs <= 0: # We need at least one proces!\n max_processes = len(os.sched_getaffinity(0))\n else: \n # Take into account the minimum value.\n max_processes = min(len(os.sched_getaffinity(0)), self.max_concurrent_runs)\n self.logger.info('Spanning experiments using {} CPU(s) concurrently.'.format(max_processes))\n\n # Run in as many threads as there are CPUs available to the script.\n with ThreadPool(processes=max_processes) as pool:\n func = partial(GridTesterCPU.run_experiment, self, prefix=\"\")\n pool.map(func, self.experiments_list)\n\n self.logger.info('Grid test experiments finished.')",
"def run(self):\n counter = 0\n self.clear_screen()\n while self.env.remaining_boxes and counter < self.step_limit:\n if ProgramParameters.USE_GUI:\n self.clear_screen()\n print(self.env)\n counter += 1\n print('\\r', counter, len(self.env.delivered_boxes), self.find_overall_efficiency(), end='')\n if ProgramParameters.USE_GUI:\n self.wait()\n for agent in self.env.free_agents:\n agent.explore()\n for box in self.env.remaining_boxes:\n box.simulate()\n if ProgramParameters.EXCHANGE_GRAPHS:\n self.env.exchange_graphs()\n print('\\r', ' ' * 80, '\\rTime taken:', counter)\n print('Efficiency:', self.find_overall_efficiency())\n if not self.env.remaining_boxes:\n print('They did it!')\n # train on all examples of current experience\n training_losses = []\n print('Instantaneous learing')\n for network in self.networks.values():\n training_loss = network.train_from_database()\n training_losses.append(training_loss)\n # write it to file\n with open(HOME + 'data/losses', 'a+') as f:\n print(counter, file=f, end=' ')\n print(*training_losses, file=f, sep='\\t')\n if ProgramParameters.USE_EXPERIENCE_REPLAY:\n # read instances for experience replay from file\n replay_losses = []\n print('Experience replay')\n # add current experiences to pickled file of all experiences\n for network in self.networks.values():\n network.write_current_experience_to_file()\n try:\n replay_database = network.read_random_experience_file()\n replay_loss = network.experience_replay(replay_database)\n replay_losses.append(replay_loss)\n except IndexError:\n pass\n # write it to file\n with open(HOME + 'data/replay_losses', 'a+') as f:\n print(counter, file=f, end=' ')\n print(*replay_losses, file=f, sep='\\t')",
"def run_experiment(x_loops=15, max_steps=0, display_on=True, max_fps=10,\n garden_size=8, tako_number=1, pop_max=30, max_width=1800,\n max_height=900, collect_data=True, export_all=False,\n rand_nets=False, max_gen = 505, genetic_mode=\"Plain\",\n learning_on=False, seeds=None, garden_mode=\"Diverse Static\",\n family_detection=None, family_mod=0, record_inbreeding=True,\n inbreed_lim = 1.1, hla_genes=0, binary_health=0,\n carrier_percentage=40, two_envs=False, diff_envs=False,\n migration_rate=0, phen_pref=False, filename=\"\"): \n #round width/height down to nearest multiple of 50 if need be\n if max_width % 50 != 0:\n max_width = max_width - (max_width % 50)\n if max_height % 50 != 0:\n max_height = max_height - (max_height % 50)\n\n i = 0\n #create csv files if they don't already exist\n if collect_data or export_all:\n if filename == \"\":\n filename = str(int(time.time())) + \".csv\"\n elif len(filename) < 4:\n filename = filename + \".csv\"\n elif filename[-4:] != \".csv\":\n filename = filename + \".csv\"\n\n if not os.path.exists(\"Data\"):\n os.makedirs(\"Data\")\n\n if collect_data:\n if not os.path.exists(os.path.join(\"Data\", filename)):\n with open(os.path.join(\"Data\", filename), 'a', newline='') as\\\n csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(['iteration', 'env #', 'ID', 'parent1',\n 'parent2', 'age', 'generation', '# children',\n 'mating attempts', 'accum pain',\n 'cause of death', 'timestep', 'mutations',\n 'parent_degree', 'parent_genoverlap',\n '# disorders',\n 'health a', 'health b', 'preference'])\n else:\n with open(os.path.join(\"Data\", filename), newline='') as\\\n csvfile:\n reader = csv.DictReader(csvfile)\n row = None\n for row in reader: pass\n if row != None:\n i = int(row[\"iteration\"]) + 1\n\n if export_all:\n h = make_headers()\n f = os.path.join('Data', (filename[:-4] + ' gene data.csv'))\n if not os.path.exists(f):\n with open(f, 'a') as file:\n writ = csv.writer(file)\n writ.writerow(h)\n\n tako.rand_nets = rand_nets\n tako.family_mod = family_mod\n tako.family_detection = family_detection\n gt.family_detection = family_detection\n tako.record_inbreeding = record_inbreeding\n tako.inbreed_lim = inbreed_lim\n tako.hla_genes = hla_genes\n tako.binary_health = binary_health\n tako.carrier_percentage = carrier_percentage\n tako.phen_pref = phen_pref\n gt.phen_pref = phen_pref\n \n loop_limit = x_loops\n if loop_limit < 1:\n loop_limit = 1\n\n if seeds == None:\n seeds = [None for i in range(x_loops)]\n\n while loop_limit > 0:\n #check if seeds is long enough\n if len(seeds) < loop_limit + i:\n for j in range(loop_limit + i - len(seeds)):\n seeds.append(seeds[j])\n if seeds[0] != None:\n tako.set_seed(seeds[i])\n g = garden_game(garden_size, tako_number, pop_max, max_width,\n max_height, display_on, max_fps, learning_on,\n genetic_mode, rand_nets, garden_mode, filename,\n export_all, family_mod, family_detection,\n two_envs, diff_envs, migration_rate,\n seeds[i])\n if display_on:\n main_window = g\n main_window.main_loop(max_steps, max_gen, display_on,\n collect_data, garden_mode, i)\n else:\n g.main_loop(max_steps, max_gen, display_on, collect_data,\n garden_mode, i)\n loop_limit -= 1\n i += 1",
"def run(self):\n if self.mode == 'remote':\n raise NotImplementedError('No auto run for remote jobs.')\n finished = False\n while not finished:\n if self.current_epoch == self.nepochs:\n logger.info('Reached {} epochs. Finishing.'.format(self.current_epoch))\n finished = True\n elif self.app.available_gpus >= self.nmin:\n n_spawns = self.app.available_gpus\n if self.current_epoch == 1:\n self.app.initialize_folders()\n gen_folders = '{}/*'.format(self.app.generator_folder)\n spawn_folders = self.app.move_generators_to_input(gen_folders)\n else:\n # First move possibly finished productions to their data folder\n try:\n self.app.move_trajs_to_folder(spawn_folders)\n except:\n pass\n self.app.update_metadata()\n self.fit_model()\n self.spawns = self.respawn_from_MSM(search_type='counts', n_spawns=n_spawns)\n spawn_folders = self.app.prepare_spawns(self.spawns, self.current_epoch)\n # Plot where chosen spawns are in the tICA landscape\n f, ax = plot_tica_landscape(self.ttrajs)\n plot_spawns(self.spawns, self.ttrajs, ax=ax)\n fig_fname = '{today}_e{epoch}_spawns.pdf'.format(today=datetime.date.today().isoformat(), epoch=self.current_epoch)\n f.savefig(fig_fname)\n\n self.app.run_GPUs_bash(\n folders=spawn_folders\n )\n self.current_epoch += 1\n else:\n logger.info('{} available GPUs. Minimum per epoch is {}'.format(self.app.available_gpus, self.nmin))\n logger.info('Going to sleep for {} seconds'.format(self.sleeptime))\n sleep(self.sleeptime)",
"def main(specification_dir, out_dir, num_gpus, exps_per_gpu):\n\n # 1. Load the specifications\n specs = load_specifications(specification_dir)\n \n # 2. Create the output directory\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n if os.listdir(out_dir):\n logger.warning(\"The output directory {} is not empty. Are you sure you want to continue?\".format(out_dir))\n # time.sleep(3)\n\n # 3. Create the workers with specific environment variables\n num_workers = num_gpus * exps_per_gpu\n\n with NonDaemonPool(num_workers) as pool:\n logger.info(\"Created {} workers\".format(num_workers))\n \n # Create the available device queue.\n m = multiprocessing.Manager()\n available_devices = m.Queue()\n for g in range(num_gpus):\n for _ in range(exps_per_gpu):\n available_devices.put(g)\n\n\n # 4. Create and distribute the workload\n workload = list(sorted([\n (spec, J(out_dir, spec[\"name\"]), available_devices) for spec in specs\n ], key=lambda x: (1 + 10000*x[0]['depth'])*x[0]['width']))\n \n logger.info(\"Running {} jobs accross {} GPUs\".format(len(workload), num_gpus))\n\n # 5. Launch the workers.\n logger.info(\"Launching the workers using `run_experiment`.\")\n list(pool.imap_unordered(\n launch_experiment_on_device,\n workload\n ))\n # pool.join()\n \n logger.info(\"Success, all experiments completed!\")",
"def __execute_experiment__(self, *args, **kwargs):\n from klibs.KLGraphics import clear\n\n if self.blocks == None:\n self.blocks = self.trial_factory.export_trials()\n\n P.block_number = 0\n P.trial_id = 0\n for block in self.blocks:\n P.recycle_count = 0\n P.block_number += 1\n P.practicing = block.practice\n self.block()\n P.trial_number = 1\n for trial in block: # ie. list of trials\n try:\n P.trial_id += 1 # Increments regardless of recycling\n self.__trial__(trial, block.practice)\n P.trial_number += 1\n except TrialException:\n block.recycle()\n P.recycle_count += 1\n clear() # NOTE: is this actually wanted?\n self.rc.reset()\n self.clean_up()\n\n self.incomplete = False\n if 'session_info' in self.database.tables:\n where = {'session_number': P.session_number}\n self.database.update('session_info', {'complete': True}, where)",
"def simple_worker_loop() -> None:\n print('\\nSimple worker loop tutorial', flush=True)\n\n # the first thing to do at the start of any experiment is to initialize a few global parameters\n # these parameters are shared across the entire repo\n ps.init_globals(\n seed=0, # if None, the experiment is not seeded and would initialized differently each time\n registry=None, # if None, a registry is created and used\n # a registry does bookkeeping of all people and locations used in the experiment\n )\n\n # init locations\n home = ps.env.Home()\n work = ps.env.Office() # any subclass of BusinessLocation can be a workplace, e.g. Bar, Restaurant, Hospital, etc.\n\n # init a worker\n person = ps.env.Worker(\n person_id=ps.env.PersonID('worker', age=35), # person_id is a unique id for this person\n home=home.id, # specify the home_id that person is assigned to\n work=work.id, # specify the id of the person's workplace\n )\n\n # Init simulator\n sim = ps.env.PandemicSim(\n locations=[work, home], # a list of all locations\n persons=[person] # a list of all persons\n )\n # PandemicSim by default creates and uses randomized testing and an SEIR infection model\n\n # Iterate through steps in the simulator, where each step advances an hour\n for _ in trange(24, desc='Simulating hour'):\n sim.step()\n\n # Or iterate by advancing in days by calling step_day in the simulator\n for _ in trange(10, desc='Simulating day'):\n sim.step_day()\n\n # The above loop iterates the simulator with no movement restrictions\n # To impose restrictions, for example, Stage-2 of austin_regulations\n sim.impose_regulation(ps.sh.austin_regulations[2])\n\n # Calling step_day now will run the simulator under Stage-2 regulation\n for _ in trange(10, desc='Simulating day (Under Stage-2)'):\n sim.step_day()",
"def main(workdir):\n dir = os.path.expanduser(workdir)\n \n #read the .dat file\n f = open('{}smi.dat'.format(dir))\n par = imp.load_source('par', '', f)\n \n #make a sdf file for visualization\n output = pybel.Outputfile(\"sdf\", dir + \"species.sdf\",overwrite=True)\n for name in par.smiles:\n smi = par.smiles[name]\n obmol = pybel.readstring(\"smi\",smi)\n output.write(obmol)\n output.close()\n \n #list with the jobs that need to be done\n jobs = []\n \n #iterate the input files\n for name in par.smiles:\n #name = input_file.replace('.inp','') #name of the calculation\n test_dir = dir + name #location where the calculations will be done\n if not os.path.exists(test_dir):\n os.mkdir(test_dir)\n \n #copy the input file to the working directory\n write_input_file(par,name,par.smiles[name],test_dir + '/input.inp')\n job = workdir + name + '/'\n jobs.append(job)\n \n run_threads(jobs, 'eric', max_running = 3)",
"def test_run_jobs(self):\n qm = QMCalculator(software='mopac',\n method='pm3',\n fileStore=self.fileStore,\n onlyCyclics=True,\n maxRadicalNumber=0,\n )\n output_directory = os.path.join(qm.settings.fileStore, '..', '..')\n qm.set_default_output_directory(output_directory)\n\n spc1 = Species().from_smiles('c1ccccc1')\n spc2 = Species().from_smiles('CC1C=CC=CC=1')\n spc_list = [spc1, spc2]\n\n qm.run_jobs(spc_list, procnum=1)",
"def run(self):\n for worker in self.simulation_workers:\n worker.start()",
"def runWork(self,benches):\r\n\r\n self.finished = 0\r\n\r\n# callbacks = []\r\n# callbacks.append(self.fetchBenchResults)\r\n# callbacks.append(self.setToWork)\r\n# callbacks.append(self.fetchResults)\r\n\r\n self.log('Signal','deferring runBenches to thread','work')\r\n\r\n# for fetcher in fetchers:\r\n# threads.deferToThread(fetcher())\r\n# self.log('Signal','Started fetcher: %s' % repr(fetcher),'runWork')\r\n\r\n# self.runBenches(callbacks)\r\n self.setToWork(benches)",
"def run(self):\n\n self.create_trials() # create them *before* running!\n self.start_experiment()\n\n for trail in self.trials:\n trial.run()\n\n self.close()",
"def run(self):\n # Create queue of experiment configurations\n queue = collections.deque(self.settings.EXPERIMENT_QUEUE)\n # Calculate number of experiments and number of processes\n self.n_exp = len(queue) * self.settings.N_REPLICATIONS\n self.n_proc = self.settings.N_PROCESSES \\\n if self.settings.PARALLEL_EXECUTION \\\n else 1\n logger.info('Starting simulations: %d experiments, %d process(es)'\n % (self.n_exp, self.n_proc))\n\n if self.settings.PARALLEL_EXECUTION:\n # This job queue is used only to keep track of which jobs have\n # finished and which are still running. Currently this information\n # is used only to handle keyboard interrupts correctly\n job_queue = collections.deque()\n # Schedule experiments from the queue\n while queue:\n experiment = queue.popleft()\n for _ in range(self.settings.N_REPLICATIONS):\n job_queue.append(self.pool.apply_async(run_scenario,\n args=(self.settings, experiment,\n self.seq.assign(), self.n_exp),\n callback=self.experiment_callback))\n self.pool.close()\n # This solution is probably not optimal, but at least makes\n # KeyboardInterrupt work fine, which is crucial if launching the\n # simulation remotely via screen.\n # What happens here is that we keep waiting for possible\n # KeyboardInterrupts till the last process terminates successfully.\n # We may have to wait up to 5 seconds after the last process\n # terminates before exiting, which is really negligible\n try:\n while job_queue:\n job = job_queue.popleft()\n while not job.ready():\n time.sleep(5)\n except KeyboardInterrupt:\n self.pool.terminate()\n self.pool.join()\n\n else: # Single-process execution\n while queue:\n experiment = queue.popleft()\n for _ in range(self.settings.N_REPLICATIONS):\n self.experiment_callback(run_scenario(self.settings,\n experiment, self.seq.assign(),\n self.n_exp))\n if self._stop:\n self.stop()\n\n logger.info('END | Planned: %d, Completed: %d, Succeeded: %d, Failed: %d',\n self.n_exp, self.n_fail + self.n_success, self.n_success, self.n_fail)",
"def main():\n dataset_idx = [11]\n network_idx = [0]\n reshape_input = [False]\n output_idxs = [0, 1]\n lrs = [0, 1, 2]\n dataset_ft_idx = [0,1,2,3]\n counter_exp = 0\n freeze = [0]\n percentages = [12]\n for dts in range(len(dataset_idx)):\n for nt in range(len(network_idx)):\n for opt in output_idxs:\n for dft in dataset_ft_idx:\n for pr in percentages:\n for rsi in range(len(reshape_input)):\n for fr in freeze:\n for lr in lrs:\n config = configuration(dataset_idx=dataset_idx[dts],\n network_idx=network_idx[nt],\n output_idx=opt,\n usage_modus_idx=5,\n dataset_fine_tuning_idx=dft,\n reshape_input=reshape_input[rsi],\n learning_rates_idx=lr,\n name_counter=counter_exp,\n freeze=fr,\n percentage_idx=pr,\n fully_convolutional=False)\n\n setup_experiment_logger(logging_level=logging.DEBUG,\n filename=config['folder_exp'] + \"logger.txt\")\n\n logging.info('Finished')\n\n modus = Modus_Selecter(config)\n\n # Starting process\n modus.net_modus()\n counter_exp += 1\n\n\n return",
"def sbm_experiments():\n # The key experiment is to fix n, r, and p and vary the ratio q/p.\n n = 1000\n average_over = 10\n\n # We will run a few experiments\n rs = [5]\n ps = [1e-11]\n run_lps = [False]\n\n # Use the same ratios for each experiment\n ratios = [0.5 * x for x in range(4, 61)]\n\n # Whether to append results to the results files\n append_results = False\n\n for index in range(len(rs)):\n sbm_experiment_internal(n, rs[index], ps[index], average_over, run_lps[index], ratios, append_results)",
"def startWorkers(self):\n for i in range(self.aOT):\n t = thr.Thread(target=self.threadWorker)\n t.start()\n self.threads.append(t)",
"def run_experiments(size, approach):\n\n # Create folder (if needed) where to store query evaluation logs and raw results\n if not os.path.isdir(f\"{path_reports}/eval-query-{current_run}\"):\n shell(f\"mkdir -p {path_reports}/eval-query-{current_run}\")\n\n # Create folder (if needed) where to store store evaluation logs and raw results\n if not os.path.isdir(f\"{path_reports}/eval-store-{current_run}\"):\n shell(f\"mkdir -p {path_reports}/eval-store-{current_run}\")\n\n # Helper function computing the path of a file/folder for the given query evaluation iteration \n def query_path(iteration, filename=None):\n folder = size[\"id\"] + \"_\" + str(iteration)\n return path_reports + \"/eval-query-\" + current_run + \"/\" + folder + (\"/\" + filename if filename != None else \"\") \n\n # Helper function computing the path of a file/folder for the given store evaluation test / cpu setting\n def store_path(test, cpu, filename=None):\n folder = size[\"id\"] + \"_\" + approach[\"id\"] + \"_\" + test + (\"_\" + cpu[\"id\"] if cpu != None else \"\")\n return path_reports + \"/eval-store-\" + current_run + \"/\" + folder + (\"/\" + filename if filename != None else \"\")\n \n # Determine whether partial traces and named graphs are supported\n partial = approach[\"supports_partial\"]\n graphs = approach[\"supports_graphs\"]\n \n # Skip setting if all data is available (check for presence of log files - delete them to repeat test)\n may_skip = (not sp_enable or not partial or not graphs or os.path.isfile(store_path(\"sp\", None, \"eval.log\")))\n may_skip = may_skip and (not sf_enable or os.path.isfile(store_path(\"sf\", None, \"eval.log\")))\n if query_enable and approach[\"id\"] == query_approach_id:\n for i in range(0, query_num_iterations):\n may_skip = may_skip and os.path.isfile(query_path(i, \"eval.log\"))\n for cpu in cpus:\n may_skip = may_skip and (not pp_enable or not partial or not graphs or os.path.isfile(store_path(\"pp\", cpu, \"eval.log\")))\n may_skip = may_skip and (not pf_enable or os.path.isfile(store_path(\"pf\", cpu, \"eval.log\")))\n if may_skip:\n return\n\n # Delete (if needed) and extract again the repository from its .tar.xz file, so to work on a clean repository (at the end of this test, the repository is no more clean)\n path_dump = path_data + \"/\" + size[\"id\"] + \"_\" + approach[\"id\"] + \".tar.lz\"\n path_repo = path_repos + \"/\" + size[\"id\"] + \"_\" + approach[\"id\"]\n if not os.path.isfile(path_dump):\n log(f\"Missing required file {path_dump}\")\n sys.exit()\n if os.path.isdir(path_repo):\n shell(f\"rm -Rf {path_repo}\")\n shell(f\"{cmd_plzip} -kdc {path_dump} | tar xf - -C {path_repos}\")\n\n # Locate the repository URL\n repo_url = f\"http://localhost:{server_port}/repositories/promo\"\n \n # Query test (if enabled)\n if query_enable and approach[\"id\"] == query_approach_id:\n for i in range(0, query_num_iterations):\n if not os.path.isfile(query_path(i, \"eval.log\")):\n shell(f\"mkdir -p {query_path(i)}\")\n shell(f\"taskset -a {query_taskset} {cmd_graphdb} start {path_repo}\")\n shell(f\"taskset -a {query_taskset} {cmd_mokip} eval-query -w -u {repo_url} -q {path_queries} -r {query_path(i)} \" +\n f\"| tee {query_path(i, 'eval.log')}\")\n shell(f\"taskset -a {query_taskset} {cmd_graphdb} stop {path_repo}\")\n\n # Sequential Partial test (to assess store times per trace and their components)\n if sp_enable and partial and graphs and not os.path.isfile(store_path(\"sp\", None, \"eval.log\")):\n shell(f\"mkdir -p {store_path('sp', None)}\")\n 
shell(f\"taskset -a {sp_taskset} {cmd_graphdb} start {path_repo}\")\n shell(f\"taskset -a {sp_taskset} {cmd_mokip} eval-store -d {path_data}/traces_pp.jsonl.gz \" + \n f\"-u {repo_url} -i {approach['inference']} -U REPLACE_GRAPH_PROTOCOL \" + \n f\"-o {path_ontology} --trace-namespace '{namespace_trace}' --graph-namespace '{namespace_graph}' \" +\n f\"-T {timeout} -r {store_path('sp', None)} -t 1 -w 50 -p -D \" + \n f\"| tee {store_path('sp', None, 'eval.log')}\")\n shell(f\"taskset -a {sp_taskset} {cmd_graphdb} stop {path_repo}\")\n\n # Parallel Partial (to assess throughput, varying # of CPU cores)\n for cpu in cpus:\n if pp_enable and partial and graphs and not os.path.isfile(store_path(\"pp\", cpu, \"eval.log\")):\n shell(f\"mkdir -p {store_path('pp', cpu)}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_graphdb} start {path_repo}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_mokip} eval-store -d {path_data}/traces_pp.jsonl.gz \" +\n f\"-u {repo_url} -i {approach['inference']} -U REPLACE_GRAPH_PROTOCOL \" + \n f\"-o {path_ontology} --trace-namespace '{namespace_trace}' --graph-namespace '{namespace_graph}' \" + \n f\"-T {timeout} -r {store_path('pp', cpu)} -t {max(2, cpu['num_threads'])} -w 50 -p -D \" + \n f\"| tee {store_path('pp', cpu, 'eval.log')}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_graphdb} stop {path_repo}\")\n\n # Sequential Full test (to assess store times per trace and their components)\n if sf_enable and not os.path.isfile(store_path(\"sf\", None, \"eval.log\")):\n shell(f\"mkdir -p {store_path('sf', None)}\")\n shell(f\"taskset -a {sf_taskset} {cmd_graphdb} start {path_repo}\")\n shell(f\"taskset -a {sf_taskset} {cmd_mokip} eval-store -d {path_data}/traces_sf.jsonl.gz \" + \n f\"-u {repo_url} -i {approach['inference']} -U {'APPEND' if graphs else 'APPEND_DEFAULT_GRAPH'} \" +\n f\"-o {path_ontology} --trace-namespace '{namespace_trace}' --graph-namespace '{namespace_graph}' \" +\n f\"-T {timeout} -r {store_path('sf', None)} -t 1 -w 50 \" + \n f\"| tee {store_path('sf', None, 'eval.log')}\")\n shell(f\"taskset -a {sf_taskset} {cmd_graphdb} stop {path_repo}\")\n\n # Parallel Full (to assess throughput where data is also deleted, varying # of CPU cores)\n for cpu in cpus:\n if pf_enable and not os.path.isfile(store_path(\"pf\", cpu, \"eval.log\")):\n update = \"APPEND\" if graphs else \"APPEND_DEFAULT_GRAPH\"\n shell(f\"mkdir -p {store_path('pf', cpu)}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_graphdb} start {path_repo}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_mokip} eval-store -d {path_data}/traces_pf_{cpu['id']}.jsonl.gz \" +\n f\"-u {repo_url} -i {approach['inference']} -U {'APPEND' if graphs else 'APPEND_DEFAULT_GRAPH'} \" + \n f\"-o {path_ontology} --trace-namespace '{namespace_trace}' --graph-namespace '{namespace_graph}' \" +\n f\"-T {timeout} -r {store_path('pf', cpu)} -t {max(2, cpu['num_threads'])} -w 50 \" + \n f\"| tee {store_path('pf', cpu, 'eval.log')}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_graphdb} stop {path_repo}\")\n\n # Drop the repository (both to save space and since it is not clean anymore)\n shell(f\"rm -Rf {path_repo}\")",
"def master():\n global fileName, hyp, generation, rep_type, gen\n\n start_t = datetime.datetime.now()\n print('started at ', start_t)\n\n for iter_i in range(iter_num):\n print('iter ', iter_i)\n make_new_file(iter_i)\n\n data = DataGatherer(fileName, hyp)\n neat = Neat(hyp, rep_type)\n\n for gen in range(generation):\n pop = neat.ask() # Get newly evolved individuals from NEAT\n reward = batchMpiEval(pop, gen=neat.gen, sp_count=len(neat.species)) # Send pop to be evaluated by workers\n neat.tell(reward) # Send fitness to NEAT\n neat.probMoo() # Rank population according to objectivess\n neat.speciate() # Divide population into species\n\n data = gatherData(data,neat,gen,iter_i,hyp,savePop=True)\n print(gen, '\\t - \\t', data.display())\n print('\\t', len(neat.species))\n\n neat.gen += 1\n\n # Clean up and data gathering at run end\n data = gatherData(data,neat,gen,iter_i,hyp,savePop=False)\n data.save()\n data.savePop(neat.pop,fileName) # Save population as 2D numpy arrays\n\n print('finish at ', datetime.datetime.now())\n print('total time ', datetime.datetime.now()-start_t)\n\n stopAllWorkers()",
"def algorithm(self):\n self.logger.debug(\"Starting\")\n while(True):\n for status, worktype in states():\n limit = self.slaves.queueableTasks()\n if not self._lockWork(limit=limit, getstatus=status, setstatus='HOLDING'):\n continue\n pendingwork = self._getWork(limit=limit, getstatus='HOLDING')\n self.logger.info(\"Retrieved a total of %d %s works\" %(len(pendingwork), worktype))\n self.logger.debug(\"Retrieved the following works: \\n%s\" %(str(pendingwork)))\n self.slaves.injectWorks([(worktype, work, None) for work in pendingwork])\n for task in pendingwork:\n self.updateWork(task['tm_taskname'], 'QUEUED')\n self.logger.info('Worker status:')\n self.logger.info(' - free slaves: %d' % self.slaves.freeSlaves())\n self.logger.info(' - acquired tasks: %d' % self.slaves.queuedTasks())\n self.logger.info(' - tasks pending in queue: %d' % self.slaves.pendingTasks())\n\n finished = self.slaves.checkFinished()\n self.updateFinished(finished)\n if self.TEST:\n #if we are testing we just do one cycle\n break\n\n time.sleep(self.config.TaskWorker.polling)\n\n self.logger.debug(\"Stopping\")",
"def worker(worker_idx: int, work_queue: Queue, result_queue: Queue):\n game = self.get_env()\n predictor = self.get_model(game)\n msg.good(f\"Worker {worker_idx} started.\")\n\n while (\n ParallelPracticeRunner.request_quit is False\n and work_queue.empty() is False\n ):\n episode, args = work_queue.get()\n start = time.time()\n try:\n (\n episode_examples,\n episode_reward,\n is_win,\n problem,\n ) = self.execute_episode(\n episode,\n game,\n predictor,\n is_verbose_worker=worker_idx == 0,\n **args,\n )\n except KeyboardInterrupt:\n break\n except Exception as e:\n err = print_error(e, f\"Self-practice episode threw\")\n result_queue.put((i, [], {\"error\": err}))\n continue\n duration = time.time() - start\n episode_summary = EpisodeSummary(\n complexity=problem.complexity,\n text=problem.text,\n reward=episode_reward,\n solved=bool(is_win),\n duration=duration,\n )\n result_queue.put((i, episode_examples, episode_summary))\n return 0",
"def worker(scenes, cap_templates, ques_templates, worker_id, out_q):\n\n dialogs = []\n for index, scene in enumerate(scenes):\n cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())\n print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' % \\\n (cur_time, worker_id, index, len(scenes), scene['image_index']))\n try:\n gen_dialog = generate_dialog_bfs(scene, cap_templates, ques_templates)\n dialogs.append(json.loads(json.dumps(gen_dialog)))\n except:\n print('NOTE: Missing data for %d' % scene['image_index'])\n out_q.put({worker_id: dialogs})",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')",
"def main(args: argparse) -> None:\n threads = args.threads\n\n for thread in range(threads):\n canvas = canvas_init(args, thread)\n seeds = tools.load_seeds_from_file(args.seed, args.manual_seed)\n\n # Main process\n canvas_thread = threading.Thread(target=run,\n args=(args, canvas, seeds, thread,\n threads))\n canvas_thread.start()",
"def main():\n\n if not os.path.isdir('./results'):\n # results directory is needed\n os.mkdir('./results')\n\n # Run bess daemon\n print('start bess daemon')\n ret = bessctl_do('daemon start')\n if ret.returncode != 0:\n print('failed to start bess daemon')\n return 1\n\n #sleep(2)\n\n cnt_prt_q = [(2,2), (4,2), (8, 2), (2, 8), (4, 8), (8, 8), (16, 8)]\n cnt_prt_q = [(2,128),]\n # cnt_prt_q = [0]\n # Warning: SINGLE_PMD_MULTIPLE_Q is not supported any more.\n # (it needs EXCESS variable to be defined)\n exp_types = ['MULTIPLE_PMD_MULTIPLE_Q',] # 'SINGLE_PMD_MULTIPLE_Q']\n agents = ['BKDRFT', 'BESS']\n agents = ['BKDRFT',]\n for _type in exp_types:\n for agent in agents:\n results = []\n for cnt_ports, cnt_queues in cnt_prt_q:\n res = run_exp(_type, agent, cnt_ports, cnt_queues)\n results.append(res)\n generate_report_file(results,\n './results/{}_{}_results.txt'.format(_type, agent))",
"def logging_loop(self, num_gpus):\n # Launch the test worker to get performance metrics\n self.test_worker = self_play.SelfPlay.options(\n num_cpus=0, num_gpus=num_gpus,\n ).remote(\n self.checkpoint,\n self.Game,\n self.config,\n self.config.seed + self.config.num_workers,\n )\n self.test_worker.continuous_self_play.remote(\n self.shared_storage_worker, None, True\n )\n\n # Write everything in TensorBoard\n writer = SummaryWriter(self.config.results_path)\n\n print(\n \"\\nTraining...\\nRun tensorboard --logdir ./results and go to http://localhost:6006/ to see in real time the training performance.\\n\"\n )\n\n # Save hyperparameters to TensorBoard\n hp_table = [\n f\"| {key} | {value} |\" for key, value in self.config.__dict__.items()\n ]\n writer.add_text(\n \"Hyperparameters\",\n \"| Parameter | Value |\\n|-------|-------|\\n\" + \"\\n\".join(hp_table),\n )\n # Save model representation\n writer.add_text(\n \"Model summary\", self.summary,\n )\n # Loop for updating the training performance\n counter = 0\n keys = [\n \"total_reward\",\n \"wormzero_reward\",\n \"opponent_reward\",\n \"episode_length\",\n \"mean_value\",\n \"training_step\",\n \"lr\",\n \"total_loss\",\n \"value_loss\",\n \"policy_loss\",\n \"num_played_games\",\n \"num_played_steps\",\n \"num_reanalysed_games\",\n ]\n info = ray.get(self.shared_storage_worker.get_info.remote(keys))\n try:\n while info[\"training_step\"] < self.config.training_steps:\n info = ray.get(self.shared_storage_worker.get_info.remote(keys))\n writer.add_scalar(\n \"1.Total_reward/1.Total_reward\", info[\"total_reward\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/2.Mean_value\", info[\"mean_value\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/3.Episode_length\", info[\"episode_length\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/4.WormZero_reward\", info[\"wormzero_reward\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/5.Opponent_reward\",\n info[\"opponent_reward\"],\n counter,\n )\n writer.add_scalar(\n \"2.Workers/1.Self_played_games\", info[\"num_played_games\"], counter,\n )\n writer.add_scalar(\n \"2.Workers/2.Training_steps\", info[\"training_step\"], counter\n )\n writer.add_scalar(\n \"2.Workers/3.Self_played_steps\", info[\"num_played_steps\"], counter\n )\n writer.add_scalar(\n \"2.Workers/4.Reanalysed_games\",\n info[\"num_reanalysed_games\"],\n counter,\n )\n writer.add_scalar(\n \"2.Workers/5.Training_steps_per_self_played_step_ratio\",\n info[\"training_step\"] / max(1, info[\"num_played_steps\"]),\n counter,\n )\n writer.add_scalar(\"2.Workers/6.Learning_rate\", info[\"lr\"], counter)\n writer.add_scalar(\n \"3.Loss/1.Total_weighted_loss\", info[\"total_loss\"], counter\n )\n writer.add_scalar(\"3.Loss/Value_loss\", info[\"value_loss\"], counter)\n writer.add_scalar(\"3.Loss/Policy_loss\", info[\"policy_loss\"], counter)\n print(\n f'Last test reward: {info[\"total_reward\"]:.2f}. Training step: {info[\"training_step\"]}/{self.config.training_steps}. Played games: {info[\"num_played_games\"]}. 
Loss: {info[\"total_loss\"]:.2f}',\n end=\"\\r\",\n )\n counter += 1\n time.sleep(0.5)\n except KeyboardInterrupt:\n pass\n\n self.terminate_workers()\n\n if self.config.save_model:\n # Persist replay buffer to disk\n print(\"\\n\\nPersisting replay buffer games to disk...\")\n pickle.dump(\n {\n \"buffer\": self.replay_buffer,\n \"num_played_games\": self.checkpoint[\"num_played_games\"],\n \"num_played_steps\": self.checkpoint[\"num_played_steps\"],\n \"num_reanalysed_games\": self.checkpoint[\"num_reanalysed_games\"],\n },\n open(os.path.join(self.config.results_path, \"replay_buffer.pkl\"), \"wb\"),\n )",
"def run_worker(self):\n\n # exec(open('restarter.py').read())\n # sys.exit()\n self.update_session_state()\n currentTime = QTime().currentTime()\n fromTime = QTime(int(self.settings.TECHFROMHOUR), int(self.settings.TECHFROMMIN))\n toTime = QTime(int(self.settings.TECHTOHOUR), int(self.settings.TECHTOMIN))\n sessionState = self.lblMarket.text()\n\n if fromTime < currentTime < toTime:\n print(\"Worker skept-Technical break : \", fromTime.toString(\"hh:mm\"), \" to \", toTime.toString(\"hh:mm\"))\n self.update_console(\"Technical break untill \" + toTime.toString(\"hh:mm\"))\n\n else:\n self.update_console(\"Starting Worker- UI Paused\")\n self.uiTimer.stop() # to not cause an errors when lists will be resetted\n worker = Worker(\n self.ibkrworker.process_positions_candidates) # Any other args, kwargs are passed to the run function\n worker.signals.result.connect(self.update_ui)\n worker.signals.status.connect(self.update_status)\n worker.signals.notification.connect(self.update_console)\n # Execute\n self.threadpool.start(worker)",
"def run_sc(no_prods, prev_ledg_update, list_of_workers, no_prod):\n \n list_of_rands = []\n\n for worker_info in reversed(list_of_workers):\n print(worker_info[0])\n if check_fees(worker_info[3]) == True:\n print(\"Worker \", worker_info[0], \"paid their fees\")\n\n elif check_fees(worker_info[3]) == False:\n \n print(\"Worker \", worker_info[0], \"did not pay their fees\")\n list_of_workers.remove(worker_info)\n \n continue \n \n if check_corr_rando(worker_info[1], worker_info[2], prev_ledg_update) == True:\n print(\"Worker \", worker_info[0], \"has a well formed random\")\n \n\n elif check_corr_rando(worker_info[1], worker_info[2], prev_ledg_update) == False:\n print(\"Worker \", worker_info[0], \"failed to produce a well formed random\")\n list_of_workers.remove(worker_info)\n\n continue\n \n\n list_of_rands.append(worker_info[1])\n\n global_rand = gen_big_rand(list_of_rands)\n\n if global_rand == 0:\n print(\"Something went wrong global_rand was 0\")\n\n dist_list = get_dist_from_big_rand(global_rand, list_of_workers) \n PIDs = find_prod_ids(dist_list, no_prod)\n\n for producer in PIDs:\n print (\"Worker -->\", producer, \"has been selected as a producer for this cycle\")",
"def main():\n accessCount = int(input(\"Enter the number of accesses: \"))\n numWriters = int(input(\"Enter the number of writers: \"))\n numReaders = int(input(\"Enter the number of readers: \"))\n\n sleepMax = 4\n\n counter = Counter(0)\n print(\"counter defined at:\", str(counter.count))\n cell = SharedCell(counter)\n print(\"shared counter data:\", str(cell.data.count))\n\n writerList = []\n readerList = []\n for cnt in range(numWriters):\n writerList.append(Writer(cell, accessCount, sleepMax, cnt + 1))\n for cnt in range(numReaders):\n readerList.append(Reader(cell, accessCount, sleepMax, cnt + 1))\n\n print(\"Starting the threads\")\n for writer in writerList:\n writer.start()\n for reader in readerList:\n reader.start()",
"def start_workers(self):\n\n for thread in self.threads:\n thread.start()"
]
| [
"0.630366",
"0.6252322",
"0.6240117",
"0.60842514",
"0.6068625",
"0.60128623",
"0.5983428",
"0.59683526",
"0.5961476",
"0.59357417",
"0.59309727",
"0.58802766",
"0.5866607",
"0.5844822",
"0.5843062",
"0.58358",
"0.58219063",
"0.58118093",
"0.57361317",
"0.5730246",
"0.572647",
"0.5721557",
"0.5718303",
"0.5710016",
"0.56940955",
"0.5672499",
"0.56571054",
"0.5643291",
"0.5643268",
"0.5632792"
]
| 0.6466254 | 0 |
Helper function to convert the current state's video frame into an input tensor for the neural network | def vid2tensor( self, current_frame): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_input_tensor(image):\n tensor_index = interpreter.get_input_details()[0]['index']\n input_tensor = interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image",
"def set_input_tensor(self, image):\n tensor_index = self.model.get_input_details()[0]['index']\n input_tensor = self.model.tensor(tensor_index)()[0]\n input_tensor[:, :] = image",
"def _set_input_tensor(self, image):\n tensor_index = self._interpreter.get_input_details()[0]['index']\n input_tensor = self._interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image",
"def set_input_tensor(interpreter, image):\n tensor_index = interpreter.get_input_details()[0]['index']\n input_tensor = interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image",
"def set_input_tensor(self, image):\n tensor_index = self.interpreter.get_input_details()[0]['index']\n input_tensor = self.interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image",
"def set_input_tensor(interpreter, image):\n tensor_index = interpreter.get_input_details()[0][\"index\"]\n input_tensor = interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image",
"def set_input_tensor(interpreter, image):\n tensor_index = interpreter.get_input_details()[0][\"index\"]\n input_tensor = interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image",
"def set_input_tensor(interpreter, image):\n tensor_index = interpreter.get_input_details()[0][\"index\"]\n input_tensor = interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image",
"def preprocess(self, frame: np.ndarray) -> torch.TensorType:\n tensor = cv.resize(frame, (self.IMGSZ, self.IMGSZ)) \n tensor = tensor.transpose(2, 0, 1)\n tensor = torch.from_numpy(tensor)\n tensor = torch.unsqueeze(tensor, 0)\n tensor = tensor.half() if self.half else tensor.float()\n tensor = tensor / 255.0\n tensor = tensor.to(self.device)\n\n return tensor",
"def state_tensor_convert(self,state):\n return torch.Tensor(state)",
"def preprocess_frame(self, frame):\n state = torch.Tensor(frame)\n return gpuify(state, self.gpu_id)",
"def forward(self, inp, state):\n emb = self.drop(self.encoder(inp))\n y, state_next = self.rnn(emb, state)\n y = self.drop(y)\n y = self.decoder(y)\n return y, state_next",
"def forward_tensor(self, x):\n pass",
"def __call__(self, tensor): \n seed = random.random()\n if seed > 0.5:\n return tensor\n else:\n is_tensor = tensor is torch.Tensor\n if is_tensor:\n tensor = tensor.numpy()\n\n if len(tensor.shape) == 4:\n # Deal with rank=5 situation (BTCHW)\n video_sequence = []\n for frame in tensor:\n video_sequence.append(cv2.flip(frame, 0))\n else:\n raise Exception(\"This function don't support the input whose rank is not 4...\")\n \n # Transfer back to torch.Tensor if needed\n video_sequence = np.asarray(video_sequence)\n if is_tensor:\n video_sequence = torch.from_numpy(video_sequence)\n return video_sequence",
"def train_set_input(self, input):\r\n self.real_video = input['video'].cuda()\r\n self.source_image = input['trimmed'].cuda()\r\n self.source_image_label = input['trimmed_label'].cuda()\r\n self.video_label = input['video_label'].cuda()",
"def forward(self, input, frame_index):\n losses = []\n offsets= []\n filters = []\n occlusions = []\n\n device = torch.cuda.current_device()\n # print(device)\n # s1 = torch.cuda.Stream(device=device, priority=5)\n # s2 = torch.cuda.Stream(device=device, priority=10) #PWC-Net is slow, need to have higher priority\n s1 = torch.cuda.current_stream()\n s2 = torch.cuda.current_stream()\n\n '''\n STEP 1: sequeeze the input \n '''\n if self.training == True:\n if self.temporal== False:\n assert input.size(0) == 3\n input_0,input_1,input_2 = torch.squeeze(input,dim=0) # input_2 middle\n input_3,input_4,input_5,input_6 = [],[],[],[]\n else:\n assert input.size(0) == 7\n input_0,input_1,input_2, input_3, input_4, input_5,input_6 = \\\n torch.squeeze(input,dim=0)\n else:\n if self.temporal == False:\n assert input.size(0) ==2\n input_0,input_2 = torch.squeeze(input,dim=0)\n input_1, input_3,input_4,input_5,input_6 = [],[],[],[],[]\n else:\n assert input.size(0) == 4\n input0,input_2,input_4,input_6 = torch.sequeeze(input,dim= 0)\n input_1,input_3,input_5,input7 = [],[],[],[]\n\n\n '''\n STEP 2: initialize the auxiliary input either from temporal or scale predecessor\n '''\n pre_scale_offset, pre_scale_filter, pre_scale_occlusion = None, None, None\n if self.temporal:\n pre_scale_offset_c, pre_scale_filter_c, pre_scale_occlusion_c = None, None, None\n pre_scale_offset_n, pre_scale_filter_n, pre_scale_occlusion_n = None, None, None\n\n '''\n STEP 3: iteratively execuate the Multiscale Network \n '''\n # from the coarser scale to the most\n for i in range(self.scale_num):\n\n '''\n STEP 3.1: prepare current scale inputs\n '''\n #prepare the input data of current scale\n cur_input_0 = F.avg_pool2d(input_0,pow(self.scale_ratio,self.scale_num - i - 1))\n if self.training == True:\n cur_input_1 = F.avg_pool2d(input_1,pow(self.scale_ratio,self.scale_num - i - 1))\n cur_input_2 = F.avg_pool2d(input_2,pow(self.scale_ratio,self.scale_num - i - 1))\n if self.temporal == True:\n # frame 3 is the central frame to be interpolated.\n if self.training == True:\n cur_input_3 = F.avg_pool2d(input_3, pow(self.scale_ratio,self.scale_num - i - 1))\n cur_input_4 = F.avg_pool2d(input_4, pow(self.scale_ratio,self.scale_num - i - 1))\n if self.training== True:\n cur_input_5 = F.avg_pool2d(input_5, pow(self.scale_ratio,self.scale_num - i - 1))\n cur_input_6 = F.avg_pool2d(input_6, pow(self.scale_ratio,self.scale_num - i - 1))\n\n '''\n STEP 3.2: concatenating the inputs.\n '''\n if i == 0:\n cur_offset_input = torch.cat((cur_input_0, cur_input_2), dim=1)\n cur_filter_input = cur_offset_input # torch.cat((cur_input_0, cur_input_2), dim=1)\n # cur_occlusion_input = cur_offset_input # torch.cat((cur_input_0, cur_input_2), dim=1)\n\n if self.temporal==True:\n # the central part\n cur_offset_input_c = torch.cat((cur_input_2,cur_input_4),dim = 1)\n cur_filter_input_c = cur_offset_input_c #torch.cat((cur_input_2,cur_input_4),dim =1)\n # cur_occlusion_input_c = cur_offset_input_c #torch.cat((cur_input_2,cur_input_4),dim =1)\n # the next part\n cur_offset_input_n = torch.cat((cur_input_4,cur_input_6),dim = 1)\n cur_filter_input_n = cur_offset_input_n# torch.cat((cur_input_4,cur_input_6),dim = 1)\n # cur_occlusion_input_n = cur_offset_input_n #torch.cat((cur_input_4,cur_input_6),dim = 1)\n # # to compose a enlarged batch with the three parts\n # cur_offset = torch.cat((cur_offset, cur_offset_c, cur_offset_n), dim=0)\n # cur_filter = torch.cat((cur_filter, cur_filter_c,cur_filter_n), dim=0)\n # cur_occlusion = 
torch.cat((cur_occlusion,cur_occlusion_c, cur_occlusion_n), dim=0)\n else:\n cur_offset_input = torch.cat((cur_input_0,cur_input_2,pre_scale_offset),dim=1)\n cur_filter_input = torch.cat((cur_input_0,cur_input_2,pre_scale_filter),dim =1)\n # cur_occlusion_input = torch.cat((cur_input_0,cur_input_2,pre_scale_occlusion),dim=1)\n\n if self.temporal ==True:\n cur_offset_input_c = torch.cat((cur_input_2, cur_input_4,pre_scale_offset_c),dim=1)\n cur_filter_input_c = torch.cat((cur_input_2,cur_input_4, pre_scale_filter_c),dim =1 )\n # cur_occlusion_input_c = torch.cat((cur_input_2,cur_input_4,pre_scale_occlusion_c),dim = 1)\n\n cur_offset_input_n = torch.cat((cur_input_4,cur_input_6,pre_scale_offset_n),dim=1)\n cur_filter_input_n = torch.cat((cur_input_4,cur_input_6,pre_scale_filter_n),dim=1)\n # cur_occlusion_input_n = torch.cat((cur_input_4,cur_input_6,pre_scale_occlusion_n),dim=1)\n\n # # to compose a enlarged batch with the three parts\n # cur_offset = torch.cat((cur_offset, cur_offset_c, cur_offset_n), dim=0)\n # cur_filter = torch.cat((cur_filter, cur_filter_c,cur_filter_n), dim=0)\n # cur_occlusion = torch.cat((cur_occlusion,cur_occlusion_c, cur_occlusion_n), dim=0)\n\n '''\n STEP 3.3: perform the estimation by the Three subpath Network \n '''\n if i ==0 :\n\n time_offsets = [ kk * self.timestep for kk in range(1, 1+self.numFrames,1)]\n\n if len(time_offsets) == 1:\n frame_index = [0]\n\n # always set depthNet to evaluation mode without optimizing its parameters.\n # self.depthNet = self.depthNet.eval()\n\n with torch.cuda.stream(s1):\n temp = self.depthNet(torch.cat((cur_filter_input[:, :3, ...],\n cur_filter_input[:, 3:, ...]),dim=0))\n log_depth = [temp[:cur_filter_input.size(0)], temp[cur_filter_input.size(0):]]\n\n # print(\"depth estimation time\")\n # print(time.time() - lasttime)\n # lasttime = time.time()\n\n # log_depth = [self.depthNet(cur_filter_input[:, :3, ...]),\n # self.depthNet(cur_filter_input[:, 3:, ...])]\n # combine the depth with context to\n cur_ctx_output = [\n torch.cat((self.ctxNet(cur_filter_input[:, :3, ...]),\n log_depth[0].detach()), dim=1),\n torch.cat((self.ctxNet(cur_filter_input[:, 3:, ...]),\n log_depth[1].detach()), dim=1)\n ]\n # print(\"context extraction time\")\n # print(time.time() - lasttime)\n # lasttime = time.time()\n temp = self.forward_singlePath(self.initScaleNets_filter, cur_filter_input, 'filter')\n cur_filter_output = [self.forward_singlePath(self.initScaleNets_filter1, temp, name=None),\n self.forward_singlePath(self.initScaleNets_filter2, temp, name=None)]\n\n # print(\"filter estimation time\")\n # print(time.time() - lasttime)\n # lasttime = time.time()\n # temp = self.forward_singlePath(self.initScaleNets_occlusion,cur_occlusion_input,'occlusion')\n # cur_occlusion_output = [self.forward_singlePath(self.initScaleNets_occlusion1,temp,name=None),\n # self.forward_singlePath(self.initScaleNets_occlusion2,temp,name=None)]\n\n depth_inv = [1e-6 + 1 / torch.exp(d) for d in log_depth]\n\n with torch.cuda.stream(s2):\n # use the occlusion as the depthmap outpu\n for _ in range(1):\n cur_offset_outputs = [\n self.forward_flownets(self.flownets, cur_offset_input, time_offsets=time_offsets, # F_0_t\n flowmethod=self.flowmethod),\n self.forward_flownets(self.flownets, torch.cat((cur_offset_input[:, 3:, ...], # F_1_t\n cur_offset_input[:, 0:3, ...]), dim=1),\n time_offsets=time_offsets[::-1],\n flowmethod=self.flowmethod)\n ]\n\n torch.cuda.synchronize() #synchronize s1 and s2\n\n for _ in range(1):\n cur_offset_outputs = [\n 
self.FlowProject(cur_offset_outputs[0],depth_inv[0],\n self.FlowProjection_threshhold,\n refinputs=[cur_offset_input[:,0:3,...],cur_offset_input[:,3:,...]] ),\n self.FlowProject(cur_offset_outputs[1],depth_inv[1],\n self.FlowProjection_threshhold,refinputs=[ cur_offset_input[:,3:,...], cur_offset_input[:,0:3,...]])\n ]\n\n # print(\"flow estimation time\")\n # print(time.time() - lasttime)\n\n # lasttime = time.time()\n depth_inv_maxreg = [d / torch.max(d) for d in depth_inv]\n cur_occlusion_output = [\n depth_inv_maxreg[0],depth_inv_maxreg[1]\n # Variable(torch.cuda.FloatTensor().resize_(cur_filter_input.size(0), 1, cur_filter_input.size(2),\n # cur_filter_input.size(3)).zero_()),\n # Variable(torch.cuda.FloatTensor().resize_(cur_filter_input.size(0), 1, cur_filter_input.size(2),\n # cur_filter_input.size(3)).zero_()),\n # 0.5 * Variable(torch.ones(cur_filter_input.size(0),1,cur_filter_input.size(2),cur_filter_input.size(3)).type(cur_filter_input.data.type())),\n # 0.5 * Variable(torch.ones(cur_filter_input.size(0),1,cur_filter_input.size(2),cur_filter_input.size(3)).type(cur_filter_input.data.type())),\n ]\n\n\n if self.temporal:\n cur_offset_output_c = self.forward_singlePath(self.initScaleNets_offset,cur_offset_input_c)\n cur_offset_output_n = self.forward_singlePath(self.initScaleNets_offset,cur_offset_input_n)\n\n cur_filter_output_c = self.forward_singlePath(self.initScaleNets_filter, cur_filter_input_c)\n cur_filter_output_n = self.forward_singlePath(self.initScaleNets_filter,cur_filter_input_n)\n\n cur_occlusion_output_c = self.forward_singlePath(self.initScaleNets_occlusion,cur_occlusion_input_c)\n cur_occlusion_output_n = self.forward_singlePath(self.initScaleNets_occlusion,cur_occlusion_input_n)\n else:\n cur_offset_output = self.forward_singlePath(self.iterScaleNets_offset, cur_offset_input)\n cur_filter_output = self.forward_singlePath(self.iterScaleNets_filter,cur_filter_input)\n cur_occlusion_output = self.forward_singlePath(self.iterScaleNets_occlusion,cur_occlusion_input)\n if self.temporal:\n cur_offset_output_c = self.forward_singlePath(self.iterScaleNets_offset,cur_offset_input_c)\n cur_offset_output_n = self.forward_singlePath(self.iterScaleNets_offset,cur_offset_input_n)\n\n cur_filter_output_c = self.forward_singlePath(self.iterScaleNets_filter,cur_filter_input_c)\n cur_filter_output_n = self.forward_singlePath(self.iterScaleNets_filter,cur_filter_input_n)\n\n # cur_occlusion_output_c = self.forward_singlePath(self.iterScaleNets_occlusion,cur_occlusion_input_c)\n # cur_occlusion_output_n = self.forward_singlePath(self.iterScaleNets_occlusion,cur_occlusion_input_n)\n\n '''\n STEP 3.4: perform the frame interpolation process \n '''\n\n\n\n timeoffset = time_offsets[frame_index[0]]\n temp_0 = cur_offset_outputs[0][frame_index[0]]\n temp_1 = cur_offset_outputs[1][frame_index[0]]\n cur_offset_output = [temp_0, temp_1]\n ctx0, ctx2 = self.FilterInterpolate_ctx(cur_ctx_output[0],cur_ctx_output[1],cur_offset_output,cur_filter_output, timeoffset)\n\n cur_output, ref0, ref2 = self.FilterInterpolate(cur_input_0, cur_input_2, cur_offset_output,\n cur_filter_output, self.filter_size ** 2,\n timeoffset)\n\n cur_occlusion_output = self.Interpolate_ch(cur_occlusion_output[0], cur_occlusion_output[1],\n cur_offset_output, 1)\n\n rectify_input = torch.cat((cur_output, ref0, ref2,\n cur_offset_output[0], cur_offset_output[1],\n cur_filter_output[0], cur_filter_output[1],\n ctx0, ctx2\n ), dim=1)\n\n cur_output_rectified = self.rectifyNet(rectify_input) + cur_output\n\n\n if 
self.temporal ==True:\n cur_output_c = self.Interpolate(cur_input_2,cur_input_4,cur_offset_output_c,cur_filter_output_c,cur_occlusion_output_c)\n cur_output_n = self.Interpolate(cur_input_4,cur_input_6,cur_offset_output_n,cur_filter_output_n,cur_occlusion_output_n)\n\n temp, forward = torch.split(cur_offset_output, 2, dim=1)\n forward = -forward\n backward, temp = torch.split(cur_offset_output_n,2,dim=1)\n backward = -backward\n\n cur_offset_sym = torch.cat((forward,backward),dim = 1)\n cur_filter_sym = cur_filter_output\n cur_occlusion_sym = cur_occlusion_output\n cur_output_sym = self.Interpolate(cur_input_2,cur_input_4,cur_offset_sym, cur_filter_sym,cur_occlusion_sym)\n\n\n '''\n STEP 3.5: for training phase, we collect the variables to be penalized.\n '''\n if self.training == True:\n losses +=[cur_output - cur_input_1]\n losses += [cur_output_rectified - cur_input_1] \n offsets +=[cur_offset_output]\n filters += [cur_filter_output]\n occlusions += [cur_occlusion_output]\n if self.temporal == True:\n losses+= [cur_output_c - cur_input_3]\n losses+= [cur_output_n - cur_input_5]\n losses+= [cur_output_c - cur_output_sym]\n\n '''\n STEP 3.6: prepare inputs for the next finer scale\n '''\n if self.scale_num > 1:\n ## prepare for the next finer scale's requirements.\n pre_scale_offset = F.upsample(cur_offset_output * self.scale_ratio, scale_factor=self.scale_ratio,mode='bilinear')\n pre_scale_filter = F.upsample(cur_filter_output, scale_factor=self.scale_ratio,mode='bilinear')\n pre_scale_occlusion = F.upsample(cur_offset_output, scale_factor=self.scale_ratio,mode='bilinear')\n if self.temporal == True:\n pre_scale_offset_c = F.upsample(cur_offset_output_c * self.scale_ratio, scale_factor= self.scale_ratio,mode='bilinear')\n pre_scale_filter_c = F.upsample(cur_filter_output_c, scale_factor=self.scale_ratio,mode='bilinear')\n pre_scale_occlusion_c = F.upsample(cur_occlusion_output_c, scale_factor=self.scale_ratio,mode='bilinear')\n\n pre_scale_offset_n = F.upsample(cur_offset_output_n * self.scale_ratio, scale_factor= self.scale_ratio,mode='bilinear')\n pre_scale_filter_n = F.upsample(cur_filter_output_n, scale_factor=self.scale_ratio, mode='bilinear')\n pre_scale_occlusion_n = F.upsample(cur_occlusion_output_n, scale_factor=self.scale_ratio, mode='bilinear')\n\n '''\n STEP 4: return the results\n '''\n if self.training == True:\n\n return losses, offsets,filters,occlusions\n else:\n # if in test phase, we directly return the interpolated frame\n if self.temporal == False:\n cur_outputs = [cur_output,cur_output_rectified]\n return cur_outputs,cur_offset_output,cur_filter_output,cur_occlusion_output\n else:\n return cur_output_c, cur_output_sym",
"def video_to_tensor(pic):\n return torch.from_numpy(pic.transpose([3,0,1,2]))",
"def video_to_tensor(pic):\n return torch.from_numpy(pic.transpose([3, 0, 1, 2]))",
"def forward(self, src, device):\n\n src = torch.as_tensor(src).float().to(device)\n\n\n # Set initial hidden and cell states \n h0 = torch.zeros(self.num_layers, src.shape[0], self.hidden_dim).to(device)\n c0 = torch.zeros(self.num_layers, src.shape[0], self.hidden_dim).to(device)\n\n # shape of lstm_out: [batch_size, input_size, hidden_dim]\n # shape of self.hidden: (a, b), where a and b both have shape (num_layers, batch_size, hidden_dim).\n lstm_out, self.hidden = self.lstm(src, (h0, c0)) \n \n # Only take the output from the final timetep\n # Can pass on the entirety of lstm_out to the next layer if it is a seq2seq prediction\n #print(lstm_out.size())\n y_pred = self.linear(lstm_out[:, -1, :].view(src.shape[0], -1))\n return y_pred",
"def forward(self, input_var: Tensor, hidden: Optional[HiddenDict] = None, **additional: Dict) -> RecurrentOutput:\n ...",
"def forward(self, x: Tensor) -> Tensor: # type: ignore\n x = self.backbone(x)\n x = x.view(x.size(0), -1)\n if self.head_layers is not None:\n out = self.imagehead(x)\n return out\n else:\n return x",
"def forward(self, observation: Tensor) -> Tensor:\n pass",
"def forward(self, x):\n x = tensor(x).unsqueeze(1)\n x = self.cnn(x)\n\n # LSTM from here\n batch_size = x.shape[0]\n x = x.view(batch_size, x.shape[1] * x.shape[2], x.shape[3])\n x = x.permute(2, 0, 1) # Converting from (B,H,W)->(W,B,H)\n\n output = self.rnn(x)\n return output",
"def forward(self, inputs, mode):\n self.hidden = self.init_hidden() \n\n if mode == 'a':\n\n embedding_word = self.embedding(inputs).view(self.embedding.shape.size())\n lstm_out, self.hidden = self.lstm_words(embedding_word.view(len(inputs), 1, -1), self.hidden)\n softmax_out = self.softmax(self.fc(lstm_out)) \n\n if mode == 'b':\n \n embed_chars = self.embedding_chars(inputs).view(self.embedding_chars.shape.size()) \n lstm_out_chars, self.hidden = self.lstm_chars(embed_chars.view(len(inputs), 1, -1),self.hidden) \n softmax_out = self.softmax(self.fc(lstm_out_chars))\n\n if mode == 'c': \n\n embedding_prefix = self.embeds_prefix(inputs[0]).view(self.embeds_prefix.shape.size())\n lstm_out_prefix, self.hidden = self.lstm_prefix(embedding_prefix.view(len(inputs[0]), 1, -1), self.hidden)\n embedding_suffix = self.embeds_suffix(inputs[1]).view(self.embeds_suffix.shape.size())\n lstm_out_suffix, self.hidden = self.lstm_suffix(embedding_suffix.view(len(inputs[1]), 1, -1), self.hidden)\n lstm_out = lstm_out_prefix+lstm_out_suffix\n softmax_out = self.softmax(self.fc(lstm_out)) \n\n if mode == 'd': \n\n embedding_c_words = self.embedding_concat_words(inputs[0]).view(self.embedding_concat_words.shape.size())\n embedding_c_chars = self.embedding_concat_chars(inputs[1]).view(self.embedding_concat_chars.shape.size())\n concat_input = torch.cat((embedding_c_words, embedding_c_chars),1)\n lstm_out, self.hidden = self.lstm_concat(concat_input.view(100 , 1, -1), self.hidden)\n softmax_out = self.softmax(self.fc(lstm_out)) \n\n return softmax_out, self.hidden",
"def forward(self,\n img,\n x,\n init_states=None):\n img_feature = self.cnn(img)\n #x = self.linear_in(x)\n x = self.embedding(x)\n #print(x.shape)\n bs, seq_sz, _ = x.size()\n hidden_seq = []\n \n if init_states is None:\n h_t, c_t = (\n torch.zeros(bs, self.hidden_size).to(x.device),\n torch.zeros(bs, self.hidden_size).to(x.device),\n )\n else:\n h_t, c_t = init_states\n\n x_t = img_feature.reshape(bs, self.hidden_size)\n i_t = torch.sigmoid(img_feature + h_t @ self.V_i + self.b_i)\n f_t = torch.sigmoid(img_feature + h_t @ self.V_f + self.b_f)\n g_t = torch.tanh(img_feature + h_t @ self.V_c + self.b_c)\n o_t = torch.sigmoid(img_feature + h_t @ self.V_o + self.b_o)\n c_t = f_t * c_t + i_t * g_t\n h_t = o_t * torch.tanh(c_t) \n # hidden_seq.append(h_t.unsqueeze(0)) \n\n for t in range(seq_sz):\n x_t = x[:, t, :] # 4*512\n # print(x_t.shape)\n #x_t = self.bn1(x_t)\n i_t = torch.sigmoid(x_t @ self.U_i + h_t @ self.V_i + self.b_i)\n f_t = torch.sigmoid(x_t @ self.U_f + h_t @ self.V_f + self.b_f)\n g_t = torch.tanh(x_t @ self.U_c + h_t @ self.V_c + self.b_c)\n o_t = torch.sigmoid(x_t @ self.U_o + h_t @ self.V_o + self.b_o)\n c_t = f_t * c_t + i_t * g_t\n h_t = o_t * torch.tanh(c_t)# 4* 512\n\n hidden_seq.append(h_t.unsqueeze(0))\n \n #reshape hidden_seq p/ retornar\n hidden_seq = torch.cat(hidden_seq, dim=0)\n hidden_seq = hidden_seq.transpose(0, 1).contiguous()\n hidden_seq = self.linear_out(hidden_seq)\n\n seq_pred = self.softmax(hidden_seq)\n return seq_pred, (h_t, c_t)",
"def update(self, incoming):\n try:\n self.incoming, self.incoming_shape = get_input(incoming)\n except ValueError:\n # We might deal with an unkown shape here, therefore keep the old known shape as incoming_shape\n try:\n self.incoming = incoming.get_output\n except AttributeError:\n self.incoming = lambda **kwargs: incoming\n \n with tf.variable_scope(self.layer_scope):\n self.out = self.incoming()",
"def forward(self, frames):\n # global low_level_representation, semantic_representation\n batch_size, time_steps, height, width, channels = frames.get_shape()\n\n with tf.name_scope(name=self.name):\n if self.encoder_mode == 'ConvLSTM':\n conv_lstm_cell = cell.ConvLSTMCell(shape=[height, width], filters=channels, kernel=[3, 3], normalize=True)\n fw_output, fw_state = rnn_inference(frames, conv_lstm_cell, 'forward_bilstm', False, 'conv')\n bw_output, bw_state = rnn_inference(frames, conv_lstm_cell, 'backward_bilstm', True, 'conv')\n # aggregate the output\n encoder_output = fw_output + bw_output # (128, 32, 224, 224, 6)\n\n # construct encoder state, but check whether it is valid to directly add the cell and hidden\n encoder_cell = fw_state[0] + bw_state[0]\n encoder_hidden = fw_state[1] + bw_state[1]\n encoder_state = tf.nn.rnn_cell.LSTMStateTuple(encoder_cell, encoder_hidden)\n print(encoder_state)\n\n # use 3d convolutional block before the bidirectional lstm\n elif self.encoder_mode == 'conv2lstm':\n conv_out = cnn_inference(frames, name='conv') # [batch, time, 512]\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.hidden_units)\n fw_output, fw_state = rnn_inference(conv_out, lstm_cell, 'forward_bilstm', False, 'linear')\n bw_output, bw_state = rnn_inference(conv_out, lstm_cell, 'backward_bilstm', True, 'linear')\n\n # aggregate the output\n encoder_output = fw_output + bw_output # (128, 32, 1200)\n\n # construct encoder state, but check whether it is valid to directly add the cell and hidden\n encoder_cell = fw_state[0] + bw_state[0]\n encoder_hidden = fw_state[1] + bw_state[1]\n encoder_state = tf.nn.rnn_cell.LSTMStateTuple(encoder_cell, encoder_hidden)\n\n elif self.encoder_mode == 'C3D':\n pass\n\n return encoder_output, encoder_state",
"def forward(self, inp):\n self.rnn.flatten_parameters() # Enables faster multi-GPU training.\n\n rnn_output, _ = self.rnn(inp.transpose(1, -1))\n rnn_output = self.dropout(rnn_output)\n\n return rnn_output.transpose(1, -1)",
"def forward(self, x):\n if self.training:\n x = self.input_pert(x)\n x = self.encoder(x)\n x = self.decoder(x)\n return x",
"def forward(self, prev_state, obs_t):\r\n # Use your network to compute qvalues for given state\r\n #print(state_t.shape)\r\n h = self.conv(obs_t)\r\n\r\n h = h.view(h.size(0), -1)\r\n\r\n new_state = h_new, c_new = self.lstm(h, prev_state)\r\n advantage = self.adv(h_new)\r\n value = self.val(h_new)\r\n\r\n\r\n adv_mean = torch.mean(advantage, dim=1, keepdim=True)\r\n qvalues = value + advantage - adv_mean\r\n\r\n return new_state, qvalues"
]
| [
"0.6438571",
"0.64079285",
"0.63817257",
"0.63208973",
"0.63205355",
"0.62463313",
"0.62463313",
"0.62463313",
"0.6121116",
"0.6089716",
"0.604558",
"0.60415477",
"0.6013444",
"0.5952376",
"0.5912326",
"0.5900172",
"0.5891946",
"0.583032",
"0.5830268",
"0.57746786",
"0.57531965",
"0.5744141",
"0.5732329",
"0.5717393",
"0.5654439",
"0.5638127",
"0.5626711",
"0.5626561",
"0.5617365",
"0.5595186"
]
| 0.78311425 | 0 |
Wait for the replica to become AVAILABLE on the given RSE as a result of a pending transfer | def __wait_for_replica_transfer(dst_rse_id, scope, name, max_wait_seconds=MAX_POLL_WAIT_SECONDS, transfertool=None):
replica = {}
for _ in range(max_wait_seconds):
poller(once=True, older_than=0, partition_wait_time=0, transfertool=transfertool)
finisher(once=True, partition_wait_time=0)
replica = replica_core.get_replica(rse_id=dst_rse_id, scope=scope, name=name)
if replica['state'] != ReplicaState.COPYING:
break
time.sleep(1)
return replica | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait(self):\n for _ in range(15):\n time.sleep(10)\n if self.ready:\n break\n else:\n raise RuntimeError('timeout, lease failed to start')",
"def _wait_ready(self):\n command = self._recv_from_client()\n while command != \"READY\":\n command = self._client.recv_from_client()",
"def waitUntilSuccess():",
"def _wait_expiration(self, raiden, transfer, sleep=DEFAULT_EVENTS_POLL_TIMEOUT):\n # pylint: disable=no-self-use\n\n expiration = transfer.lock.expiration + 1\n\n while True:\n current_block = raiden.get_block_number()\n\n if current_block > expiration:\n return\n\n gevent.sleep(sleep)",
"def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()",
"def wait_for_clone(repo, wait_for_ready, http_exc):\n\n start_time = time.time()\n while time.time() - start_time < wait_for_ready:\n repo.wipe_data()\n try:\n if repo.is_cloned:\n return\n\n except HTTPRequestError:\n _mod_log().debug('Failed to get status of the repository %s', repo.rid)\n\n raise SAPCliError(f'Waiting for the repository to be in READY state timed out\\n{http_exc}')",
"def _wait(self, remain):\n print \"%s waiting %s\" % (self.uid, remain)\n sleep(remain) # dummy implementation just for testing",
"def wait(self):\n\t\twhile True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)",
"def wait_for_server_ready(self, params):\n hub_client = QTask()\n hub_client.createBlindTask('ubqc', params)\n ret = hub_client.waitBlindTask(1)\n if ret is None:\n return False\n\n path, secret = ret\n self.entry = f\"{blindCompAddr}/{path}\"\n self.secret = secret\n\n return True",
"def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")",
"def wait_vm_operation(self, params: dict) -> Tuple[\"Status\", dict]:",
"def wait_till_read_out():\n\n\trespond = send_command('waitreadout')",
"def wait_for_data(receiver):\n\n while not receiver.available(pipes[1]):\n time.sleep(0.01)",
"def _wait_secondaries_catch_up(self):\n\n # If this is a config server, we're done. Else, we have to wait for the secondary to catch up\n if self.name == 'config':\n logging.info(\"Config server does not need to check replication lag, finishing\")\n return\n\n # Setting maintenance mode to True, and waiting for the secondary to catch up\n self.client['admin'].command({'replSetMaintenance' : True})\n while True:\n members = self.client['admin'].command('replSetGetStatus')['members']\n primary = None\n self_member = None\n most_recent_optime = datetime.datetime.fromtimestamp(0)\n for m in members:\n if m['state'] == 1:\n primary = m\n if 'self' in m and m['self']:\n self_member = m\n if m['optimeDate'] > most_recent_optime:\n most_recent_optime = m['optimeDate']\n if primary is not None:\n most_recent_optime = primary['optimeDate']\n repl_lag = most_recent_optime - self_member['optimeDate']\n if repl_lag.seconds < 2:\n logging.info(\"Replication lag for secondary %s is %d seconds, finishing\" % (self_member['name'], repl_lag.seconds))\n self.client['admin'].command({'replSetMaintenance' : False})\n break\n logging.info(\"Replication lag for secondary %s is %d seconds, waiting a bit more\" % (self_member['name'], repl_lag.seconds))\n time.sleep(0.5)",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait():\n time.sleep(1)",
"def wait_for_recv_response(self, client):\r\n for i in range(int(client.timeout/TIMEOUT_CHECK)-1):\r\n sleep(TIMEOUT_CHECK)\r\n with client.incoming_data_lock:\r\n if client.waiting_for_response == 0:\r\n return\r\n sleep(TIMEOUT_CHECK)\r\n with client.incoming_data_lock:\r\n if client.waiting_for_response != 0:\r\n client.waiting_for_response = 0\r\n self.send_ipc(client.socket, self.serializer.RESULT_SUCCESS, SERIALIZER_CMD.RECV_EMPTY, [True])",
"def wait():\n pass",
"def _server_poll_expcompleted_(self):\n #print \"class Princeton_CCD function _server_poll_expcompleted_\" \n try:\n last_state = self.polled_running\n except (AttributeError,UnboundLocalError):\n self.polled_running = False\n last_state = False\n self.polled_running = self.query_running()\n if (not bool(last_state) and bool(self.polled_running)):\n self.begin_acq_time = time.time()\n #print self.query_running(), last_state\n #if ((last_state == True) and (self.polled_running == False)): CP\n if (bool(last_state) and not bool(self.polled_running)):\n self.end_acq_time = time.time()\n return True\n else:\n return False",
"def wait_for_ready(self, instance_id, limit, delay=1, pending=False):\r\n for count, new_instance in enumerate(repeat(instance_id), start=1):\r\n instance = self.get_instance(new_instance)\r\n last_reload = lookup(instance, 'lastOperatingSystemReload', 'id')\r\n active_transaction = lookup(instance, 'activeTransaction', 'id')\r\n\r\n reloading = all((\r\n active_transaction,\r\n last_reload,\r\n last_reload == active_transaction\r\n ))\r\n\r\n # only check for outstanding transactions if requested\r\n outstanding = False\r\n if pending:\r\n outstanding = active_transaction\r\n\r\n # return True if the instance has only if the instance has\r\n # finished provisioning and isn't currently reloading the OS.\r\n if instance.get('provisionDate') \\\r\n and not reloading and not outstanding:\r\n return True\r\n\r\n if count >= limit:\r\n return False\r\n\r\n sleep(delay)",
"def wait(self):\n time.sleep(self.next())",
"def wait(self, timeout=60, use_pos=False):\n dmov = self.get_pvobj(\"done_moving\")\n if not use_pos and dmov.isinitialized:\n return dmov.wait_for_value(1, timeout)\n else:\n monpv = self.get_pvobj(\"readback\")\n goal = self.get_par(\"drive\")\n deadband = self.get_par(\"retry_deadband\")\n min = goal - abs(deadband)\n max = goal + abs(deadband)\n return monpv.wait_for_range(min, max, timeout)",
"def wait_for_status(self, res, status='ACTIVE', failures=None,\n interval=2, wait=120):\n failures = ['Error'] if failures is None else failures\n return resource.wait_for_status(\n self, res, status, failures, interval, wait)",
"def wait_and_go():\n yield from asyncio.sleep(0.01, loop=self.loop)\n # reader, writer = ...\n _ = yield from asyncio.open_connection(\n *TEST_ADDRESS, loop=self.loop)",
"def _wait_for_ready(self):\n while not self._ready_to_evict():\n if self._ready_waited > self._ready_timeout:\n raise ClusterTimeout()\n\n time.sleep(self.POLL_PERIOD)\n self._ready_waited += self.POLL_PERIOD\n\n self._mds_map = self._volume_client._rados_command(\"mds dump\", {})",
"def _wait_conn(cls, gen: PQGenConn[RV], timeout: Optional[int]) -> RV:\n return waiting.wait_conn(gen, timeout=timeout)",
"def wait_for_region_operation(self, operation):\n print('Waiting for %s.' %(operation))\n while True:\n result = self.compute.regionOperations().get(\n project=self.project,\n region=self.region,\n operation=operation).execute()\n if result['status'] == 'DONE':\n print(\"Done.\")\n if 'error' in result:\n print('Region operations error', result['error'])\n raise RegionOperationsError(result['error'])\n return result\n time.sleep(1)"
]
| [
"0.61075014",
"0.5906587",
"0.58969945",
"0.5668024",
"0.5624796",
"0.5596052",
"0.55692685",
"0.55332476",
"0.54996914",
"0.54770476",
"0.5470404",
"0.5461595",
"0.54182893",
"0.537774",
"0.53650314",
"0.5363016",
"0.5363016",
"0.5363016",
"0.53622925",
"0.5357257",
"0.53549916",
"0.5346251",
"0.533141",
"0.53224015",
"0.5314316",
"0.5298047",
"0.52942187",
"0.5283807",
"0.5269527",
"0.526359"
]
| 0.6588461 | 0 |
Verify that the poller correctly handles nonrecoverable FTS job failures | def test_fts_non_recoverable_failures_handled_on_multihop(vo, did_factory, root_account, replica_client, caches_mock, metrics_mock):
src_rse = 'XRD1'
src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
jump_rse = 'XRD3'
jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)
dst_rse = 'XRD4'
dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)
all_rses = [src_rse_id, jump_rse_id, dst_rse_id]
# Register a did which doesn't exist. It will trigger an non-recoverable error during the FTS transfer.
did = did_factory.random_file_did()
replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)
request = __wait_for_state_transition(dst_rse_id=dst_rse_id, **did)
assert 'Unused hop in multi-hop' in request['err_msg']
assert request['state'] == RequestState.FAILED
request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)
assert request['state'] == RequestState.FAILED
assert request['attributes']['source_replica_expression'] == src_rse
# Each hop is a separate transfer, which will be handled by the poller and marked as failed
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 2
# Finisher will handle transfers of the same multihop one hop at a time
finisher(once=True, partition_wait_time=0)
finisher(once=True, partition_wait_time=0)
# The intermediate request must not be re-scheduled by finisher
with pytest.raises(RequestNotFound):
request_core.get_request_by_did(rse_id=jump_rse_id, **did)
request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
# ensure tha the ranking was correctly decreased for the whole path
assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1
assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1
assert request['state'] == RequestState.QUEUED | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_job_failure(app):\n with worker(app):\n state = wait_for_results(app, length=100, sleep=0.2, maxwait=4)\n\n # Tasks have been delivered and executed.\n assert set(r.return_value for r in all_results(app)) == set(range(100))\n assert len(state.queue.messages) == 0\n\n # Consumer groups behaved properly.\n assert state.queue.info.groups == 1\n assert state.queue.groups[0].pending == 0\n\n # Nothing in the DLQ.\n assert len(state.dead.messages) == 0\n\n # Any scheduled tasks completed and removed.\n assert len(state.schedule) == 0",
"def test_fts_recoverable_failures_handled_on_multihop(vo, did_factory, root_account, replica_client, file_factory, caches_mock, metrics_mock):\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n # Create and upload a real file, but register it with wrong checksum. This will trigger\n # a FTS \"Recoverable\" failure on checksum validation\n local_file = file_factory.file_generator()\n did = did_factory.random_file_did()\n did_factory.upload_client.upload(\n [\n {\n 'path': local_file,\n 'rse': src_rse,\n 'did_scope': did['scope'].external,\n 'did_name': did['name'],\n 'no_register': True,\n }\n ]\n )\n replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])\n\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, **did)\n assert request['state'] == RequestState.FAILED\n request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n assert request['state'] == RequestState.FAILED\n\n # Each hop is a separate transfer, which will be handled by the poller and marked as failed\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 2",
"def testTrialErrored2(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + stats[str(1)][\"n\"]\n sched, mock_runner = self.schedulerSetup(trial_count)\n trials = sched._state[\"bracket\"].current_trials()\n for t in trials[:-1]:\n mock_runner._launch_trial(t)\n sched.on_trial_result(\n mock_runner, t, result(stats[str(1)][\"r\"], 10))\n\n mock_runner._launch_trial(trials[-1])\n sched.on_trial_error(mock_runner, trials[-1])\n self.assertEqual(len(sched._state[\"bracket\"].current_trials()),\n self.downscale(stats[str(1)][\"n\"], sched))",
"def errorReceived(results):\n self.client.transport.loseConnection()\n self.server.transport.loseConnection()\n\n # Check what the server logs\n errors = self.flushLoggedErrors(imap4.IllegalQueryError)\n self.assertEqual(len(errors), 1)\n\n # Verify exception given to client has the correct message\n self.assertEqual(\n str(b\"SEARCH failed: FOO is not a valid search criteria\"),\n str(results))",
"def test_check_freq_ts_crash(self):\n self.assertEqual(check_freq(self.jobset3), 'ocrashed')",
"def errorReceived(results):\n self.client.transport.loseConnection()\n self.server.transport.loseConnection()\n\n # Check what the server logs\n errors = self.flushLoggedErrors(imap4.IllegalQueryError)\n self.assertEqual(len(errors), 1)\n\n # Verify exception given to client has the correct message\n self.assertEqual(\n str(b\"SEARCH failed: Invalid search command FOO\"),\n str(results),\n )",
"def _retry_occurred(self):",
"def test_retry_run(self):\n pass",
"def testTrialErrored(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + 3\n sched, mock_runner = self.schedulerSetup(trial_count)\n t1, t2, t3 = sched._state[\"bracket\"].current_trials()\n for t in [t1, t2, t3]:\n mock_runner._launch_trial(t)\n\n sched.on_trial_error(mock_runner, t3)\n self.assertEqual(\n TrialScheduler.PAUSE,\n sched.on_trial_result(\n mock_runner, t1, result(stats[str(1)][\"r\"], 10)))\n self.assertEqual(\n TrialScheduler.CONTINUE,\n sched.on_trial_result(\n mock_runner, t2, result(stats[str(1)][\"r\"], 10)))",
"def abort_search(self):\n self._raise_not_supported()",
"def test_later_failure_result(self):\n d = Deferred()\n dr = EventualResult(d, None)\n result_list = []\n done = append_in_thread(result_list, dr.wait, 100)\n time.sleep(0.1)\n d.errback(RuntimeError())\n done.wait(100)\n self.assertEqual(\n (result_list[0], result_list[1].__class__), (False, RuntimeError))",
"def test_loopFailure_failedRecovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldAssign = JobItem.assign\n @inlineCallbacks\n def _assign(self, when, overdue):\n work = yield self.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n yield oldAssign(self, when, overdue)\n\n self.patch(JobItem, \"assign\", _assign)\n\n @inlineCallbacks\n def _failedToRun(self, locked=False, delay=None):\n raise ValueError(\"oops\")\n\n self.patch(JobItem, \"failedToRun\", _failedToRun)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 2)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 0)\n self.assertEqual(jobs[0].notBefore, fakeNow - datetime.timedelta(20 * 60))\n self.assertEqual(jobs[1].assigned, None)\n self.assertEqual(jobs[1].isAssigned, 0)\n self.assertEqual(jobs[1].failed, 0)\n self.assertEqual(jobs[1].notBefore, fakeNow - datetime.timedelta(20 * 60, 5))",
"def test_invalidTerm(self):\n query = 'FOO'\n\n def search():\n return self.client.search(query)\n\n d = self.connected.addCallback(strip(search))\n d = self.assertFailure(d, imap4.IMAP4Exception)\n\n def errorReceived(results):\n \"\"\"\n Verify that the server logs an IllegalQueryError and the\n client raises an IMAP4Exception with 'Search failed:...'\n \"\"\"\n self.client.transport.loseConnection()\n self.server.transport.loseConnection()\n\n # Check what the server logs\n errors = self.flushLoggedErrors(imap4.IllegalQueryError)\n self.assertEqual(len(errors), 1)\n\n # Verify exception given to client has the correct message\n self.assertEqual(\n str(b\"SEARCH failed: FOO is not a valid search criteria\"),\n str(results))\n\n d.addCallback(errorReceived)\n d.addErrback(self._ebGeneral)\n self.loopback()\n return d",
"def failure_occurred(self):\n if self._running_on_failure_keyword or not self.run_on_failure_keyword:\n return\n try:\n self._running_on_failure_keyword = True\n BuiltIn().run_keyword(self.run_on_failure_keyword)\n except Exception as err:\n logger.warn(\"Keyword '%s' could not be run on failure: %s\"\n % (self.run_on_failure_keyword, err))\n finally:\n self._running_on_failure_keyword = False",
"def on_failure(self):\n pass",
"def test_raise_error_on_parallel_unavailable(self):\r\n self.assertRaises(\r\n RuntimeError,\r\n raise_error_on_parallel_unavailable,\r\n {})\r\n self.assertRaises(RuntimeError, raise_error_on_parallel_unavailable,\r\n {'jobs_to_start': '1'})\r\n raise_error_on_parallel_unavailable({'jobs_to_start': '2'})\r\n raise_error_on_parallel_unavailable({'jobs_to_start': '24'})",
"def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')",
"def check_errors(self) -> None:",
"def test_loopFailure_recovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldAssign = JobItem.assign\n @inlineCallbacks\n def _assign(self, when, overdue):\n work = yield self.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n yield oldAssign(self, when, overdue)\n\n self.patch(JobItem, \"assign\", _assign)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 1)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 1)\n self.assertGreater(jobs[0].notBefore, datetime.datetime.utcnow() + datetime.timedelta(seconds=30))",
"def test_temporaryFailure(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Next, create failing work that's actually far enough into the past to run.\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertTrue(len(jobs) == 1)\n self.assertTrue(jobs[0].assigned is None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertTrue(jobs[0].failed == 1)\n self.assertTrue(jobs[0].notBefore > datetime.datetime.utcnow() + datetime.timedelta(seconds=90))",
"def test_loopFailure_noRecovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldNextJob = JobItem.nextjob\n @inlineCallbacks\n def _nextJob(cls, txn, now, minPriority, rowLimit):\n job = yield oldNextJob(txn, now, minPriority, rowLimit)\n work = yield job.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n\n self.patch(JobItem, \"nextjob\", classmethod(_nextJob))\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 2)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 0)\n self.assertEqual(jobs[0].notBefore, fakeNow - datetime.timedelta(20 * 60))\n self.assertEqual(jobs[1].assigned, None)\n self.assertEqual(jobs[1].isAssigned, 0)\n self.assertEqual(jobs[1].failed, 0)\n self.assertEqual(jobs[1].notBefore, fakeNow - datetime.timedelta(20 * 60, 5))",
"def testSearchAlgStalled(self):\n ray.init(num_cpus=4, num_gpus=2)\n experiment_spec = {\n \"run\": \"__fake\",\n \"num_samples\": 3,\n \"stop\": {\n \"training_iteration\": 1\n }\n }\n experiments = [Experiment.from_json(\"test\", experiment_spec)]\n searcher = _MockSuggestionAlgorithm(max_concurrent=1)\n searcher.add_configurations(experiments)\n runner = TrialRunner(search_alg=searcher)\n runner.step()\n trials = runner.get_trials()\n self.assertEqual(trials[0].status, Trial.RUNNING)\n\n runner.step()\n self.assertEqual(trials[0].status, Trial.TERMINATED)\n\n trials = runner.get_trials()\n runner.step()\n self.assertEqual(trials[1].status, Trial.RUNNING)\n self.assertEqual(len(searcher.live_trials), 1)\n\n searcher.stall = True\n\n runner.step()\n self.assertEqual(trials[1].status, Trial.TERMINATED)\n self.assertEqual(len(searcher.live_trials), 0)\n\n self.assertTrue(all(trial.is_finished() for trial in trials))\n self.assertFalse(searcher.is_finished())\n self.assertFalse(runner.is_finished())\n\n searcher.stall = False\n\n runner.step()\n trials = runner.get_trials()\n self.assertEqual(trials[2].status, Trial.RUNNING)\n self.assertEqual(len(searcher.live_trials), 1)\n\n runner.step()\n self.assertEqual(trials[2].status, Trial.TERMINATED)\n self.assertEqual(len(searcher.live_trials), 0)\n self.assertTrue(searcher.is_finished())\n self.assertTrue(runner.is_finished())",
"def _nupicHyperSearchHasErrors(hyperSearchJob):\n # TODO flesh me out\n\n # Get search ID for our latest hypersearch\n\n # Query Nupic for experiment failures in the given search\n\n return False",
"def _fail_on_bad_torque_start(self):\n for bundle in self._model.batch_get_bundles(state=State.WAITING_FOR_WORKER_STARTUP, bundle_type='run'):\n failure_message = self._read_torque_error_log(bundle.metadata.job_handle)\n if failure_message is None and time.time() - bundle.metadata.last_updated > 20 * 60:\n failure_message = 'Worker failed to start. You may have requested too many resources.'\n if failure_message is not None:\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})",
"def test_multihop_receiver_on_failure(vo, did_factory, replica_client, root_account, caches_mock, metrics_mock):\n receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'all_vos': True, 'total_threads': 1})\n receiver_thread.start()\n\n try:\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n # Register a did which doesn't exist. It will trigger a failure error during the FTS transfer.\n did = did_factory.random_file_did()\n replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])\n\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=jump_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.FAILED\n assert 'Unused hop in multi-hop' in request['err_msg']\n\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 1\n\n # Finisher will handle transfers of the same multihop one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n # The intermediate request must not be re-scheduled by finisher\n with pytest.raises(RequestNotFound):\n request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # ensure tha the ranking was correctly decreased for the whole path\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1\n assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1\n assert request['state'] == RequestState.QUEUED\n finally:\n receiver_graceful_stop.set()\n receiver_thread.join(timeout=5)\n receiver_graceful_stop.clear()",
"def test_poll(self):\n ret = poll.poll(self.args)\n self.assertEqual(ret, poll.NOERROR)",
"def test_results_lookup_error(self, affiliate_items):\n item_pks = affiliate_items.values_list('pk', flat=True)\n\n def error_first(item):\n if item.name == \"0\":\n raise LookupError()\n\n update_function = mock.Mock(side_effect=error_first)\n batch_job = BatchJob(affiliate_items, update_function)\n\n success_count = 0\n for result in batch_job.run():\n success_count += int(not result.is_error)\n\n assert success_count == 3\n\n items = AffiliateItem.objects.filter(pk__in=item_pks)\n assert items.count() == 3",
"def test_failures(self):\n reader = TextReader('jacksheet', subject='R1XXX', localization=0)\n with pytest.raises(FileNotFoundError):\n reader.load()",
"def on_trial_error(self, trial: Trial):\n pass",
"def failed(self):\n\t\tpass"
]
| [
"0.62607217",
"0.59742904",
"0.58287543",
"0.57768625",
"0.57686794",
"0.57673967",
"0.5752812",
"0.57482076",
"0.5740342",
"0.5722048",
"0.56978965",
"0.5689482",
"0.5688108",
"0.5656253",
"0.5634875",
"0.56320745",
"0.5628242",
"0.5626997",
"0.56194526",
"0.5616439",
"0.55902445",
"0.5579771",
"0.55669695",
"0.5557042",
"0.5551753",
"0.55297315",
"0.55084646",
"0.55022395",
"0.5476697",
"0.54671544"
]
| 0.60350597 | 1 |
Verify that the receiver correctly handles multihop jobs which fail | def test_multihop_receiver_on_failure(vo, did_factory, replica_client, root_account, caches_mock, metrics_mock):
receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'all_vos': True, 'total_threads': 1})
receiver_thread.start()
try:
src_rse = 'XRD1'
src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
jump_rse = 'XRD3'
jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)
dst_rse = 'XRD4'
dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)
all_rses = [src_rse_id, jump_rse_id, dst_rse_id]
# Register a did which doesn't exist. It will trigger a failure error during the FTS transfer.
did = did_factory.random_file_did()
replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])
rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)
request = __wait_for_state_transition(dst_rse_id=jump_rse_id, run_poller=False, **did)
assert request['state'] == RequestState.FAILED
request = __wait_for_state_transition(dst_rse_id=dst_rse_id, run_poller=False, **did)
assert request['state'] == RequestState.FAILED
assert 'Unused hop in multi-hop' in request['err_msg']
assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 1
# Finisher will handle transfers of the same multihop one hop at a time
finisher(once=True, partition_wait_time=0)
finisher(once=True, partition_wait_time=0)
# The intermediate request must not be re-scheduled by finisher
with pytest.raises(RequestNotFound):
request_core.get_request_by_did(rse_id=jump_rse_id, **did)
request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
# ensure tha the ranking was correctly decreased for the whole path
assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1
assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1
assert request['state'] == RequestState.QUEUED
finally:
receiver_graceful_stop.set()
receiver_thread.join(timeout=5)
receiver_graceful_stop.clear() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_job_failure(app):\n with worker(app):\n state = wait_for_results(app, length=100, sleep=0.2, maxwait=4)\n\n # Tasks have been delivered and executed.\n assert set(r.return_value for r in all_results(app)) == set(range(100))\n assert len(state.queue.messages) == 0\n\n # Consumer groups behaved properly.\n assert state.queue.info.groups == 1\n assert state.queue.groups[0].pending == 0\n\n # Nothing in the DLQ.\n assert len(state.dead.messages) == 0\n\n # Any scheduled tasks completed and removed.\n assert len(state.schedule) == 0",
"def test_multihop_receiver_on_success(vo, did_factory, root_account, caches_mock, metrics_mock):\n receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'all_vos': True, 'total_threads': 1})\n receiver_thread.start()\n\n try:\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n did = did_factory.upload_test_file(src_rse)\n rule_priority = 5\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None, priority=rule_priority)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=jump_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.DONE\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.DONE\n\n fts_response = FTS3Transfertool(external_host=TEST_FTS_HOST).bulk_query({request['external_id']: {request['id']: request}})\n assert fts_response[request['external_id']][request['id']].job_response['priority'] == rule_priority\n\n # Two hops; both handled by receiver\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 2\n finally:\n receiver_graceful_stop.set()\n receiver_thread.join(timeout=5)\n receiver_graceful_stop.clear()",
"def test_redelivery_of_rejected_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 99)\n localConfig.submit_sm_throughput = 3\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 60 messages to the queue\n startAt = datetime.now()\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 60:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n counter = 0\n _receivedSubmitsCount = 0\n # Wait for 40 seconds before checking if all submits were delivered\n # It will check for throughput in each iteration\n while counter < 30:\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n\n _receivedSubmitsCount = len(receivedSubmits)\n\n # Wait some time\n yield waitFor(1)\n\n counter += 1\n endAt = datetime.now()\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(2)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 60)",
"def test_raise_error_on_parallel_unavailable(self):\r\n self.assertRaises(\r\n RuntimeError,\r\n raise_error_on_parallel_unavailable,\r\n {})\r\n self.assertRaises(RuntimeError, raise_error_on_parallel_unavailable,\r\n {'jobs_to_start': '1'})\r\n raise_error_on_parallel_unavailable({'jobs_to_start': '2'})\r\n raise_error_on_parallel_unavailable({'jobs_to_start': '24'})",
"def test_redelivery_of_rejected_messages_after_restart(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 9999)\n localConfig.requeue_delay = 1\n localConfig.submit_sm_throughput = 1\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 4 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 4:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n msgid = yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 5 seconds before stopping\n yield waitFor(5)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(5)\n\n # Save the count before starting the connector\n _submitRecordsCount = len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Wait for 5 seconds before starting again\n yield waitFor(5)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 10 seconds before stopping , all the rest of the queue must be sent\n yield waitFor(10)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(10)\n\n # Update the counter\n _submitRecordsCount += len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Assertions\n self.assertEqual(_submitRecordsCount, 4)",
"def final_send_message_validation(self):\n stats = self.transport.get_stats()\n randomdrops = stats.randomdrops\n forcedrops = stats.forcedrops\n mapsize = len(self.msgmap)\n msg = \"Final verification ForceDrops: %d RandomDrops: %d MISSING: %d\"\n _LOGGER.info(msg, forcedrops, randomdrops, (mapsize - randomdrops))\n\n if randomdrops != mapsize:\n # We will fail this test later, but do some logging here...\n _LOGGER.info(\"Drop queue size: %d\", len(self.droppedmsgs))\n\n if _TRACE.enabled():\n def logmr(id_, mrec):\n _TRACE(\"missing mr id: %s drop: %d\", id_, mrec.alwaysdrop)\n self.msgmap.process_all(logmr)\n\n self.harness.assertEqual(randomdrops, mapsize)",
"def test_send_to_grader_fail(self):\r\n\r\n student_response = \"This is a student submission\"\r\n self.mock_xqueue.send_to_queue.return_value = (1, \"Not Queued\")\r\n result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)\r\n self.assertFalse(result)",
"def test_worker_failure(app):\n sentinel = 50\n\n def mocked_deserialise(fields):\n job = Job.deserialise(fields)\n if job.args == [sentinel]:\n raise Chaos(\"Found sentinel job\")\n return job\n\n with mock.patch(\"fennel.worker.broker.Job\", wraps=Job) as mocked_job:\n mocked_job.deserialise.side_effect = mocked_deserialise\n\n # The worker crashes on the 50th job execution, wait times out.\n with worker(app):\n state = wait_for_results(app, length=100, sleep=0.1, maxwait=1)\n\n assert count_results(app) < 100\n assert sentinel not in (r.return_value for r in all_results(app))\n assert len(state.queue.messages) >= 1\n assert len(state.queue.groups) == 1\n assert state.queue.groups[0].pending >= 1\n assert len(state.heartbeats) == 1\n dead_executor_id = state.heartbeats[0].executor_id\n\n # Complete the job processing with a new worker (must wait long enough for\n # maintenance to happen and the dead worker's pending jobs to be reassigned).\n with worker(app):\n state = wait_for_results(app, length=100, sleep=0.2, maxwait=4)\n\n assert count_results(app) == 100\n assert set(r.return_value for r in all_results(app)) == set(range(100))\n assert len(state.queue.messages) == 0\n assert state.queue.info.groups == 1\n assert state.queue.groups[0].pending == 0\n assert len(state.heartbeats) == 1\n assert state.heartbeats[0].executor_id != dead_executor_id",
"def test__API_with_wrong_answer(self):\n self.mock_connection.state = MockConnection.WRONG_NUM_OF_CONFIRMATIONS\n\n # timeout supposed to be here\n self.assertEqual(self.mutex.lock(), False) # acquire mutex",
"def test_check_for_errors__no_errors(self):\n downloader = _MultithreadedDownloader(mock.Mock(), mock.Mock(), 5)\n\n request = mock.Mock()\n completed_futures = [mock.Mock(exception=mock.Mock(return_value=None))] * 3\n\n # does not raise error\n downloader._check_for_errors(request, completed_futures)",
"def test_results_error_stacktrace(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError('Shopping'))\n batch_job = BatchJob(affiliate_items, updater)\n\n with_message = 0\n for result in batch_job.run():\n with_message += (result.is_error and 'Shopping' in result.details)\n\n assert with_message == 4",
"def test_error(self, mock_validate_session):\n mock_validate_session.return_value = False\n with pytest.raises(SystemExit) as ctx:\n celery_command.worker(Namespace(queues=1, concurrency=1))\n assert str(ctx.value) == \"Worker exiting, database connection precheck failed.\"",
"def transaction_failed_before_processing(self):",
"def test_failed_processing(self):\n # setup\n ledger_api_dialogue, fipa_dialogue = self._setup_fipa_ledger_api_dialogues(self)\n\n self.transaction_behaviour.timedout.add(ledger_api_dialogue.dialogue_label)\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.transaction_behaviour.failed_processing(ledger_api_dialogue)\n\n # after\n self.assert_quantity_in_outbox(0)\n\n # finish_processing\n assert self.transaction_behaviour.timedout == set()\n\n mock_logger.assert_any_call(\n logging.DEBUG,\n f\"Timeout dialogue in transaction processing: {ledger_api_dialogue}\",\n )\n\n # failed_processing\n assert fipa_dialogue in self.transaction_behaviour.waiting",
"def _check_job_completeness(self, jobs):\n for job in concurrent.futures.as_completed(jobs):\n if job.exception():\n raise (job.exception())",
"def test_sync_call_not_ignore_error(self):\n actors = [Actor.remote(i) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n results = []\n for _ in range(10):\n manager.probe_unhealthy_actors(mark_healthy=True)\n results.extend(manager.foreach_actor(lambda w: w.call()))\n # Wait for actors to recover.\n wait_for_restore()\n\n # Some calls did error out.\n self.assertTrue(any([not r.ok for r in results]))\n\n manager.clear()",
"def retry(self):\n # XXX: check whether it is possible to distingish \n # between the error conditions and set meaningfull exitcode\n return False",
"def test_mirco_service_error():\n data_list = [\"1\", \"2\", \"3\"]\n service_list = []\n\n fail_service = MicroService()\n fail_service.process = create_process_fail_func(\"4\")\n service_list.append(fail_service)\n\n for d in data_list:\n service = MicroService()\n service.process = create_process_func(d)\n service_list.append(service)\n\n\n service_queue = build_micro_service_queue(service_list)\n test_data = \"test_data\"\n context = Context()\n context.state = State()\n\n with pytest.raises(SATOSAAuthenticationError):\n service_queue.process_service_queue(context, test_data)",
"def check_errors(self) -> None:",
"def test_results_errors(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError())\n batch_job = BatchJob(affiliate_items, updater)\n\n error_count = 0\n for result in batch_job.run():\n error_count += int(result.is_error)\n\n assert error_count == 4",
"def test_worker_precheck_exception(self):\n assert airflow.settings.validate_session()",
"def test_error_statuses(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n\n # We will call execute 4 times. It will throw 3 errors and 1 valid\n block._execute_snmp_request = MagicMock(\n side_effect=[SAMPLE_ERROR_SNMP_RESPONSE,\n SAMPLE_ERROR_STATUS_SNMP_RESPONSE,\n SAMPLE_SNMP_RESPONSE,\n Exception])\n block._handle_data = MagicMock()\n\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n self.configure_block(block, {\n \"oids\": [{\"oid\": myOID}]\n })\n block.start()\n\n # Send 4 signals to the block, causing 4 requests to go out\n block.process_signals([Signal({\"sig\": i}) for i in range(4)])\n\n # Execute request should have been called 4 times\n self.assertEqual(block._execute_snmp_request.call_count, 4)\n\n # Handle data should only be called for the valid response\n self.assertEqual(block._handle_data.call_count, 1)\n self.assertEqual(block._handle_data.call_args[0][0], [])\n block.stop()",
"def test_ESME_RREPLACEFAIL(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n yield self.add(self.defaultConfig)\n yield self.start(self.defaultConfig.id)\n\n # Wait for 'BOUND_TRX' state\n yield waitFor(2)\n\n # Send submit_sm\n SentSubmitSmPDU = copy.copy(self.SubmitSmPDU)\n SentSubmitSmPDU.params['short_message'] = 'test_error: ESME_RREPLACEFAIL'\n msgid = yield self.submit_sm(self.defaultConfig.id, self.SubmitSmPDU, self.SubmitSmBill.user.uid)\n\n # Wait\n yield waitFor(70)\n\n yield self.stop(self.defaultConfig.id)\n\n # Wait for unbound state\n yield waitFor(2)\n\n # Assertions\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n # By default, ESME_RREPLACEFAIL is not retried !\n self.assertEqual(len(receivedSubmits), 1)",
"def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')",
"def test_prevent_wrong_cores(self):\n self.assertRaises(cinv.host.Error, self.wrong_cores)",
"def _check_results(self):\n if not 'EXECUTION OF GAMESS TERMINATED NORMALLY' in self.file_dic['output']:\n print self.job_name + \" didn't finish\"\n raise TypeError('Calculation didn\\'t finish')",
"def errorCheck(self):\n\t\twhile 1:\n #check for bad state\n\t\t\tif epics.caget(self.error_bypass) == 1:\n\t\t\t\tout_msg=\"Bypass flag is TRUE\"\n elif epics.caget(self.error_bcs) != 1:\n out_msg=\"BCS tripped\"\n elif epics.caget(self.error_mps) != 0:\n out_msg=\"MPS tripped\"\n elif epics.caget(self.error_gaurdian) != 0:\n out_msg=\"Gaurdian tripped\"\n\t\t\n #elif epics.caget(self.error_und_tmit) < 5.0e7:\n # out_msg=\"UND Tmit Low\"\n else:\n out_msg='Everything Okay'\n\n #exit if the stop button is set\n #if not self.mi.getter.caget(\"SIOC:SYS0:ML03:AO702\"):\n\t\t\tif not epics.caget(\"SIOC:SYS0:ML03:AO702\"):\n break\n\n #set the error check message\n epics.caput (\"SIOC:SYS0:ML00:CA000\",out_msg)\n print out_msg\n\n #break out if error check is bypassed\n if (out_msg==\"Bypass flag is TRUE\"):\n break\n\n #break out if everything is okay\n if (out_msg==\"Everything Okay\"):\n epics.caput(self.error_tripped,0)\n break\n\t\t\t\t#return\n else:\n epics.caput(self.error_tripped,1)\n time.sleep(0.1)",
"def test_block_bad_batch(self):\n pass",
"def test_check_for_errors(self):\n downloader = _MultithreadedDownloader(mock.Mock(), mock.Mock(), 5)\n\n request = mock.Mock()\n exception = ValueError(\"failed\")\n\n successful_future = mock.Mock(exception=mock.Mock(return_value=None))\n failed_future = mock.Mock(exception=mock.Mock(return_value=exception))\n completed_futures = (\n ([successful_future] * 2) + [failed_future] + [successful_future]\n )\n\n with pytest.raises(exception.__class__):\n downloader._check_for_errors(request, completed_futures)",
"def test_fts_non_recoverable_failures_handled_on_multihop(vo, did_factory, root_account, replica_client, caches_mock, metrics_mock):\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n # Register a did which doesn't exist. It will trigger an non-recoverable error during the FTS transfer.\n did = did_factory.random_file_did()\n replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])\n\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, **did)\n assert 'Unused hop in multi-hop' in request['err_msg']\n assert request['state'] == RequestState.FAILED\n request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n assert request['state'] == RequestState.FAILED\n assert request['attributes']['source_replica_expression'] == src_rse\n\n # Each hop is a separate transfer, which will be handled by the poller and marked as failed\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 2\n\n # Finisher will handle transfers of the same multihop one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n # The intermediate request must not be re-scheduled by finisher\n with pytest.raises(RequestNotFound):\n request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # ensure tha the ranking was correctly decreased for the whole path\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1\n assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1\n assert request['state'] == RequestState.QUEUED"
]
| [
"0.64318717",
"0.62214303",
"0.6139489",
"0.6066804",
"0.6022608",
"0.5992404",
"0.59125996",
"0.591222",
"0.5903248",
"0.5877526",
"0.58677924",
"0.58624184",
"0.58438545",
"0.5821324",
"0.58089757",
"0.57990533",
"0.57859594",
"0.57769114",
"0.57712877",
"0.5770392",
"0.576773",
"0.5759633",
"0.5744696",
"0.57198",
"0.5710442",
"0.5703661",
"0.5694934",
"0.5687848",
"0.56609416",
"0.5655628"
]
| 0.6547255 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.