| query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4–10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
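The `metadata` column declares one training objective per row: a `triplet` over (`query`, `document`, `negatives`). As a hedged illustration only, the sketch below shows how a row, loaded as a plain Python dict with the field names from the header above, could be expanded into anchor/positive/negative triples and filtered down to its hardest negatives. The helper functions and the interpretation of `negative_scores` are assumptions for illustration, not part of the dataset itself.

```python
# Illustrative only: how one row of this table could be expanded for contrastive training.
# Field names ("query", "document", "negatives", "negative_scores") follow the schema above;
# the helper names are hypothetical and not part of the dataset.

def row_to_triplets(row: dict) -> list[dict]:
    """Pair the query with its positive document and each of its 30 negatives."""
    return [
        {"anchor": row["query"], "positive": row["document"], "negative": negative}
        for negative in row["negatives"]
    ]

def hardest_negatives(row: dict, k: int = 5) -> list[str]:
    """Keep the k highest-scoring negatives, assuming a higher score means closer to the query.

    negative_scores are stored as strings in the table, so they are cast to float here.
    """
    scored = sorted(
        zip(row["negatives"], map(float, row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [negative for negative, _score in scored[:k]]
```

Keeping only the highest-scoring negatives is a common way to focus a contrastive loss on the examples a retriever already confuses with the positive document; whether that suits this data depends on how the scores were produced, which the table does not state.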
Return whether a command has negatively controlled qubits. | def has_negative_control(cmd):
return get_control_count(cmd) > 0 and '0' in cmd.control_state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_available(self, cmd):\n num_qubits = 0\n for qureg in cmd.all_qubits:\n num_qubits += len(qureg)\n return num_qubits <= 2",
"def is_use_qps(self) -> bool:\n if self.qps > 0 and self.second > 0:\n return True\n else:\n return False",
"def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0",
"def is_qword(self):\n return ida_bytes.is_qword(self.flags)",
"def has_commands(self) -> bool:\n return len(self.commands) > 0",
"def is_non_exclusive(self, variable):\n non_exclusive = False\n for sub_effect in self._sub_effects:\n if sub_effect.get_variable() == variable:\n if not sub_effect.is_exclusive():\n non_exclusive = True\n elif len(sub_effect.get_value()) > 0 and not sub_effect.is_negated():\n return False\n return non_exclusive",
"def __bool__(self):\n return not(self.outcome != 0 or self.filled)",
"def qtilde(self) -> bool:\n return self._qtilde",
"def __bool__(self):\n return any(self.smask)",
"def noqueue(self) -> bool:\n return not self.orders",
"def is_unset(self) -> bool:\n return (None in [self.amount, self.action, self.per]) is True",
"def __bool__(self):\n return not self.undefine",
"def is_queued(self):\r\n return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)",
"def is_terminal(self):\n return self.beta.isEmpty()",
"def is_market(self):\n return(not self.is_pending)",
"def still_has_questions(self):\n return self.question_number < len(self.question_list) #returns boolean value",
"def is_empty(self) -> bool:\n return self.command is None and not self.required",
"def canAnswer(self, layer):\n return wt.hangman in layer[wt.keywords]",
"def has_sub_commands(self) -> bool:\n if self.__dict__.get(\"sub_commands\"):\n return True\n\n return False",
"def skill_has_nonpct_condition(es):\n if not skill_has_condition(es):\n return False\n # Is checking the threshold here right? Maybe it should just be checking one_time.\n # Or maybe it's redundant.\n return es.condition.hp_threshold or es.condition.one_time",
"def no_flags_set(self):\n # TODO: unit test me\n return not any(\n (\n self.flag_bookmarked,\n self.flag_candidate,\n self.flag_final_causative,\n self.flag_for_validation,\n self.flag_molecular != \"empty\",\n self.flag_visual != \"empty\",\n self.flag_validation != \"empty\",\n self.flag_phenotype_match != \"empty\",\n self.flag_summary != \"empty\",\n )\n )",
"def is_neg_unate(self, vs=None):\n vs = self._expect_vars(vs)\n basis = self.support - set(vs)\n maxcov = [PC_ONE] * (1 << len(basis))\n # Test whether table entries are monotonically decreasing\n for cf in self.iter_cofactors(vs):\n for i, item in enumerate(cf.pcdata):\n if maxcov[i] == PC_ZERO and item == PC_ONE:\n return False\n maxcov[i] = item\n return True",
"def still_in_hand(self):\n return len(self.hand.cards)!=0",
"def is_queued(self):\n qstat = self._grep_qstat('queued')\n if qstat:\n return True\n return False",
"def hasCustomEffects(self):\n return not self.getHandle().effects.isEmpty()",
"def is_pending(self):\n return self.is_disarming() or self.is_arming()",
"def __bool__(self):\n\t\treturn any(c != 0 for c in self)",
"def is_Q(self):\n return isinstance(self,Q)",
"def still_has_questions(self):\n return self.question_number < len(self.question_list)",
"def is_no_command_supported(command):\n command_type = command.get('command-type')\n if command_type:\n if command_type in ['display-table','display-rest', 'show']:\n return False\n no_supported = command.get('no-supported', True)\n if no_supported == False:\n return False\n return True"
] | [
"0.6443017",
"0.62384444",
"0.6159438",
"0.59778637",
"0.59440106",
"0.59436524",
"0.5875699",
"0.5865012",
"0.5853049",
"0.5851665",
"0.58293706",
"0.582807",
"0.5757742",
"0.5732011",
"0.5711578",
"0.56456465",
"0.56133777",
"0.5585858",
"0.55658156",
"0.55565083",
"0.5542356",
"0.5531333",
"0.55238205",
"0.55220777",
"0.5516755",
"0.5510306",
"0.54946977",
"0.5479426",
"0.54598325",
"0.5458763"
] | 0.69478786 | 0 |
Creates a starboard. A starboard is a channel which has messages with some stars. To configure this starboard (such as max age and threshold, which are 7 days and 5 stars by default), use starconfig's subcommands. See the help for details. | async def starboard(self, ctx):
if self.bot.db.execute("SELECT * FROM starboards WHERE guild_id = ?",(ctx.guild.id,)).fetchone():
return await ctx.say("star.already")
async with ctx.typing():
await ctx.channel.edit(
topic=TOPIC.format(mention=self.bot.user.mention, threshold=5, age=7), # yeah can't be localized
nsfw=False,
reason="Starboard preparation"
)
await ctx.channel.set_permissions(ctx.guild.me,
read_messages=True,
send_messages=True,
add_reactions=True,
manage_messages=True,
embed_links=True,
attach_files=True,
read_message_history=True,
manage_roles=True,
manage_channels=True
)
await ctx.channel.set_permissions(ctx.guild.default_role,
read_messages=True,
send_messages=False,
add_reactions=True,
read_message_history=True
)
tutorial = await ctx.say("star.done", STAR_EMOJI)
try:
await tutorial.pin()
except discord.HTTPException:
pass
self.bot.db.execute("INSERT INTO starboards(guild_id, channel_id,threshold,age,enabled) VALUES (?, ?,5,7,1)", (ctx.guild.id, ctx.channel.id))
starboard_id = self.bot.db.execute("SELECT starboard_id FROM starboards WHERE channel_id = ?", (ctx.channel.id,)).fetchone()["starboard_id"]
self.bot.db.execute("UPDATE guilds SET starboard_id = ? WHERE guild_id = ?", (starboard_id, ctx.guild.id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def add_starboard(self, ctx):\n channel = await ctx.get_text_channel(embed=CustomEmbeds.add(author=\"Channel\",\n description=\"Send a channel to add it to the starboard!\"))\n emotes = await ctx.get_emotes(embed=CustomEmbeds.add(author=\"Emotes\",\n description=\"React with emotes and then click ✅ to add them to the starboard.\"))\n threshold = await ctx.get_int(embed=CustomEmbeds.add(author=\"Add a Threshold\",\n description=\"Send message with the minimum number of reactions for it to be added to the starboard.\"))\n\n guild_starboards = await self.starboards_collection.find_one({\"_id\": ctx.guild.id})\n if guild_starboards is None:\n starboard_len = 0\n else:\n starboard_len = len(guild_starboards.get(\"starboards\"))\n\n starboard = Starboard(index=starboard_len,\n channel=channel,\n emotes=emotes,\n threshold=threshold)\n\n await self.db_add_starboard(ctx.guild, starboard.serialize())\n await ctx.send(embed=CustomEmbeds.confirm(author=\"Starboard Added\", description=f\"ID: {starboard_len}\\n\"\n f\"Channel: {channel.mention}\\n\"\n f\"Emotes: {' '.join(emotes)}\\n\"\n f\"Threshold: {threshold}\"))",
"def create_star(rk_settings, screen, stars, star_number, row_number):\r\n\tstar = Star(rk_settings, screen)\r\n\tstar_width = star.rect.width\r\n\tstar.x = star_width + 2 * star_width * star_number\r\n\tstar.rect.x = star.x\r\n\tstar.rect.y = star.rect.height + 2 * star.rect.height * row_number\r\n\tstars.add(star)",
"async def stars(self, ctx: commands.Context, stars: int):\n self.stars = stars\n await self._update_db()\n\n await ctx.send(\n f\"Done.Now this server needs `{stars}` :star: to appear on the starboard channel.\"\n )",
"def create_star():\n if config.S_LIST == []:\n sitem = scene.Star(randint(2, common.COLS-2), randint(2, common.R1_R))\n config.S_LIST.append(sitem)\n elif randint(0, 5) == 1:\n sitem = scene.Star(randint(2, common.COLS-2), randint(2, common.R1_R))\n config.S_LIST.append(sitem)",
"def create_fleet(rk_settings, screen, rock, stars):\r\n\t# Create a star and find the number of stars in a row.\r\n\tstar = Star(rk_settings, screen)\r\n\tnumber_stars_x = get_number_stars_x(rk_settings, star.rect.width)\r\n\tnumber_rows = get_number_rows(rk_settings, rock.rect.height, \r\n\t\t\t\t\t\t\t\t\tstar.rect.height)\r\n\t\t\t\t\t\t\t\t\t\r\n\t# Create the first row of stars.\r\n\tfor row_number in range(number_rows):\r\n\t\tfor star_number in range(number_stars_x):\r\n\t\t\tcreate_star(rk_settings, screen, stars, star_number,\r\n\t\t\t\t\t\trow_number)",
"def board_stars(self):\r\n return BoardStars(self)",
"def board_star(self, board_star_id):\r\n return BoardStar(self, board_star_id)",
"def _create_stars(self, stars_number, row_number):\n star = Star(self)\n stars_width, stars_height = star.rect.size\n star.x = stars_width + 2 * stars_width * stars_number\n star.rect.x = star.x\n star.rect.y = star.rect.height + 2 * star.rect.height * row_number\n self.stars.add(star)",
"async def starboard(self, ctx):\n pass",
"async def list_starboard(self, ctx):\n entries = []\n guild_starboards = await self.starboards_collection.find_one({\"_id\": ctx.guild.id})\n\n if guild_starboards is None or guild_starboards.get(\"starboards\") is None:\n entries.append((\"No Starboards\", \"This guild has no starboards setup\"))\n return await StarboardPages(ctx, entries=entries).paginate()\n starboards = guild_starboards.get(\"starboards\")\n\n entries.append((\"Guild Starboard Status\", f\"Activated: `{guild_starboards.get('activated')}`\"))\n\n for starboard in starboards:\n entries.append((f\"Starboard #{starboard.get('_id')}\", f\"Channel: <#{starboard.get('channel')}>\\n\"\n f\"Emotes: {' '.join(starboard.get('emotes'))}\\n\"\n f\"Threshold: `{starboard.get('threshold')}`\\n\"\n f\"Created: `{starboard.get('created').strftime('%b %d %Y %H:%M:%S')}`\\n\"\n f\"Activated: `{starboard.get('activated')}`\"))\n\n pages = StarboardPages(ctx, entries=entries)\n await pages.paginate()",
"def _create_galaxy(self):\n # Make a star.\n star = Star(self)\n stars_width, stars_height = star.rect.size\n # Fill galaxy across the screen\n available_space_x = self.settings.screen_width - (2 * stars_width)\n number_stars_x = available_space_x // (2 * stars_width)\n # Determine the number of rows of stars that fit on the screen.\n ship_height = self.ship.rect.height\n available_space_y = (self.settings.screen_height - (3 * stars_height) - ship_height)\n number_rows = available_space_y // (2 * stars_height)\n # Create the full galaxy of stars.\n for row_number in range(number_rows):\n # Create the first row of stars.\n for stars_number in range(number_stars_x):\n self._create_stars(stars_number, row_number)",
"async def set_starboard_channel(self, ctx: commands.Context, channel: discord.TextChannel):\n self.check_if_exist(ctx.guild)\n\n self.starboard_guilds = self.starboard_info.find(\"guilds\")\n try:\n self.starboard_guilds[str(ctx.guild.id)][\"channel\"] = channel.id\n except:\n self.starboard_guilds[str(ctx.guild.id)] = {}\n self.starboard_guilds[str(ctx.guild.id)][\"channel\"] = channel.id\n self.starboard_info.update(\"guilds\", self.starboard_guilds)\n\n await ctx.reply(\"Starred messages will be sent to {0}\".format(channel.mention))",
"async def starred(self, ctx: Message):\n\t\tglobal starred\n\t\tglobal starredauthor\n\t\tawait self.send(\n\t\t f\"Starred Message: {starred}ㅤ|ㅤMessage Creator: @{starredauthor}\")",
"def create_new_board():\n\n board = Board()\n board.print_board()",
"async def starboard_current(self, ctx):\n starboard_settings = self.bot.cache.starboard_settings.get(str(ctx.guild.id))\n if not starboard_settings:\n raise exceptions.Warning(\"Nothing has been configured on this server yet!\")\n\n (\n is_enabled,\n board_channel_id,\n required_reaction_count,\n emoji_name,\n emoji_id,\n emoji_type,\n log_channel_id,\n ) = starboard_settings\n\n if emoji_type == \"custom\":\n emoji = self.bot.get_emoji(emoji_id)\n else:\n emoji = emoji_name\n\n blacklisted_channels = await self.bot.db.execute(\n \"\"\"\n SELECT channel_id FROM starboard_blacklist WHERE guild_id = %s\n \"\"\",\n ctx.guild.id,\n as_list=True,\n )\n\n content = discord.Embed(title=\":star: Current starboard settings\", color=int(\"ffac33\", 16))\n content.add_field(\n name=\"State\", value=\":white_check_mark: Enabled\" if is_enabled else \":x: Disabled\"\n )\n content.add_field(name=\"Emoji\", value=emoji)\n content.add_field(name=\"Reactions required\", value=required_reaction_count)\n content.add_field(\n name=\"Board channel\",\n value=f\"<#{board_channel_id}>\" if board_channel_id is not None else None,\n )\n content.add_field(\n name=\"Log channel\",\n value=f\"<#{log_channel_id}>\" if log_channel_id is not None else None,\n )\n content.add_field(\n name=\"Blacklisted channels\",\n value=\" \".join(f\"<#{cid}>\" for cid in blacklisted_channels)\n if blacklisted_channels\n else None,\n )\n\n await ctx.send(embed=content)",
"def addstar(starname):\n try:\n Star.create(name=starname)\n except IntegrityError:\n print(('Star {0} already in database. Record not created, but can be updated.'.format(starname)))",
"def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})",
"async def post_starred(bot, config, message, reaction_emoji, user):\r\n\t\treacts = message.reactions\r\n\t\tmod_starred = False\r\n\t\tstarlist = None\r\n\t\tstarcount = None\r\n\t\tstarcount_reached = False\r\n\t\t# if the post is older than a week, don't bother putting it on the board\r\n\t\tif (datetime.datetime.now() - message.created_at).total_seconds() > 604800:\r\n\t\t\treturn\r\n\t\t# check if the poster of the starred message is blacklisted from the starboard\r\n\t\tif message.author.id in config[\"starboard\"][\"blacklisted_users\"]:\r\n\t\t\treturn\r\n\t\t# count the number of stars a post has\r\n\t\tfor react in reacts:\r\n\t\t\tif react.emoji == config[\"starboard\"][\"emoji\"]:\r\n\t\t\t\tstarlist = [x async for x in react.users()]\r\n\t\t\t\tstarcount = len(starlist)\r\n\t\t\t\tbreak\r\n\t\telse:\r\n\t\t\treturn\r\n\t\t# check if the star count was reached\r\n\t\ttry:\r\n\t\t\t# if there's a star requirement for a specific channel, and the starred message is in that channel,\r\n\t\t\t# check if the star count surpasses the requirement for that channel\r\n\t\t\tif starcount >= config[\"starboard\"][\"star_amounts\"][message.channel.name]:\r\n\t\t\t\tstarcount_reached = True\r\n\t\t# if there isn't a channel-specific star count this message must follow,\r\n\t\texcept KeyError:\r\n\t\t\t# just check to see if it meets the global requirement\r\n\t\t\tif starcount >= config[\"starboard\"][\"star_amounts\"][\"global\"]:\r\n\t\t\t\tstarcount_reached = True\r\n\t\t# check if a mod starred the post\r\n\t\tfor reactor in starlist:\r\n\t\t\tif Starboard.modcheck(bot, config, reactor) and config[\"starboard\"][\"role_override\"] == \"true\":\r\n\t\t\t\tstarcount_reached = True\r\n\t\t\t\tbreak\r\n\t\t# anti-self-star code\r\n\t\tif message.author.id == user.id:\r\n\t\t\tawait message.remove_reaction(reaction_emoji, message.author)\r\n\t\t\t# count the number of self-star alerts out of the last 50 messages\r\n\t\t\tcounter = 0\r\n\t\t\tasync for message in message.channel.history(limit=50):\r\n\t\t\t\tif \"IS A THOT AND SELF-STARRED THEIR MEME\" in message.content:\r\n\t\t\t\t\tcounter += 1\r\n\t\t\t# if there's been less than three, send a self-star alert\r\n\t\t\t# this is to prevent spam from CERTAIN THOTS THAT LOVE SPAMMING IT\r\n\t\t\tif counter < 3:\r\n\t\t\t\tselfstar_alert = '🚨 🚨 ' + user.mention + ' IS A THOT AND SELF-STARRED THEIR MEME 🚨 🚨'\r\n\t\t\t\tawait message.channel.send(selfstar_alert)\r\n\t\t\treturn\r\n\t\tif starcount_reached and message.author.id != bot.user.id:\r\n\t\t\tawait Starboard.post_to_starboard(bot, message, starcount)",
"def generate_star_file(stack_label, previous_classes_bool=False, recent_class=\"classes_0.star\"):\n star_file = \"{}.star\".format(stack_label)\n if previous_classes_bool:\n print(\"It looks like previous jobs have been run in this directory. The most recent output class is: {}\".format(recent_class))\n new_star_file = os.path.splitext(recent_class)[0]+\"_appended.star\"\n print(\"Instead of classes_0.star, the new particles will be appended to the end of that par file and saved as {}\".format(new_star_file))\n _ = append_new_particles(old_particles=recent_class, new_particles=star_file, output_filename=new_star_file)\n else:\n print(\"No previous classes were found. A new par file will be generated at classes_0.star\")\n new_star_file = \"classes_0.star\"\n shutil.copy(star_file, new_star_file)\n return new_star_file",
"async def setstar(self, ctx: Message):\n\t\tglobal msg #making the variables global so we can access them from any command\n\t\tglobal msgauthor\n\t\tglobal starred\n\t\tglobal starredauthor\n\t\tstarred = msg\n\t\tstarredauthor = msgauthor\n\t\tawait self.send(f\"Starred message was set! You may access it with d!starred\")",
"def create_board(self):\n # # empty 7x7 board\n # board = [[list() for x in range(7)] for y in range(7)]\n # # coordinates of starting marbles\n # black = [[0, 0], [1, 0], [1, 1], [0, 1], [6, 6], [6, 5], [5, 5], [5, 6]]\n # white = [[6, 0], [6, 1], [5, 1], [5, 0], [0, 6], [0, 5], [1, 5], [1, 6]]\n # red = [[1, 3], [2, 2], [2, 3], [2, 4], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [4, 2], [4, 3], [4, 4], [5, 3]]\n # for marble in white:\n # board[marble[0]][marble[1]] = \"B\"\n # for marble in black:\n # board[marble[0]][marble[1]] = \"W\"\n # for marble in red:\n # board[marble[0]][marble[1]] = \"R\"\n # return board\n pass",
"def SetStars(self):\r\n\t\tstartype = [self._iconstars[\r\n\t\t\tself.CalcStar(starnum,\\\r\n\t\t\t\tself._configtmp[\"imagerating\"],\r\n\t\t\t\tself._configtmp[\"userrating\"])]\\\r\n\t\t\tfor starnum in range(1,6)]\r\n\t\tself.bitmapButton1Star.SetBitmapLabel(startype[0])\r\n\t\tself.bitmapButton2Star.SetBitmapLabel(startype[1])\r\n\t\tself.bitmapButton3Star.SetBitmapLabel(startype[2])\r\n\t\tself.bitmapButton4Star.SetBitmapLabel(startype[3])\r\n\t\tself.bitmapButton5Star.SetBitmapLabel(startype[4])",
"def __init__(self, img, header, starobj, halosize=40, padsize=40, mask=None, hscmask=None):\n Celestial.__init__(self, img, mask, header=header)\n if hscmask is not None:\n self.hscmask = hscmask\n self.name = 'star'\n self.scale_bar_length = 3\n # Trim the image to star size\n # starobj should at least contain x, y, (or ra, dec) and \n # Position of a star, in numpy convention\n x_int = int(starobj['x'])\n y_int = int(starobj['y'])\n dx = -1.0 * (starobj['x'] - x_int)\n dy = -1.0 * (starobj['y'] - y_int)\n halosize = int(halosize)\n # Make padded image to deal with stars near the edges\n padsize = int(padsize)\n ny, nx = self.image.shape\n im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))\n im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.image\n # Star itself, but no shift here.\n halo = im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1, \n x_int + padsize - halosize: x_int + padsize + halosize + 1]\n self._image = halo\n self.shape = halo.shape\n self.cen_xy = [x_int, y_int]\n self.dx = dx\n self.dy = dy \n # FLux\n self.flux = starobj['flux']\n self.fluxann = starobj['flux_ann']\n\n if hasattr(self, 'mask'):\n im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))\n im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.mask\n # Mask itself, but no shift here.\n halo = (im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1, \n x_int + padsize - halosize: x_int + padsize + halosize + 1])\n self._mask = halo\n \n if hasattr(self, 'hscmask'):\n im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))\n im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.hscmask\n # Mask itself, but no shift here.\n halo = (im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1, \n x_int + padsize - halosize: x_int + padsize + halosize + 1])\n self.hscmask = halo",
"def __draw_board(self):\n\n COLOR = (0, 0, 0, 200)\n LINE_WIDTH = 2\n STAR_POINT_SIZE = 4\n FONT_SIZE = 18\n\n (boardSize, drawExtraStarPoints, starPointOffset) = self.settings\n boardSize -= 1\n stepX = self.innerWidth / boardSize\n stepY = self.innerHeight / boardSize\n labelBoardSpacing = self.borderSize / 2\n draw = ImageDraw.Draw(self.baseImage)\n font = ImageFont.truetype(\"assets/font_fifteentwenty.otf\", FONT_SIZE)\n\n # Draw lines and labels\n for i in range(0, boardSize + 1):\n x = self.borderSize + stepX * i\n label = chr(ord('A') + i)\n labelWidth, labelHeight = draw.textsize(label, font)\n\n draw.line([(x, self.borderSize), (x, self.innerHeight + self.borderSize)], COLOR, LINE_WIDTH)\n draw.text((x - labelWidth / 2, self.borderSize - labelHeight - labelBoardSpacing + labelHeight / 2), label, COLOR, font)\n draw.text((x - labelWidth / 2, self.borderSize + self.innerHeight + labelBoardSpacing - labelHeight / 2), label, COLOR, font)\n\n for i in range(0, boardSize + 1):\n y = self.borderSize + stepY * i\n label = str(boardSize - i + 1)\n labelWidth, labelHeight = draw.textsize(label, font)\n\n draw.line([(self.borderSize, y), (self.innerWidth + self.borderSize, y)], COLOR, LINE_WIDTH)\n draw.text((self.borderSize - labelWidth - labelBoardSpacing + labelWidth / 2, y - labelHeight / 2), label, COLOR, font)\n draw.text((self.borderSize + self.innerWidth + labelBoardSpacing - labelWidth / 2, y - labelHeight / 2), label, COLOR, font)\n\n # Calculate star point positions\n centerX = boardSize / 2 * stepX + self.borderSize\n centerY = boardSize / 2 * stepY + self.borderSize\n leftX = starPointOffset * stepX + self.borderSize\n rightX = (boardSize - starPointOffset) * stepX + self.borderSize\n topY = starPointOffset * stepY + self.borderSize\n bottomY = (boardSize - starPointOffset) * stepY + self.borderSize\n\n # Draw star points\n draw.ellipse([(centerX - STAR_POINT_SIZE, centerY - STAR_POINT_SIZE), (centerX + STAR_POINT_SIZE, centerY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(leftX - STAR_POINT_SIZE, topY - STAR_POINT_SIZE), (leftX + STAR_POINT_SIZE, topY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(rightX - STAR_POINT_SIZE, topY - STAR_POINT_SIZE), (rightX + STAR_POINT_SIZE, topY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(leftX - STAR_POINT_SIZE, bottomY - STAR_POINT_SIZE), (leftX + STAR_POINT_SIZE, bottomY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(rightX - STAR_POINT_SIZE, bottomY - STAR_POINT_SIZE), (rightX + STAR_POINT_SIZE, bottomY + STAR_POINT_SIZE)], COLOR)\n\n if drawExtraStarPoints:\n draw.ellipse([(centerX - STAR_POINT_SIZE, topY - STAR_POINT_SIZE), (centerX + STAR_POINT_SIZE, topY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(leftX - STAR_POINT_SIZE, centerY - STAR_POINT_SIZE), (leftX + STAR_POINT_SIZE, centerY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(centerX - STAR_POINT_SIZE, bottomY - STAR_POINT_SIZE), (centerX + STAR_POINT_SIZE, bottomY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(rightX - STAR_POINT_SIZE, centerY - STAR_POINT_SIZE), (rightX + STAR_POINT_SIZE, centerY + STAR_POINT_SIZE)], COLOR)",
"async def star_random(self, ctx):\n board = self.bot.db.execute(\"SELECT * FROM starboards WHERE guild_id = ?\", (ctx.guild.id,)).fetchone()\n item = self.bot.db.execute(\n \"SELECT item_id FROM starboard_items WHERE visible = 1 \" \\\n \"ORDER BY random() LIMIT 1\"\n ).fetchone()\n if not item:\n return\n try:\n board_msg = await self.bot.get_channel(board[\"channel_id\"]).fetch_message(item[\"item_id\"])\n except discord.NotFound:\n return await self.destroy_item(board[\"channel_id\"], item[\"item_id\"])\n else:\n await ctx.send(board_msg.content, embed=board_msg.embeds[0])",
"def star_sprite(self):\n for _ in range(5):\n star = arcade.Sprite(\"Sprites/star.png\", .8)\n star.center_x = random.randrange(0, settings.WIDTH)\n star.center_y = random.randrange(0, settings.HEIGHT)\n self.star_sprites.append(star)",
"def star_graph():\n pylon_graph = graph.graph()\n idx = pylon_graph.add_unique_node(ORIGIN, \"base\")\n star_list = pylon_graph.add_star_to_node(idx, 6)\n pylon_graph.connect_nodes(star_list)\n pylon_graph.save_graph(\"star\")\n return pylon_graph",
"async def set_star_thresh(self, ctx: commands.Context, thresh: int):\n self.check_if_exist(ctx.guild)\n\n self.starboard_guilds = self.starboard_info.find(\"guilds\")\n\n self.starboard_guilds[str(ctx.guild.id)][\"thresh\"] = thresh\n\n self.starboard_info.update(\"guilds\", self.starboard_guilds)\n\n await ctx.reply(\"The amount of stars needed to get a message to the starboard is now {0}\".format(thresh))",
"async def starboard_emoji(self, ctx, emoji):\n if emoji[0] == \"<\":\n # is custom emoji\n emoji_obj = await util.get_emoji(ctx, emoji)\n if emoji_obj is None:\n raise exceptions.Warning(\"I don't know this emoji!\")\n\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO starboard_settings (guild_id, emoji_name, emoji_id, emoji_type)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE\n emoji_name = VALUES(emoji_name),\n emoji_id = VALUES(emoji_id),\n emoji_type = VALUES(emoji_type)\n \"\"\",\n ctx.guild.id,\n None,\n emoji_obj.id,\n \"custom\",\n )\n await util.send_success(\n ctx, f\"Starboard emoji is now {emoji} (emoji id `{emoji_obj.id}`)\"\n )\n else:\n # unicode emoji\n emoji_name = emoji_literals.UNICODE_TO_NAME.get(emoji)\n if emoji_name is None:\n raise exceptions.Warning(\"I don't know this emoji!\")\n\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO starboard_settings (guild_id, emoji_name, emoji_id, emoji_type)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE\n emoji_name = VALUES(emoji_name),\n emoji_id = VALUES(emoji_id),\n emoji_type = VALUES(emoji_type)\n \"\"\",\n ctx.guild.id,\n emoji_name,\n None,\n \"unicode\",\n )\n await util.send_success(ctx, f\"Starboard emoji is now {emoji}\")\n await self.bot.cache.cache_starboard_settings()",
"def drawstars(slist=[], best=None, outfile='/tmp/stars.jpg'):\n img = Image.new('RGB', (xmax,ymax), backcol) #blank 8-bit color image\n draw = ImageDraw.Draw(img)\n\n x,y,radius = 400, 300, hole_radius*Cscale\n draw.rectangle( (400+Xmin*Cscale, 300-Ymin*Cscale, 400+Xmax*Cscale, 300-Ymax*Cscale), outline=(0,128,0), fill=None)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(0,128,0), fill=None)\n\n for i in range(len(slist)):\n x,y,radius = 400+slist[i].x*Sscale, 300-slist[i].y*Sscale, rscale(slist[i].mag)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(0,0,0), fill=(0,0,0))\n draw.text( (400+slist[i].x*Sscale+3, 300-slist[i].y*Sscale+3), `i`, fill=(0,0,0) )\n\n i = best #Redraw the 'best' star in red\n try:\n x,y,radius = 400+slist[i].x*Sscale, 300-slist[i].y*Sscale, rscale(slist[i].mag)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(192,0,0), fill=(192,0,0))\n draw.text( (400+slist[i].x*Sscale+3, 300-slist[i].y*Sscale+3), `i`, fill=(192,0,0) )\n except TypeError,IndexError:\n pass #There is no 'best' star\n\n img.save(outfile, quality=90)"
] | [
"0.74559045",
"0.6628303",
"0.64687115",
"0.6309718",
"0.6219598",
"0.59351027",
"0.59275556",
"0.586937",
"0.5743689",
"0.5702885",
"0.56899554",
"0.56740135",
"0.558157",
"0.5522696",
"0.53798324",
"0.53583425",
"0.52350324",
"0.5224017",
"0.52115166",
"0.52085984",
"0.5154138",
"0.51465183",
"0.5129245",
"0.51274365",
"0.51249164",
"0.51196283",
"0.5101491",
"0.5095697",
"0.50562227",
"0.5039162"
] | 0.7119598 | 1 |
Enables a disabled starboard. | async def enable(self, ctx):
self.bot.db.execute("UPDATE starboards SET enabled = 1 WHERE channel_id = ?", (ctx.channel.id,))
await ctx.say("star.enabled") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enable(self):\r\n self.update(enabled=True)",
"def enable(self):\n self.enabled = True",
"def enable(self):\n self.enabled = True",
"async def starboard_toggle(self, ctx, value: bool):\n await queries.update_setting(ctx, \"starboard_settings\", \"is_enabled\", value)\n if value:\n await util.send_success(ctx, \"Starboard is now **enabled**\")\n else:\n await util.send_success(ctx, \"Starboard is now **disabled**\")\n await self.bot.cache.cache_starboard_settings()",
"def setEnabled(self, enabled):\n def do(toUpdateList):\n self.enabled = enabled\n self.actions.addAction(do)",
"def enable(self):\n self._enabled = True",
"def setEnabled(self, enable: bool) -> None:\n self.enabled = ...",
"def enable(self):\n self.switch.enable()\n self._enabled = True",
"def set_enabled(self, enabled):\n self.widget.setEnabled(enabled)",
"def set_enabled(self, enabled):\n self.widget.setEnabled(enabled)",
"def set_disabled_switch(self, disabled):\n self.disabled = disabled",
"def enable_button(self, index):\n if index != 0:\n self.roll_dem_bones.setEnabled(True)",
"def _led_enable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.LOW)",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def enable():\n ret = _LIB.led_matrix_click_enable()\n if ret < 0:\n raise Exception(\"led matrix click enable failed\")",
"def setupenabled(self):\n\n if self.imagearray is None:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setEnabled(True)\n self.vspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(False)\n self.nsspins[n].setEnabled(False)\n self.vspins[self.numcols - 1].setReadOnly(False)\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.pcspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(False)\n self.vspins[n].setEnabled(False)\n self.nsspins[n].setEnabled(False)\n self.pcspins[self.numcols - 1].setReadOnly(False)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setEnabled(True)\n self.nsspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(False)\n self.vspins[n].setEnabled(False)\n self.nsspins[self.numcols - 1].setReadOnly(False)\n else:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setEnabled(True)\n self.vspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(True)\n self.nsspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(True)\n self.nsspins[n].setReadOnly(True)\n self.vspins[self.numcols - 1].setReadOnly(False)\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.pcspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(False)\n self.vspins[n].setEnabled(True)\n self.nsspins[n].setEnabled(True)\n self.vspins[n].setReadOnly(True)\n self.nsspins[n].setReadOnly(True)\n self.pcspins[self.numcols - 1].setReadOnly(False)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setEnabled(True)\n self.nsspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(True)\n self.vspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(True)\n self.vspins[n].setReadOnly(True)\n self.nsspins[self.numcols - 1].setReadOnly(False)\n\n for n in range(self.numcols, len(self.vspins)):\n self.vspins[n].setEnabled(False)\n self.nsspins[n].setEnabled(False)\n self.pcspins[n].setEnabled(False)",
"def enable(self) -> None:",
"async def star_dm(self, ctx, enable: bool = None):\n if enable is None:\n result = self.bot.db.execute(\"SELECT starboard_dm FROM users WHERE user_id = ?\", (ctx.author.id,)).fetchone()\n enabled = result[\"starboard_dm\"] if result else 0\n status_str = ctx._(f\"star.dm{['Disabled', 'Enabled'][enabled]}\")\n return await ctx.say(\"star.dmCurrent\", status_str)\n self.bot.db.execute(\"UPDATE users SET starboard_dm = ? WHERE user_id = ?\",(\n int(enable),\n ctx.author.id\n ))\n status_str = ctx._(f\"star.dm{['Disabled', 'Enabled'][enable]}\")\n return await ctx.say(\"star.dmCurrent\", status_str)",
"def enable(self):",
"def set_enabled(self, enabled=True):\n self._enabled = enabled"
] | [
"0.69257647",
"0.673253",
"0.673253",
"0.6605978",
"0.65678227",
"0.6538243",
"0.64849865",
"0.6334245",
"0.6330517",
"0.6330517",
"0.63191867",
"0.62614286",
"0.6255469",
"0.6149117",
"0.6149117",
"0.6149117",
"0.6149117",
"0.6149117",
"0.6149117",
"0.6149117",
"0.6149117",
"0.6149117",
"0.6149117",
"0.6149117",
"0.6147287",
"0.613502",
"0.6131316",
"0.6121034",
"0.6112404",
"0.60983145"
] | 0.7405919 | 0 |
Sets "max age" for the starboard messages. If a message is older than the specified days, the message is ignored. Note that existing messages are not affected. Defaults to 7 (one week). | async def maxage(self, ctx, age: int):
if age > 0:
self.bot.db.execute("UPDATE starboards SET age = ? WHERE channel_id = ?", (age,ctx.channel.id))
await ctx.say("star.age", age)
await self.set_topic(ctx.channel.id)
else:
await ctx.say("star.unsigned", age) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_age(self, max_age):\n self._max_age = max_age",
"def max_age(self, max_age):\n\n self._max_age = max_age",
"def max_age(self, max_age):\n if (max_age is not None and max_age < -1): # noqa: E501\n raise ValueError(\"Invalid value for `max_age`, must be a value greater than or equal to `-1`\") # noqa: E501\n\n self._max_age = max_age",
"def set_maxdays(name, maxdays):\n pre_info = info(name)\n if maxdays == pre_info[\"max\"]:\n return True\n cmd = \"passwd -x {} {}\".format(maxdays, name)\n __salt__[\"cmd.run\"](cmd, python_shell=False)\n post_info = info(name)\n if post_info[\"max\"] != pre_info[\"max\"]:\n return post_info[\"max\"] == maxdays",
"def max_age_in_days(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_age_in_days\")",
"def max_age(self):\n return self._max_age",
"def max_age(self):\n return self._max_age",
"def max_age(self):\n return 120 if self.realtime else 1800",
"def max_jobs_age(self):\n return int(self.__get_option('max_jobs_age'))",
"def max_age(self):\n\n return self._max_age",
"def max_retention_days(self) -> int:\n return pulumi.get(self, \"max_retention_days\")",
"def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value",
"def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value",
"def max_age(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_age\")",
"def message_retention_in_days(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"message_retention_in_days\")",
"def max_age_rule(self) -> Optional[pulumi.Input['ApplicationMaxAgeRuleArgs']]:\n return pulumi.get(self, \"max_age_rule\")",
"def show_max_age_label(self):\n self.draw_max_age = True",
"def _send_maximum(self):\n content = {'maximum': self.maximum.isoformat()}\n self.send_action('set_maximum', content)",
"def max_age(self) -> typing.Optional[jsii.Number]:\n return self._values.get('max_age')",
"def _max_days(self):\n # type: (...) -> Union[int, Tuple[int]]\n\n return self.value.max_days",
"def max_frame_age(self) -> float:\n return self._max_frame_age",
"def userMaximum(self, new_max: float) -> None:\n self._user_maximum = new_max\n self.reset_limits()",
"def set_max(self, max):\n self.set_val((self.val[0], max))",
"def _putMailInPast(self, mail, days):\n doc = mail.getEditableContent()\n fid = self.archiver.date_field_id\n doc.edit({fid: doc.getDataModel()[fid] - days}, mail)",
"def max_players(self, max_players):\n\n self._max_players = max_players",
"def org_eclipse_jetty_servlet_max_age(self, org_eclipse_jetty_servlet_max_age: ConfigNodePropertyInteger):\n\n self._org_eclipse_jetty_servlet_max_age = org_eclipse_jetty_servlet_max_age",
"def set_max_edges(self, edges):\n self.max_edges = edges",
"async def max(self, ctx, limit: int):\n self.data_check(ctx)\n server = ctx.message.server\n\n self.riceCog2[server.id][\"max\"] = limit\n dataIO.save_json(self.warning_settings,\n self.riceCog2)\n await self.bot.say(\"Warn limit is now: \\n{}\".format(limit))",
"def adjust_age(self):\n try:\n from tools import valid_units\n except ImportError as e:\n print(\"Necessary import failed: {}\".format(e))\n if not valid_units(self.age_units):\n print(\"Given unit is not supported: {}\".format(self.age_units))\n raise ValueError()\n if self.age_units == \"day\":\n if self.age < 7:\n return\n elif self.age < 30:\n self.age = self.age//7\n self.age_units = \"week\"\n elif self.age < 365:\n self.age = self.age//30\n self.age_units = \"month\"\n else:\n self.age = self.age//365\n self.age_units = \"year\"\n elif self.age_units == \"week\":\n if self.age < 4:\n return\n elif self.age < 52:\n self.age = self.age//4\n self.age_units = \"month\"\n else:\n self.age = self.age//52\n self.age_units = \"year\"\n elif self.age_units == \"month\":\n if self.age < 12:\n return\n else:\n self.age = self.age//12\n self.age_units = \"year\"",
"def maximal_completion_delay_in_days(self, maximal_completion_delay_in_days):\n\n self._maximal_completion_delay_in_days = maximal_completion_delay_in_days"
] | [
"0.6970839",
"0.6911315",
"0.61830574",
"0.6088173",
"0.60299325",
"0.58305186",
"0.58305186",
"0.57887924",
"0.56967527",
"0.5692167",
"0.5583454",
"0.5548023",
"0.5548023",
"0.55217004",
"0.54962564",
"0.54864573",
"0.5448077",
"0.5405468",
"0.5401819",
"0.53459024",
"0.5305077",
"0.52902114",
"0.5246656",
"0.52185863",
"0.5204188",
"0.5197784",
"0.5183826",
"0.5176835",
"0.5155243",
"0.5128057"
] | 0.6915784 | 1 |
Sets "threshold" for the starboard messages. The specified number of stars are required to put the message on the starboard. Note that existing messages are not affected. Defaults to 5. | async def threshold(self, ctx, threshold: int):
if threshold > 0:
self.bot.db.execute("UPDATE starboards SET threshold = ? WHERE channel_id = ?", (threshold, ctx.channel.id))
await ctx.say("star.threshold", threshold)
await self.set_topic(ctx.channel.id)
else:
await ctx.say("star.unsigned", threshold) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def set_star_thresh(self, ctx: commands.Context, thresh: int):\n self.check_if_exist(ctx.guild)\n\n self.starboard_guilds = self.starboard_info.find(\"guilds\")\n\n self.starboard_guilds[str(ctx.guild.id)][\"thresh\"] = thresh\n\n self.starboard_info.update(\"guilds\", self.starboard_guilds)\n\n await ctx.reply(\"The amount of stars needed to get a message to the starboard is now {0}\".format(thresh))",
"async def _msgvote_threshold(self, ctx, threshold: int):\n\n if threshold < 0:\n await self.bot.say(\"Invalid threshold. Must be a positive \"\n \"integer, or 0 to disable.\")\n elif threshold == 0:\n self.settings[\"threshold\"] = threshold\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Message deletion disabled.\")\n else:\n self.settings[\"threshold\"] = threshold\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Messages will be deleted if [downvotes - \"\n \"upvotes] reaches {}.\".format(threshold))",
"def threshold(self,thresholdValue):\n # TO DO\n pass",
"def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass",
"def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)",
"def setThreshold(self, value):\n return self._set(threshold=value)",
"def OnBitmapButton5StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 5\r\n\t\tself.SetStars()",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"async def handle_less(message: types.Message):\n await handle_change_threshold(message, 1 / 1.5)",
"def block5_threshold(self):\n return self._safe_value(VAR_BLOCK5THRESHOLD, float)",
"def setThresholdLevel(self, *args):\n return _libsbml.Input_setThresholdLevel(self, *args)",
"def setThreshold1(self, trsh):\n\t\tself.edgeThreshold1 = trsh\n\t\tself.edgeThreshold2 = trsh * 2.5",
"def set_ThresholdValue(self, value):\n super(UpdateTriggerInputSet, self)._set_input('ThresholdValue', value)",
"def set_threshold(self, cat, t):\n self.con.execute(\"update ct set threshold=%f where category='%s'\" \n % (t, cat))",
"def bound_rating(self, rating):\n return 1.0 * max(0, min(int(rating + 0.5), 5))\n # return 1.0 * max(0, min(rating, 5))",
"def moving_threshold(self, value):\n self._write(MX_MOVING_THRESHOLD, value)",
"def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0",
"def SetThreshold (self,VolumeNode, min, max):\n DisplayNode = VolumeNode.GetScalarVolumeDisplayNode()\n DisplayNode.SetApplyThreshold(True)\n DisplayNode.SetThreshold(min,max)",
"def thresh(self, thresh=25, total_ratings=False):\n before = self.item_count()\n\n if total_ratings: self.filter(self.n_per_item() >= thresh)\n else: self.filter(np.all(self.lam() >= thresh, axis=0))\n\n after = self.item_count()\n thresh_type = 'on each item total' if total_ratings else 'by each group' \n with msg(f'Applying threshold of {thresh} ratings {thresh_type} : {after} of {before}', done=False, enabled=self.output):pass",
"def set_threshold_from_energy(energy, dryRun=False):\n if energy < 3.5:\n print(\"WARNING: optimal energy threshold should normally be set to half of the beam energy, but some noise will appear below energy threshold of 3.5 keV!\")\n caput(\"BL13J-EA-EXCBR-01:CONFIG:ACQUIRE:EnergyThreshold\",energy)",
"def _thresholdAlarm(self, project_usage, proj_name, old_used_space_perc):\n\n message = \"\"\n threshold_soft = 95\n threshold_hard = 100\n if (proj_name in self.conf.mirrored_projects):\n threshold_soft = 95*2\n threshold_hard = 100*2\n if ((project_usage['used_space_perc'] > threshold_soft) and \n (project_usage['used_space_perc'] > old_used_space_perc)):\n message = \"project \" + proj_name + \" is reaching its quota limit \" \\\n + \"(used space > 95%): \" + str(project_usage['used_space'])\\\n + \" \" + self.conf.storage_space_unity\n if (project_usage['used_space_perc'] >= threshold_hard):\n message = \"project \" + proj_name + \" reached its quota limit \" \\\n + \"(used space > 100%): \" + str(project_usage['used_space'])\\\n + \" \" + self.conf.storage_space_unity\n if (len(message) > 0):\n mailsnd = MailSender()\n mailsnd.send(message, self.conf.notification_sender, \n self.conf.notification_receiver)\n logger.debug(\"sent alert for quota over limit related to project: \"\n + proj_name)",
"def thresholdfactor(self):\n return self.__thresholdfactor",
"def setMoveThreshold(self, thresholdLoc, thresholdRot):\r\n self.moveThresholdLoc = thresholdLoc\r\n self.moveThresholdRot = thresholdRot",
"def set_photon_counting_thres(self, mini, maxi):\n self.lib.SetPhotonCountingThreshold(ct.c_long(mini), ct.c_long(maxi))",
"def setAmbiguityThreshold(self, value):\n return self._set(ambiguityThreshold=value)",
"def set_warning_song(self, song_number):\n self._warning_song_num = int(math.fabs(song_number)) % 5\n\n # Song is in c major scale and is the 5th (G) to the 3rd (E).\n cmd = \"140 \" + str(self._warning_song_num) + \" 2 67 16 64 16\"\n\n self._serial_conn.send_command(cmd)"
] | [
"0.71225744",
"0.59684855",
"0.5927058",
"0.58108085",
"0.576199",
"0.57090366",
"0.5671583",
"0.5666817",
"0.5666817",
"0.5666817",
"0.5666817",
"0.5666817",
"0.5508571",
"0.5468851",
"0.5445576",
"0.5442838",
"0.538356",
"0.53599596",
"0.5337971",
"0.5335528",
"0.5254369",
"0.5232643",
"0.5196285",
"0.5116185",
"0.5096018",
"0.50832146",
"0.5030812",
"0.5021622",
"0.49949834",
"0.49876738"
] | 0.7315309 | 0 |
Shows a starboard item. The argument can be either original message ID or starboard item ID. | async def star_show(self, ctx, item: Star):
board = self.bot.db.execute("SELECT * FROM starboards WHERE guild_id = ?", (ctx.guild.id,)).fetchone()
try:
board_msg = await self.bot.get_channel(board["channel_id"]).fetch_message(item["item_id"])
except discord.NotFound:
return await self.destroy_item(board["channel_id"], item["item_id"])
else:
await ctx.send(board_msg.content, embed=board_msg.embeds[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show(self, item_id):\n pass",
"async def star_random(self, ctx):\n board = self.bot.db.execute(\"SELECT * FROM starboards WHERE guild_id = ?\", (ctx.guild.id,)).fetchone()\n item = self.bot.db.execute(\n \"SELECT item_id FROM starboard_items WHERE visible = 1 \" \\\n \"ORDER BY random() LIMIT 1\"\n ).fetchone()\n if not item:\n return\n try:\n board_msg = await self.bot.get_channel(board[\"channel_id\"]).fetch_message(item[\"item_id\"])\n except discord.NotFound:\n return await self.destroy_item(board[\"channel_id\"], item[\"item_id\"])\n else:\n await ctx.send(board_msg.content, embed=board_msg.embeds[0])",
"def show_message(message, col=c.r, update=False):\n g.content = generate_songlist_display()\n g.message = col + message + c.w\n\n if update:\n screen_update()",
"def show_item_by_id(plugin, item_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page(plugin + ':' + item_id)",
"def show_item(self, show_item):\n\n self._show_item = show_item",
"async def iteminfo(self, ctx, *, item: str):\n items = await self.bot.di.get_guild_items(ctx.guild)\n item = items.get(item)\n if not item:\n await ctx.send(await _(ctx, \"Item doesnt exist!\"))\n return\n if hasattr(item, \"description\"):\n embed = discord.Embed(title=item.name, description=item.description, color=randint(0, 0xFFFFFF),)\n else:\n embed = discord.Embed(title=item.name, color=randint(0, 0xFFFFFF),)\n\n embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)\n embed.add_field(name=await _(ctx, \"Name\"), value=item.name)\n img = item.meta.get(\"image\")\n embed.set_thumbnail(url=str(img)) if img else None\n for key, value in item.meta.items():\n if key == \"image\":\n continue\n embed.add_field(name=key, value=value)\n\n await ctx.send(embed=embed)",
"def show(*args):\n I = Items()\n for arg in args:\n I.add_item(arg)\n I.write()",
"def showItem(category_item_id):\n return render_template('item.html', item=db.findItem(id=category_item_id))",
"def view_item(request, item_pk):\n return HttpResponse('This is where we view item ' + item_pk)",
"def display_message(self, message, subtitle=None, arg=None):\n if message is None:\n # Display same message as the placeholder\n message = self.placeholder\n xml = alfred.xml([\n alfred.Item(\n title=message,\n subtitle=subtitle,\n attributes={\n 'uid': alfred.uid(0),\n 'arg': arg\n },\n icon='icon.png',\n )\n ]) # compiles the XML answer\n alfred.write(xml) # writes the XML back to Alfred\n exit()",
"def show_item(request, itemID):\n\ttry:\n\t\titem = get_object_or_404(Item, itemID = itemID)\n\n\t# Handle when the given itemID is not UUID\n\texcept ValidationError:\n\t\traise Http404\n\n\tcontext_dict = {}\n\tsearch_form = Search_bar()\n\tcontext_dict['search_bar'] = search_form\n\tcontext_dict['item'] = item\n\tcontext_dict['seller_rating'] = range(int(round(item.seller.rating, 1)))\n\n\trelated = Item.objects.filter(category = item.category).exclude(itemID = item.itemID)\n\t\n\tif len(related) > 3:\n\t\tcontext_dict['trendingItems'] = related[0:3]\n\telse:\n\t\tcontext_dict['trendingItems'] = related\n\n\tresponse = render(request, 'tailored/product.html', context_dict)\n\t\n\tif first_visit(request, response, str(item.itemID)):\n\t\titem.dailyVisits += 1\n\t\titem.save()\n\t\t\n\tcontext_dict['itemID'] = item.itemID\n\n\tif item.seller.user != request.user:\n\t\treturn response\n\n\tsold_form = SoldItemForm()\n\n\tif request.method == 'POST':\n\t\tsold_form = SoldItemForm(request.POST, request.FILES)\n\n\t\tif sold_form.is_valid():\n\t\t\tuser_query = User.objects.filter(username = sold_form.cleaned_data['sold_to'])\n\t\t\tif not user_query:\n\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError('The given user does not exist.'))\n\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\n\t\t\telif user_query[0] != request.user:\n\t\t\t\ttry:\n\t\t\t\t\titem.sold_to = UserProfile.objects.get(user = user_query[0])\n\t\t\t\t\titem.save()\n\t\t\t\texcept UserProfile.DoesNotExist:\n\t\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError('The given user does not exist.'))\n\t\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\t\t\telse:\n\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError(\"You can't sell an item to yourself.\"))\n\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\t\t\titem.save()\n\t\t\treturn HttpResponseRedirect(reverse('tailored:index'))\n\n\tcontext_dict['form'] = sold_form\n\treturn render(request, 'tailored/product.html', context_dict)",
"def showTile(self, event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)",
"def show_item_details(item_id):\n item = session.query(Item, User).join(User).filter(Item.id == item_id).first()\n return render_template('item_details.html', item=item, login_session=login_session)",
"async def info(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'I need embed_links permission to answer in this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n canonical = await Controller.canonical_title(item)\n if canonical:\n item = canonical\n page_url = Controller.link_from_title(item)\n try:\n wikitext = await Controller.get_wikitext(item)\n except ValueError as e:\n # Means the page is not found\n await msg.channel.send(**{\n 'content': f'No page found for `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n contents = []\n template_names = []\n for template in WTP.parse(wikitext).templates:\n template_names.append(template.name.strip())\n if self.is_infobox(template.name):\n args = template.arguments\n title = item\n entries = {}\n for arg in args:\n k, v = arg.string.strip(' |\\n').split('=')\n k = k.strip()\n v = v.strip()\n if k.lower() in ['title1', 'name']:\n # Set this as the item name\n title = v\n elif k.lower() in ['image1', 'image'] or not v:\n # Skip images and empty values\n continue\n else:\n entries[k] = v.replace('\\n\\n', '\\n').replace('\\n', '\\n\\t')\n entries = [f'{k} = {v}' for k, v in entries.items()]\n entries = '• '+'\\n• '.join(entries)\n content = f'## **{title}** ##\\nSource: {page_url}\\n{template.name.strip()}\\n{entries}'\n contents.append(content)\n logging.info(f'Templates at {item}: '+', '.join(template_names))\n if not contents:\n await msg.channel.send(**{\n 'content': f'No infobox found for `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n await msg.channel.send(**{\n 'content': '\\n===\\n'.join(contents),\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })",
"def markPlayed(self, item):\n key = f'{self.METADATA}/actions/scrobble'\n ratingKey = item.guid.rsplit('/', 1)[-1]\n params = {'key': ratingKey, 'identifier': 'com.plexapp.plugins.library'}\n self.query(key, params=params)\n return self",
"def viewItem(sport_id, item_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n item = session.query(Item).filter_by(id=item_id).one()\n return render_template('viewitem.html', sport_id=sport_id, item_id=item_id,\n item=item, sport=sport)",
"def toggle_item_starred(self):\n self.get_selected()\n if not self.selected_item:\n return\n was_starred = self.selected_item.starred\n message = 'Starred flag is now ON'\n if was_starred:\n message = 'Starred flag is now OFF'\n self.trigger_item_starred(not was_starred)\n self.controller.display_message(message)",
"def show_item_by_effect(plugin, item_id, effect_plugin, effect_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page({\n 'effect': effect_plugin + ':' + effect_id,\n 'id': plugin + ':' + item_id\n })",
"def view_item(item_id):\n session['target'] = url_for('view_item', item_id=item_id)\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item, Category).join(Category)\\\n .filter(Item.id == item_id).first()\n return render_template(\"view_item.html\", item=item)",
"def item_starred(self, item):\n self.update_item(item)",
"def __str__(self):\n return \"Item('\"+ self.get_id() + \"')\"",
"def show_item(self, event):\n\t\tc=self.seqframe\n\t\tbox = c.bbox(CURRENT)\n\t\tx1=box[0]\n\t\ty1=box[1]\n\t\tx2=box[2]\n\t\ty2=box[3]\n\t\titems=[]\n\t\t#make selection rectangle one pixel larger to include rect and text\n\t\titems=c.find_enclosed(x1-1,y1-1,x2+1,y2+1)\n\t\t#get this for recog sequence\n\t\tenzymes=self.RS.enzymes_regexs\n\t\t\n\t\tsfont = tkFont.Font (family='Arial', size=12,weight='bold')\n\t\tfor obj in items:\n\t\t\tc.tag_raise(obj)\n\t\t\t#if item is text, get recog sequence and display\n\t\t\tif 'textlabel' in c.gettags(obj):\n\t\t\t\tname=c.itemcget(obj, 'text')\n\t\t\t\tname=name.rstrip('(+-)')\n\t\t\t\tseq=self.get_sequence(enzymes[name]['realseq'])\n\t\t\t\tobj=c.create_text(x2+2,y1-2,text=seq,tags='recogseqlabel',\n\t\t\t\t\t\t\tfont=sfont,width=120,anchor='nw')\n\t\t\t\tbox = c.bbox(obj)\n\t\t\t\trect = c.create_rectangle(box,tag='recogseqlabel',fill='yellow')\n\t\t\t\tc.lift(obj)",
"def item_detail(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n current_user.id == Item.user_id\n ).first()\n if not item:\n flash(\"Couldn't find this item\", category='warning')\n return redirect(url_for('url.index'))\n return render_template('detail.html', item=item)",
"def show_info(title, message):\n\n pass",
"async def starred(self, ctx: Message):\n\t\tglobal starred\n\t\tglobal starredauthor\n\t\tawait self.send(\n\t\t f\"Starred Message: {starred}ㅤ|ㅤMessage Creator: @{starredauthor}\")",
"def item_link(self, obj):\n if obj.item is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_mediaitem_change', args=(obj.item.pk,)),\n obj.item.title if obj.item.title != '' else '[Untitled]'\n )",
"def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})",
"def showInfo(parent,message,title=_('Information')):\r\n return askStyled(parent,message,title,wx.OK|wx.ICON_INFORMATION)",
"def display(self, item: Any):\n self.display_widget.clear_output(wait=True)\n with self.display_widget:\n self.display_function(item)\n self.clear() # type: ignore",
"def faqitem_show(request,item_container):\n app_name = 'faqitem'\n parent = item_container.get_parent()\n if parent.item.has_comments:\n comments = item_comment(request, item_container=item_container)\n else:\n comments = ''\n vars = get_item_vars_show(request, item_container, app_name)\n vars['comments'] = comments\n return render_to_response ( 'app/faqitem/base-item.html', vars )"
] | [
"0.6767876",
"0.62626845",
"0.6151777",
"0.6020385",
"0.5975807",
"0.58423346",
"0.57976115",
"0.5691685",
"0.5685633",
"0.5602847",
"0.5571166",
"0.5512835",
"0.5495824",
"0.54660666",
"0.54644984",
"0.54629415",
"0.545546",
"0.54352176",
"0.541161",
"0.541123",
"0.54102796",
"0.54056174",
"0.5404639",
"0.5386514",
"0.5322978",
"0.5317937",
"0.5279676",
"0.5261988",
"0.5258057",
"0.52341443"
] | 0.8043702 | 0 |
Enables/disables DM when your message was stared. If the parameter is not given, this returns current status. Can be used anywhere including DM. | async def star_dm(self, ctx, enable: bool = None):
if enable is None:
result = self.bot.db.execute("SELECT starboard_dm FROM users WHERE user_id = ?", (ctx.author.id,)).fetchone()
enabled = result["starboard_dm"] if result else 0
status_str = ctx._(f"star.dm{['Disabled', 'Enabled'][enabled]}")
return await ctx.say("star.dmCurrent", status_str)
self.bot.db.execute("UPDATE users SET starboard_dm = ? WHERE user_id = ?",(
int(enable),
ctx.author.id
))
status_str = ctx._(f"star.dm{['Disabled', 'Enabled'][enable]}")
return await ctx.say("star.dmCurrent", status_str) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def moderation(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"ban_kick_mute\")\n\n if new_value:\n message = \":white_check_mark: You will now receive DMs when you get muted, kicked or banned by me.\"\n else:\n message = \":white_check_mark: You will no longer receive DMs when you get muted, kicked or banned.\"\n\n await ctx.send(message)",
"async def toggle(self, ctx):\n guild = ctx.message.guild\n\n enabled = await self.config.guild(guild).enabled()\n\n enabled = not enabled\n await self.config.guild(guild).enabled.set(enabled)\n\n if enabled is True:\n await ctx.send(\"AntiSpam has been enabled\")\n else:\n await ctx.send(\"AntiSpam has been disabled\")",
"async def status(self, ctx, *, status=None):\n # [p]set status <status>\n\n statuses = {\n \"online\": discord.Status.online,\n \"idle\": discord.Status.idle,\n \"dnd\": discord.Status.dnd,\n \"invisible\": discord.Status.invisible\n }\n\n server = ctx.message.server\n\n current_game = server.me.game if server is not None else None\n\n if status is None:\n await self.bot.change_presence(status=discord.Status.online,\n game=current_game)\n await self.bot.say(\"Status reset.\")\n else:\n status = statuses.get(status.lower(), None)\n if status:\n await self.bot.change_presence(status=status,\n game=current_game)\n await self.bot.say(\"Status changed.\")\n else:\n await send_command_help(ctx)",
"def set_On(self):\n if not(self._locked):\n self.__dict__['statusOn']=True\n self._do_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)",
"def toggle(self):\n s = self.status()\n if s == self.POWER_OFF:\n self.on()\n else:\n self.off()\n return self.status()",
"async def pmguard(message: Message):\n global pmCounter # pylint: disable=global-statement\n if Config.ALLOW_ALL_PMS:\n Config.ALLOW_ALL_PMS = False\n await message.edit(\"`PM_guard activated`\", del_in=3, log=__name__)\n else:\n Config.ALLOW_ALL_PMS = True\n await message.edit(\"`PM_guard deactivated`\", del_in=3, log=__name__)\n pmCounter.clear()\n await SAVED_SETTINGS.update_one(\n {'_id': 'PM GUARD STATUS'}, {\"$set\": {'data': Config.ALLOW_ALL_PMS}}, upsert=True)",
"def getStatus(self):\n return self.enabled",
"async def toggle(self, ctx):\r\n serverid = ctx.message.server.id\r\n if self.adkillr[serverid]['toggle'] is True:\r\n self.adkillr[serverid]['toggle'] = False\r\n e = discord.Embed(description='**AntiAdv is now disabled.**')\r\n await self.bot.say(embed=e)\r\n elif self.adkillr[serverid]['toggle'] is False:\r\n self.adkillr[serverid]['toggle'] = True\r\n e = discord.Embed(description='**AntiAdv is now enabled.**')\r\n await self.bot.say(embed=e)\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)",
"async def greeter_toggle(self, ctx, value: bool):\n await queries.update_setting(ctx, \"greeter_settings\", \"is_enabled\", value)\n if value:\n await util.send_success(ctx, \"Greeter is now **enabled**\")\n else:\n await util.send_success(ctx, \"Greeter is now **disabled**\")",
"async def async_turn_on(self):\n path = \"/queue/simple\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"queue\"]:\n if self._ctrl.data[\"queue\"][uid][\"name\"] == f\"{self._data['name']}\":\n value = self._ctrl.data[\"queue\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.force_update()",
"def enable(self):\n return self._packet.get('enable', False)\n\n # TODO: TCONT and GEM lists",
"async def dmsettings(self, ctx):\n\n emojify_settings = self.bot.get_cog(\"Server\").emojiy_settings\n settings = await self.ensure_dm_settings(ctx.author.id)\n\n mute_kick_ban = emojify_settings(settings['ban_kick_mute'])\n leg_session_open = emojify_settings(settings['leg_session_open'])\n leg_session_update = emojify_settings(settings['leg_session_update'])\n leg_session_submit = emojify_settings(settings['leg_session_submit'])\n leg_session_withdraw = emojify_settings(settings['leg_session_withdraw'])\n\n embed = self.bot.embeds.embed_builder(title=f\"Direct Messages for {ctx.author.name}\",\n description=f\"Check `{config.BOT_PREFIX}help dms` for help on \"\n f\"how to enable or disable these settings.\\n\\n\"\n f\"{mute_kick_ban} DM when you get muted, kicked or banned\\n\"\n f\"{leg_session_open} \"\n f\"*({self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} Only)* DM when \"\n f\"a Legislative Session opens\\n\"\n f\"{leg_session_update} \"\n f\"*({self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} Only)* DM when \"\n f\"voting starts for a Legislative Session\\n\"\n f\"{leg_session_submit} \"\n f\"*({self.bot.mk.LEGISLATURE_CABINET_NAME} Only)* DM when \"\n f\"someone submits a Bill or Motion\\n\"\n f\"{leg_session_withdraw} \"\n f\"*({self.bot.mk.LEGISLATURE_CABINET_NAME} Only)* DM when \"\n f\"someone withdraws a Bill or Motion\\n\",\n has_footer=False)\n await ctx.send(embed=embed)",
"def do(self):\n this_server = TangoServerHelper.get_instance()\n try:\n sdp_master_ln_fqdn = \"\"\n property_val = this_server.read_property(\"SdpMasterFQDN\")[0]\n sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)\n sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)\n sdp_mln_client_obj.send_command_async(\n const.CMD_Disable, None, self.disable_cmd_ended_cb\n )\n self.logger.debug(const.STR_DISABLE_CMS_SUCCESS)\n this_server.write_attr(\n \"activityMessage\", const.STR_DISABLE_CMS_SUCCESS, False\n )\n\n except DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_msg = f\"{const.ERR_DISABLE_CMD_FAIL}{dev_failed}\"\n tango.Except.re_throw_exception(\n dev_failed,\n const.ERR_INVOKING_CMD,\n log_msg,\n \"SdpMasterLeafNode.DisableCommand()\",\n tango.ErrSeverity.ERR,\n )",
"def get_status(self):\n return super(Cabling, self).get_status()",
"def enabled(self):\n return self._packet.get('enabled', True)",
"def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True",
"def sms_disabled(self):\n return self._sms_disabled",
"def lock_status(self) -> Dict[str, str]:\n self.__logger.debug('Eva.lock_status called')\n return self.__http_client.lock_status()",
"async def status(self, ctx:utils.Context, status:str):\n\n status_o = getattr(discord.Status, status.lower())\n await self.bot.change_presence(activity=self.bot.guilds[0].me.activity, status=status_o)",
"def enabled(self):\n return self._get('enabled')",
"async def toggle(self, ctx):\r\n server = ctx.guild\r\n if self._logs[str(server.id)][\"toggle\"] == True:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(\"Modlogs are now disabled.\")\r\n return\r\n if self._logs[str(server.id)][\"toggle\"] == False:\r\n self._logs[str(server.id)][\"toggle\"] = True\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(f\"Modlogs are now enabled {self.bot.get_emoji(470063310386233344)}\")\r\n return",
"async def _cmdf_pmenable(self, substr, msg, privilege_level):\n enabled_str = None\n if utils.str_says_true(substr) or (len(substr) == 0):\n self._pm_msg_isenabled = True\n enabled_str = \"enabled.\"\n else:\n self._pm_msg_isenabled = False\n enabled_str = \"disabled.\"\n self._save_settings()\n\n buf = \"PM greetings is now \" + enabled_str\n await self._client.send_msg(msg, buf)\n return",
"async def set_enabled(self, enabled: bool) -> None:\n return await self.api.set_enabled(enabled)",
"def enablement_state(self):\n return self.__enablement_state",
"def on_message(self, msg):\n self.enabled = (msg == \"ON\")\n self.log.info(\"%s received %s command for logic actuator\",\n self.name, \"enable\" if self.enabled else \"disable\")",
"def get_status():\n return ('off', 'off')",
"async def legsessionvoting(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"leg_session_update\")\n\n if new_value:\n message = f\":white_check_mark: You will now receive DMs when you are \" \\\n f\"a {self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} \" \\\n f\"and voting starts for a Legislative Session.\"\n else:\n message = f\":white_check_mark: You will no longer receive DMs when you are \" \\\n f\"a {self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} \" \\\n f\"and voting starts for a Legislative Session.\"\n\n await ctx.send(message)",
"def status(self):\n ret = self.dev.ctrl_transfer(0xc0, 0x01, 0x0081, 0x0000, 0x0001)\n if ret[0] == 0xa0:\n return self.POWER_ON\n return self.POWER_OFF",
"def getSafetyMute(self, unitCode=0):\n resp = self.XAPCommand('SFTYMUTE', unitCode=unitCode)\n return bool(int(resp))",
"async def status(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if adv.is_on_adventure(ctx.author.id):\n out = adv.print_adventure(ctx.author.id)\n else:\n out = 'You are not doing anything at the moment.'\n await ctx.send(out)"
] | [
"0.64651364",
"0.59721977",
"0.5788894",
"0.5637827",
"0.559156",
"0.55866206",
"0.5582838",
"0.5560499",
"0.5545492",
"0.5535075",
"0.55287397",
"0.5520025",
"0.5509821",
"0.5500232",
"0.5459743",
"0.54503405",
"0.544722",
"0.54312956",
"0.5386221",
"0.5380486",
"0.53682333",
"0.5346718",
"0.5323133",
"0.5304755",
"0.53033173",
"0.5300116",
"0.5281752",
"0.528049",
"0.5278697",
"0.52782613"
] | 0.6530001 | 0 |
Finds the position at which a value of weight "weight" would fall in weight_list, where weight_list is sorted from smallest to largest. Newer inputs win ties. | def find_pos(weight, weight_list):
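    # Count how many stored weights the new weight meets or exceeds; subtracting one gives its index, so ties favor the newer input.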
bool_list = [weight >= x for x in weight_list]
pos = bool_list.count(True) - 1
return pos | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][1]\n while partialWeight < w:\n itemId += 1\n partialWeight += list[itemId][1]\n return list[itemId]",
"def solve_brute_force(n: int, W: int, weight: List[int], value: List[int]) -> int:\n mapped_items = [{\"w\": w, \"v\": v} for i, (w, v) in enumerate(zip(weight, value))]\n\n maximum_value: int = 0\n updated: bool = False\n for i in range(1, n + 1):\n if i > 1 and not updated:\n break\n\n updated = False\n for chosen_items in list(combinations(mapped_items, i)):\n sum_weight = 0\n sum_value = 0\n for item in chosen_items:\n sum_weight += item[\"w\"]\n sum_value += item[\"v\"]\n\n if sum_weight <= W and maximum_value < sum_value:\n updated = True\n maximum_value = sum_value\n return maximum_value",
"def solve_dp(n: int, W: int, weight: List[int], value: List[int]) -> int:\n dp = [[0] * (W + 1) for i in range(n + 1)]\n\n for i, (w, v) in enumerate(zip(weight, value)):\n for j in range(W + 1):\n dp[i + 1][j] = dp[i][j]\n if j - w >= 0:\n dp[i + 1][j] = max(dp[i][j], dp[i][j - w] + v)\n\n return dp[n][W]",
"def get_rank(weight):\n weight = min(1.0, max(weight, 0.0))\n ranks = [x for x in ALL_RANKS if weight >= x.min_weight]\n ranks.sort(key=lambda x: x.min_weight)\n return ranks.pop()",
"def weighted_choice(list_, weights=None):\n size = len(list_)\n if weights is not None:\n assert size == len(weights)\n\n if weights is None:\n probs = np.array([1 / float(size) for i in range(size)])\n else:\n probs = np.array(weights) / sum(weights) # just in case\n\n rand = np.random.random()\n\n _sum = 0\n for i in range(size):\n if _sum <= rand < _sum + probs[i]:\n choice = i\n break\n else:\n _sum += probs[i]\n\n return list_[choice]",
"def weighted_wheel_selection(weights: List[float]) -> int:\n\n cumulative_sum = np.cumsum(weights)\n prob = r.generate_uniform_random_number() * cumulative_sum[-1]\n\n for i, c_sum in enumerate(cumulative_sum):\n if c_sum > prob:\n return i\n\n return None",
"def weighted_choice(weights):\n totals = []\n running_total = 0\n\n for w in weights:\n running_total += w\n totals.append(running_total)\n\n rnd = random.random() * running_total\n for i, total in enumerate(totals):\n if rnd < total:\n return i",
"def _weighted_choice(self, lst):\n \n total_weight = reduce(lambda x,y:x+y, [tup[1] for tup in lst])\n n = random.uniform(0, total_weight)\n for item, weight in lst:\n if n < weight:\n break\n n = n - weight\n return item",
"def select(weights):\n r = random.random() * sum(weights)\n s = 0.0\n for k,w in enumerate(weights):\n s += w\n if r <= s:\n return k\n raise RuntimeError(\"select WTF from %s\" % weights)",
"def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))",
"def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))",
"def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))",
"def sample_from(self, weights):\n total = sum(weights)\n rnd = total * random.random() # uniform between 0 and total\n for i, w in enumerate(weights):\n rnd -= w # return the smallest i such that\n if rnd <= 0:\n return i # weights[0] + ... + weights[i] >= rnd",
"def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item",
"def randpck(elements, rand_function):\n\n # First, we compute the total weight (for example 10)\n total_weight = 0\n for e in elements:\n assert e[1] >= 0\n total_weight += e[1]\n\n # Then we generate a random number multiplied by the total weight (e.g. 0.4218 * 10 = 42.18)\n random_weight = rand_function() * total_weight\n\n # Lastly, we run through the list to find which one matches with the generated weight\n current_weight = 0\n for e in elements:\n current_weight += e[1]\n if random_weight < current_weight:\n return e[0]\n\n return None",
"def weighted_random_item(items, weight):\n if not items:\n return None\n\n weight_sum = sum(weight(item) for item in items)\n if weight_sum <= 0:\n return None\n\n choice = random.random() * weight_sum\n for item in items:\n choice -= weight(item)\n if choice < 0:\n return item, weight(item) / weight_sum\n return items[-1], -1 # floating-point rounding error",
"def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item",
"def weighted_choice(choices, weight):\n\t# requirements = random\n\tweights = []\n\t# get weight values for each of the choices\n\tfor choice in choices:\n\t\tchoice_weight = weight(choice)\n\t\tif not (isinstance(choice_weight, int) and choice_weight > 0):\n\t\t\traise TypeError('weight results must be positive integers')\n\t\tweights.append(choice_weight)\n\n\t# make a selection within the acceptable range\n\tselection = random.randint(0, sum(weights) - 1)\n\n\t# find and return the corresponding choice\n\tfor idx, choice in enumerate(choices):\n\t\tif selection < sum(weights[:idx + 1]):\n\t\t\treturn choice\n\traise RuntimeError('no selection could be made')",
"def pick_weighted(weights, vals, eps=1.0e-4):\n\t\n\tweightSum = cumsum(weights)\n\tif weightSum[-1] == 0:\n\t\treturn random.choice(vals)\n\tif abs(weightSum[-1]-1.0) > eps:\n\t\traise RuntimeError(\"Weights don't sum to 1\")\n\tr = random.uniform(0.0,1.0)\n\tfor v,w in zip(vals, weightSum):\n\t\tif r > w:\n\t\t\tcontinue\n\t\treturn v\n\treturn vals[-1]",
"def weighted_choice(items: List[Tuple[str, float]]) -> str:\r\n total_weight = sum(item[1] for item in items)\r\n n = random.uniform(0, total_weight)\r\n for item, weight in items:\r\n if weight > n:\r\n return item\r\n n -= weight\r\n return item",
"def dp_make_weight(egg_weights, target_weight, memo={}):\n\n \"\"\"\n 根据提示: 每个pound类型的蛋是无限的。\n 问题是提供一种蛋的组合,最好pound数等于或是接近总的weight 并且要满足数量要越少越好。\n 这是两个限制条件。但是提示也给了总是有egg为value1的,那么难度小了很多。\n 现在是怎样让蛋的数量越少越好。\n \n 1.最优子结构\n egg_weights 现在假设是(1, 5, 10, 25)\n dp_make_weight((1, 5, 10, 25),x,memo) , 当x - n >= 0 时(n代表 1,5,10,25),\n 然后在 dp_make_weight((1,5,10,25,x-n,memo) +1 中 挑选最小值。+1的原因是包含本次\n 2.重叠子问题\n 详见ps1b的图片。\n 那么memo记录的key 为 avail(即剩余的容量) ,value 为avail下最小的蛋的数量n。\n \n 那么base_case是什么?\n target == 0时,返回0\n 现在按照深度优先的思路思考\n \"\"\"\n\n if target_weight == 0:\n return 0\n\n if target_weight in memo:\n return memo[target_weight]\n\n result = None # 占位符,没有多大用\n\n for elt in egg_weights:\n if target_weight - elt >= 0: # 这样才有继续探索的必要\n tmp_result = dp_make_weight(egg_weights, target_weight - elt, memo) + 1\n if result is None or tmp_result < result:\n result = tmp_result\n memo[target_weight] = result\n return result",
"def get_min_weight_index(weights: list, mst_set: set) -> int:\n min_weight = math.inf\n index = 0\n\n for i in range(len(weights)):\n if weights[i] < min_weight and i not in mst_set:\n min_weight = weights[i]\n index = i\n\n return index",
"def find_probability(problist, listoffive):\n\tprobs = []\n\tfor i in listoffive:\n\t\tprobs.append(problist[i])\n\ttotprob = 1\n\tfor n in probs:\n\t\ttotprob = totprob * n\n\treturn totprob",
"def weighted_score(counters, lst, weight):\n if counters == None:\n counters = {}\n\n\n for item in lst:\n if item in counters:\n counters[item] += weight\n else:\n counters[item] = weight\n\n return counters",
"def find_best_match(organ: Organ, wait_list: WaitList,\n weights: Dict[int, float]) -> Optional[Patient]:\n # ANSI codes to emphasize output\n bold_red, red, reset = '\\033[31;1m', '\\033[31m', '\\033[0m'\n matches = wait_list.get_prioritized_patients(organ)\n\n # returns the patient with the highest priority within acceptable proximity\n while len(matches) != 0:\n patient = heapq._heappop_max(matches) # type: ignore\n if organ.viability >= weights[patient.location] - 10:\n return patient\n\n # in the event there are no matches\n print(f'\\n{bold_red}The following organ has no suitable matches:'\n f'\\n{red}{organ.__str__()}{reset}')\n return None",
"def dp_make_weight(egg_weights, target_weight, memo = {}):\r\n # construct table. outer loop: egg weights. inner loop: 0-target_weight\r\n # table will be stored in memo. key=egg_weight, value=list, indexed from 0-target_weight\r\n for i, w in enumerate(egg_weights):\r\n # initialize key-value pair for a given egg weight. Value is empty list to be filled in inner loop.\r\n memo[w] = []\r\n for j in range(target_weight + 1):\r\n # if weight is 0, no eggs\r\n if j == 0:\r\n memo[w].append(0)\r\n # if egg_weight is less than weight, minimize number of eggs\r\n elif w <= j:\r\n # to minimize: take the min of (using prior denomination to get same weight, using current denomation to get weight)\r\n # first item=prior egg value, same weight\r\n # second item=\"sub\" current egg value by subtracting it from weight and adding 1 to egg total\r\n \r\n # if first egg weight, no need to look at \"row\" above to minimize\r\n if i == 0:\r\n min_eggs = memo[w][j-w] + 1\r\n else:\r\n min_eggs = min(memo[egg_weights[i-1]][j], memo[w][j-w] + 1)\r\n memo[w].append(min_eggs)\r\n # else if egg_weight is more than weight, take prior denomination min number of eggs at j\r\n else:\r\n memo[w].append(memo[egg_weights[i-1]][j])\r\n\r\n # access bottom right value to get minimum number of coins (largest egg_weight at target_weight)\r\n # uncomment below to only returns min number of eggs\r\n #return memo[egg_weights[-1]][target_weight]\r\n\r\n # determine makeup of min number of egg: \r\n # cur_weight to keep track as we subtract from total weight\r\n cur_weight = target_weight\r\n \r\n # egg_choices: a dict that holds how many of each egg_weight are in the optimal solution\r\n egg_choices = {}\r\n \r\n #print(memo)\r\n \r\n # outer loop goes backwards from highest to smallest egg weight\r\n for i in range(len(egg_weights)-1, -1, -1):\r\n # check if equal to memo[i-1][j] (row above, same column). if not equal, i is in the set.\r\n while egg_weights[i] <= cur_weight:\r\n # also if smallest egg weight, keep subtracting until we get 0\r\n if i == 0 or (memo[egg_weights[i]][cur_weight] != memo[egg_weights[i-1]][cur_weight]):\r\n # if they are not equal, add to the count of i in the egg_choices dict\r\n if egg_weights[i] in egg_choices.keys():\r\n egg_choices[egg_weights[i]] += 1\r\n else:\r\n egg_choices[egg_weights[i]] = 1\r\n # subtract from current weight the egg weight accounted for\r\n cur_weight -= egg_weights[i]\r\n \r\n # break if all weight accounted for\r\n if cur_weight == 0:\r\n break\r\n \r\n # string together the min number of eggs and the composition\r\n out = str(memo[egg_weights[-1]][target_weight]) + ' ('\r\n \r\n # list of formatted value * key pairs\r\n eggs = []\r\n for key, value in egg_choices.items():\r\n eggs.append(str(value) + ' * ' + str(key))\r\n \r\n # join key/value pairs together\r\n out += ' + '.join(eggs)\r\n \r\n # finish off the string\r\n out += ' = ' + str(target_weight) + ')'\r\n return out",
"def selection_wheel(self, weighted_population):\n weight_total = sum((item[1] for item in weighted_population))\n n = random.uniform(0, weight_total)\n for item, weight in weighted_population:\n if n < weight:\n return item\n n = n - weight\n return item",
"def get_weight_class(weight):\n\n if(weight >= 3500):\n return 5\n elif(weight >= 3000 and weight < 3500):\n return 4\n elif(weight >= 2500 and weight < 3000):\n return 3\n elif(weight >= 2000 and weight < 2500):\n return 2\n else:\n return 1",
"def get_best_match(self, list):\n raise NotImplementedError",
"def knapsack(weights):\n\n n = len(weights)\n max_sum = sum(weights)\n\n result = []\n\n dp = [False for _ in range(max_sum + 1)]\n dp[0] = True\n\n for i in range(1, n + 1):\n # update dp from right to left for each new weight\n for x in range(max_sum, -1, -1):\n if dp[x]:\n dp[x + weights[i - 1]] = True\n\n for i in range(len(dp)):\n if dp[i]:\n result.append(i)\n\n return result # returns all possible sums that can be constructed given a list of weights\n\n # return dp"
] | [
"0.7426046",
"0.6773496",
"0.6738176",
"0.6716903",
"0.67166317",
"0.6681879",
"0.6489452",
"0.647155",
"0.6244381",
"0.6119967",
"0.6119967",
"0.6119967",
"0.6103172",
"0.5987175",
"0.59611946",
"0.5952065",
"0.5946552",
"0.594247",
"0.59135944",
"0.5880885",
"0.5846692",
"0.5805101",
"0.5796779",
"0.57954824",
"0.57887274",
"0.57846737",
"0.5746038",
"0.5729218",
"0.5725849",
"0.57252014"
] | 0.7051767 | 1 |
Adjusts the top10 list, kept in ascending order, by inserting a new item at the appropriate position and shifting the smaller entries left (dropping the smallest) | def adjust_top10(value, pos, weight, top10, top10weights):
# Create new top10 to be adjusted
newtop10 = top10
newtop10weights = top10weights
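    # Note: these names alias the caller's lists, so the slice assignments below update them in place.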
# Keep higher ones, shift lower ones left one
newtop10[0:pos] = top10[1:pos + 1]
newtop10weights[0:pos] = top10weights[1:pos + 1]
# add new ones
newtop10[pos] = value
newtop10weights[pos] = weight
return (newtop10, newtop10weights) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def top10(self, top10: List[Word]):\n\n self._top10 = top10",
"def move_top ( self ):\n list, index = self.get_info()\n self.value = [ list[index] ] + list[:index] + list[index+1:]",
"def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items",
"def test_sort_fewer_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.commit()\n list = top_n_in_order(1,5)\n self.assertEqual([(12, 1343), (3, 100), (1, 89)], list)",
"def top_n(items, n):\n\n for i in range(n):\n for j in range(len(items)-1-i):\n\n if items[j] > items[j+1]:\n items[j], items[j+1] = items[j+1], items[j]\n \n top_n = items[-n:]\n\n return top_n[::-1]",
"def test_sort_more_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n e4 = Experience(rid=1, uid=22, experience=1839)\n e5 = Experience(rid=1, uid=2, experience=20)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.add(e4)\n db.session.add(e5)\n db.session.commit()\n list = top_n_in_order(1, 3)\n self.assertEqual([(22, 1839), (12, 1343), (3, 100)], list)",
"def add_top_pairs(dry_run=False, pair_now=False):\n top = ratings.top_n(15)\n new_pairs = []\n for idx, t in enumerate(top[:10]):\n new_pairs += [[t[0], o[0]] for o in top[idx+1:idx+5]]\n\n if dry_run:\n print(new_pairs)\n return\n\n if pair_now:\n maybe_enqueue(new_pairs)\n else:\n _append_pairs(new_pairs)",
"def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]",
"def update_highscores(self):\n for i in range(len(self.highscores)):\n if self.score >= self.highscores[i]:\n self.highscores.insert(i, self.score)\n self.highscores.pop()\n break",
"def _set_top(self, user_n, item_n):\n self.user_n = user_n\n self.item_n = item_n",
"def find_top_unique(self, list_of_entries, top_n):\n\n\n if len(list_of_entries) < top_n:\n self.top_n_too_large_label = Label(self.main_frame,\n fg=\"red\",\n text=\"Max N = %s\" % len(list_of_entries))\n if type(list_of_entries[0]) is AudioEntry:\n self.top_n_too_large_label.grid(row=13, column=4)\n if type(list_of_entries[0]) is VideoEntry:\n self.top_n_too_large_label.grid(row=13, column=5)\n raise Exception(\"N is larger than the total number of words\")\n\n if self.top_n_too_large_label is not None:\n self.top_n_too_large_label.grid_remove()\n\n sorted_by_count = sorted(list_of_entries, key=self.get_count, reverse=True)\n #self.top_n_too_large_label = Label(self.main_frame, fg=\"red\", text=\"Max N = %s\" % len(list_of_entries))\n unique_entries = [[] for i in range(top_n)]\n\n curr_rank = 0\n prev_count = None\n curr_count = None\n\n for entry in sorted_by_count:\n\n if entry.word in self.general_parser.words:\n entry.in_general = True\n else:\n entry.in_general = False\n\n curr_count = entry.count\n\n if prev_count is None:\n if entry.word not in self.specific_month_words:\n unique_entries[curr_rank].append(entry)\n prev_count = entry.count\n entry.rank = 1\n continue\n\n\n if curr_rank >= top_n:\n break\n\n\n if entry.word not in self.specific_month_words:\n # increment rank if current entry has a different count\n # (the last set of entries having this count are all filled\n # into the unique_entries[])\n if curr_count != prev_count:\n curr_rank = curr_rank + 1\n if curr_rank >= top_n:\n break\n unique_entries[curr_rank].append(entry)\n prev_count = entry.count\n entry.rank = curr_rank + 1\n continue\n unique_entries[curr_rank].append(entry)\n entry.rank = curr_rank + 1\n\n\n\n return unique_entries[0:curr_rank + 1]",
"def pizza_sort(lst):\n length = len(lst)\n def based_god_help_me(lst,index=0):\n if index == length - 1:\n return\n greatest = index_largest(lst[index:]) + index\n lst[greatest], lst[index] = lst[index], lst[greatest]\n based_god_help_me(lst,index+1)\n return based_god_help_me(lst)",
"def insertionSort(list):",
"def get_top_10(data: List[EmissionPerCapita], current_year: int) -> List[EmissionPerCapita]:\r\n\r\n # Get the first 10 elements in data\r\n top_10_so_far = []\r\n for i in range(10):\r\n top_10_so_far.append(data[i])\r\n\r\n # Get the index for the current year.\r\n index = current_year - data[0].start_year\r\n\r\n # Mutate top_10_so_far to get the highest 10.\r\n for emission in data:\r\n for value in top_10_so_far:\r\n if value.epc_year[index] < emission.epc_year[index] and emission not in top_10_so_far:\r\n list.remove(top_10_so_far, value)\r\n list.append(top_10_so_far, emission)\r\n\r\n return top_10_so_far",
"def evaluate_elf(calories):\n if calories > top_three[2]:\n top_three.append(calories)\n elif calories > top_three[1]:\n top_three.popleft()\n top_three.insert(1, calories)\n elif calories > top_three[0]:\n top_three[0] = calories",
"def update_order():",
"def update_order():",
"def pizza_sort(lst):\n def help_func(lst, i):\n if i == len(lst) - 1:\n return lst\n else:\n tem1 = lst[i]\n tem2 = index_largest(lst[i:]) + i\n lst[i] = lst[index_largest(lst[i:]) + i]\n lst[tem2] = tem1\n return help_func(lst, i+1)\n return help_func\n help_func(lst, i=0)",
"def put_sorted_cards(result, cards, weight):\n result.append((cards2str(sort_cards(cards)), weight))",
"def cocktail_sort(num_list):\n\n # Setting variables\n start_index = 0\n end_index = len(num_list) - 1\n swapped = True\n\n while swapped:\n\n # Pass moves up\n swapped = False\n for i in range(start_index, end_index, 1):\n # Exchanges items\n if num_list[i] > num_list[i + 1]:\n temp = num_list[i]\n num_list[i] = num_list[i + 1]\n num_list[i + 1] = temp\n swapped = True\n end_index -= 1\n\n # Pass moves down\n swapped = False\n for i in range(end_index, start_index, -1):\n # Exchanges items\n if num_list[i] < num_list[i - 1]:\n temp = num_list[i]\n num_list[i] = num_list[i - 1]\n num_list[i - 1] = temp\n swapped = True\n start_index += 1",
"def top10(self) -> List[Word]:\n return self._top10",
"def get_item_based_topk(self, items, top_k=10, sort_top_k=False):\n\n # convert item ids to indices\n item_ids = items[self.col_item].map(self.item2index)\n\n # if no ratings were provided assume they are all 1\n if self.col_rating in items.columns:\n ratings = items[self.col_rating]\n else:\n ratings = pd.Series(np.ones_like(item_ids))\n\n # create local map of user ids\n if self.col_user in items.columns:\n test_users = items[self.col_user]\n user2index = {x[1]: x[0] for x in enumerate(items[self.col_user].unique())}\n user_ids = test_users.map(user2index)\n else:\n # if no user column exists assume all entries are for a single user\n test_users = pd.Series(np.zeros_like(item_ids))\n user_ids = test_users\n n_users = user_ids.drop_duplicates().shape[0]\n\n # generate pseudo user affinity using seed items\n pseudo_affinity = sparse.coo_matrix(\n (ratings, (user_ids, item_ids)), shape=(n_users, self.n_items)\n ).tocsr()\n\n # calculate raw scores with a matrix multiplication\n test_scores = pseudo_affinity.dot(self.item_similarity)\n\n # remove items in the seed set so recommended items are novel\n test_scores[user_ids, item_ids] = -np.inf\n\n top_items, top_scores = get_top_k_scored_items(scores=test_scores, top_k=top_k, sort_top_k=sort_top_k)\n\n df = pd.DataFrame(\n {\n self.col_user: np.repeat(test_users.drop_duplicates().values, top_items.shape[1]),\n self.col_item: [\n self.index2item[item] for item in top_items.flatten()\n ],\n self.col_prediction: top_scores.flatten(),\n }\n )\n\n # drop invalid items\n return df.replace(-np.inf, np.nan).dropna()",
"def InsertSort(num_list):\n for i in range(1,len(num_list)):\n for j in range (i,0,-1):\n if num_list[j]<num_list[j-1]:\n num_list[j],num_list[j-1] = num_list[j-1],num_list[j]\n return num_list",
"def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored",
"def perc_up(self, i):\r\n while i // 2 > 0:\r\n if self.items[i] > self.items[i // 2]:\r\n tmp = self.items[i // 2]\r\n self.items[i // 2] = self.items[i]\r\n self.items[i] = tmp\r\n i = i // 2",
"def insert_top_to_bottom(deck: List[int]) -> None:\n last = deck[-1]\n\n if last == max(deck):\n last = last - 1\n first_part = deck[:last]\n second_part = deck[last: -1]\n del deck[:]\n deck.extend(second_part)\n deck.extend(first_part)\n deck.append(last + 1)\n\n else:\n first_part = deck[:last]\n second_part = deck[last: - 1]\n del deck[:]\n deck.extend(second_part)\n deck.extend(first_part)\n deck.append(last)",
"def bring_to_front(self,itmkey):\n itms = self.get_items_list()\n if itmkey in itms:\n itm = itms[itmkey]\n z = itm['z']\n for k,it in itms.items():\n if it['z'] > z:\n it['z'] -= 1\n itm['z'] = len(itms)\n self.put_items_list(itms)\n return {'k':itmkey,'z':itm['z']}\n return None",
"def recommend_k_items(self, test, top_k=10, sort_top_k=False, remove_seen=False):\n\n test_scores = self.score(test, remove_seen=remove_seen)\n\n top_items, top_scores = get_top_k_scored_items(scores=test_scores, top_k=top_k, sort_top_k=sort_top_k)\n\n df = pd.DataFrame(\n {\n self.col_user: np.repeat(test[self.col_user].drop_duplicates().values, top_items.shape[1]),\n self.col_item: [\n self.index2item[item] for item in top_items.flatten()\n ],\n self.col_prediction: top_scores.flatten(),\n }\n )\n\n # drop invalid items\n return df.replace(-np.inf, np.nan).dropna()",
"def insert_top_to_bottom(deck):\n \n number = deck[-1]\n if number != get_big_joker_value(deck): \n middle = deck[number:-1]\n deck[:] = middle + deck[:number] + [number]",
"def insert_top_to_bottom(deck_of_cards):\n big_joker_value = get_big_joker_value(deck_of_cards)\n small_joker_value = get_small_joker_value(deck_of_cards)\n last_card = deck_of_cards[len(deck_of_cards) - 1]\n if not last_card == big_joker_value:\n top_few_cards = deck_of_cards[: last_card]\n deck_of_cards.extend(top_few_cards)\n\t# top_few_cards represents portion of deck to last_card index. This is \n\t# added to the bottom of the deck.\n deck_of_cards[: last_card] = []\n\t# Removes the duplicates\n deck_of_cards.remove(deck_of_cards[-(last_card + 1)])\n deck_of_cards.append(last_card)\n # This then added the last card to the bottom of the deck."
] | [
"0.69200873",
"0.61177766",
"0.6019761",
"0.5785143",
"0.574743",
"0.56948155",
"0.5632971",
"0.5610252",
"0.5595934",
"0.557611",
"0.5559975",
"0.5536772",
"0.5520824",
"0.5512247",
"0.5477111",
"0.54510283",
"0.54510283",
"0.54265624",
"0.5416262",
"0.5411289",
"0.54062337",
"0.5388858",
"0.53738225",
"0.5361916",
"0.53570336",
"0.533932",
"0.5334352",
"0.5325912",
"0.5322592",
"0.5313717"
] | 0.72702825 | 0 |
Calculates the correction factor for ambient air temperature and relative humidity, based on a linearization of the temperature dependency curve below and above 20 degrees Celsius, assuming a linear dependency on humidity | def get_correction_factor(self, temperature, humidity):
if temperature < 20:
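            # Below 20 degrees C: quadratic fit in temperature with a linear humidity term referenced to 33 %RH.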
return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) * self.CORD
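        # At or above 20 degrees C the dependency is approximated as linear in both temperature and humidity.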
return self.CORE * temperature + self.CORF * humidity + self.CORG | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)",
"def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)",
"def correction(self):\r\n \r\n # empirical coefficients:\r\n k3, k2, k1, k0 = 0.0892, 0.0544, 0.2511, -0.0017\r\n \r\n # thrust as a function of the azimuth angle and the loads:\r\n thrust = self.qn*np.sin(Turbine.t) + self.qt*np.cos(Turbine.t)\r\n \r\n # interpolator function for the thrust:\r\n function = interp1d(Turbine.t, thrust, kind='cubic')\r\n \r\n # vectorize the function so that it takes an array of angles:\r\n __function__ = np.vectorize(function)\r\n \r\n # thrust coefficient integrating according to phi:\r\n self.cth = simps(__function__(Turbine.p), Turbine.p)\r\n \r\n # induction factor:\r\n self.a = k3*self.cth**3 + k2*self.cth**2 + k1*self.cth + k0\r\n \r\n # correction factor:\r\n if self.a <= 0.15:\r\n self.ka = 1.0/(1.0 - self.a)\r\n else:\r\n self.ka = (1./(1 - self.a))*(0.65 + 0.35*exp(-4.5*(self.a - 0.15)))",
"def fRwTemperatureCorrected(Rw_Temp1, Temp1, Temp2):\n\treturn Rw_Temp1 * ((Temp1 + 21.5) / (Temp2 + 21.5))",
"def __t_fine__(self, adc_temperature):\n var1 = (((adc_temperature >> 3) -\n (self._calibration_t[0] << 1)) * self._calibration_t[1]) >> 11\n var2 = (((\n ((adc_temperature >> 4) - self._calibration_t[0]) *\n ((adc_temperature >> 4) - self._calibration_t[0])) >> 12)\n * self._calibration_t[2]) >> 14\n return var1 + var2",
"def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646",
"def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49",
"def temperature() -> float:",
"def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff",
"def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()",
"def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def apply_weather_correction(\n enduse,\n fuel_y,\n cooling_factor_y,\n heating_factor_y,\n enduse_space_heating,\n enduse_space_cooling\n ):\n if enduse in enduse_space_heating:\n fuel_y = fuel_y * heating_factor_y\n elif enduse in enduse_space_cooling:\n fuel_y = fuel_y * cooling_factor_y\n\n return fuel_y",
"def factorizeLinearSum(eoi400,ei400,eo400,e400,eoi280,ei280,eo280,e280):\n\n var = \"atm/TREFHT\"\n dT = eoi400 - e280\n\n dTCO2 = (2 * (e400 - e280) +\n (eo400 - eo280) +\n (ei400 - ei280) +\n 2 * (eoi400 - eoi280)) / 6\n\n dTtopo = (2 * (eo280 - e280) +\n (eo400 - e400) +\n (eoi280 - ei280) +\n 2 * (eoi400 - ei400)) / 6\n\n dTice = (2 * (ei280 - e280) +\n (ei400 - e400) +\n (eoi280 - eo280) +\n 2 * (eoi400 - eo400)) / 6\n\n return dT, dTCO2, dTtopo, dTice",
"def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT",
"def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature",
"def air_humidity_method_qsat26air(air_temperature,surface_air_pressure,relative_humdity):\n es = vapor_pressure(air_temperature,surface_air_pressure)\n em = 0.01*relative_humdity*es\n air_humidity = 622.*em/(surface_air_pressure-0.378*em)\n return air_humidity",
"def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)",
"def compute_dewpoint(temperature, humidity):\n\n temp_C = (temperature - 32) * 5 / 9 # Convert temperature from deg F to deg C\n rh = humidity / 100\n\n b = 18.678\n c = 257.14 # deg C\n\n gamma = math.log(rh) + (b * temp_C) / (c + temp_C)\n tdp = c * gamma / (b -gamma)\n\n tdp_F = 9 / 5 * tdp + 32 # Convert temperature from deg C to deg F\n return tdp_F;",
"def correct_temp(temp_tab):\n output = subprocess.check_output(\"cat /sys/class/thermal/thermal_zone0/temp\", shell=True)\n cpu_temp = int(output)/1000\n temp_calibrated = temp_tab - ((cpu_temp - temp_tab)/1.5)\n return temp_calibrated",
"def __getTemperatureCalibrationCoefficients(self):\n src10 = self.read_byte_data(self.address, 0x10)\n src11 = self.read_byte_data(self.address, 0x11)\n src12 = self.read_byte_data(self.address, 0x12)\n c0 = (src10 << 4) | (src11 >> 4)\n c0 = getTwosComplement(c0, 12)\n c1 = ((src11 & 0x0F) << 8) | src12\n c1 = getTwosComplement(c1, 12)\n return c0, c1",
"def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)",
"def water_correction_energies(fname, se_h2o_hof, se_h_hof, ref_h2o_ener,\n se_au=False, ref_au=True):\n check_for_keys(fname, REFEK, NATMK, SEEK)\n with h5.File(fname, 'r') as ifi:\n # This calculates the reference heat of formation\n # Note the reference is assumed to be in eH\n correction = ifi[REFEK][:] - ((ifi[NATMK][:]//3) * ref_h2o_ener)\n if ref_au:\n correction *= 627.509\n if se_au:\n correction -= (ifi[SEEK][:] - se_h_hof - (ifi[NATMK][:]//3) * se_h2o_hof) * 627.509\n else:\n correction -= (ifi[SEEK][:] - se_h_hof - (ifi[NATMK][:]//3) * se_h2o_hof)\n return correction",
"def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity",
"def get_actual_air_conditioned_temperature(\n hc_period: np.ndarray,\n theta_ac: np.ndarray, v_supply: np.ndarray, theta_supply_h: np.ndarray, theta_supply_c: np.ndarray,\n l_d_h: np.ndarray, l_d_cs: np.ndarray,\n u_prt: float, a_prt: np.ndarray, a_hcz: np.ndarray, q: float) -> np.ndarray:\n\n rho = get_air_density()\n c = get_specific_heat()\n\n a_prt = a_prt.reshape(1, 5).T\n a_hcz = a_hcz[0:5].reshape(1, 5).T\n\n theta_ac_act_h = np.maximum(theta_ac + (c * rho * v_supply * (theta_supply_h - theta_ac) - l_d_h * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n theta_ac_act_c = np.minimum(theta_ac - (c * rho * v_supply * (theta_ac - theta_supply_c) - l_d_cs * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n return theta_ac_act_h * (hc_period == 'h') + theta_ac_act_c * (hc_period == 'c') + theta_ac * (hc_period == 'm')",
"def linearbattfunc(x, IV):\n i1, i2, V1, V2 = IV\n di2 = a*io*(n*F)/(R*T)*(V1 - V2)\n #Kinetics\n di1 = -di2\n #charge neutrality\n dV1 = -i1/s\n #solids ohms law\n dV2 = -i2/K\n #liquids ohms law\n return di1, di2, dV1, dV2",
"def vct_resistance_correction(\n data: pd.DataFrame, data_TT_MDL: pd.DataFrame\n) -> pd.DataFrame:\n df_resistance = data.groupby(by=\"test type\").get_group(\"resistance\")\n result1 = least_squares(\n fun=error, x0=[0], kwargs={\"y\": df_resistance[\"fx\"], \"u\": df_resistance[\"u\"]}\n )\n\n data[\"fx\"] -= res(result1.x, u=data[\"u\"])\n\n R_m = data_TT_MDL[\"Rm [N]\"]\n fx = -R_m\n result2 = least_squares(\n fun=error,\n x0=result1.x,\n kwargs={\"y\": fx, \"u\": fx.index},\n )\n\n R_factor = 1.14\n # R_factor = 1.14 * 1.43\n data[\"fx\"] += R_factor * res(result2.x, u=data[\"u\"])\n\n return data",
"def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp",
"def factorizeScaledResidualAbs(eoi400,ei400,eo400,e400,eoi280,ei280,eo280,e280):\n var = \"atm/TREFHT\"\n dT, dTCO2, dTtopo, dTice = factorize_Lunt2012(eoi400,ei400,eo400,e400,eoi280,ei280,eo280,e280)\n\n res = eoi400-e280 - (dTCO2+dTtopo+dTice)\n\n dTCO2new = dTCO2 + (res*abs(dTCO2) / (abs(dTCO2)+abs(dTtopo)+abs(dTice)))\n dTtoponew = dTtopo + (res*abs(dTtopo) / (abs(dTCO2)+abs(dTtopo)+abs(dTice)))\n dTicenew = dTice + (res*abs(dTice) / (abs(dTCO2)+abs(dTtopo)+abs(dTice)))\n\n\n return dT, dTCO2, dTtopo, dTice, dTCO2new, dTtoponew, dTicenew",
"def calculate_dew_point(temp, hum):\n return temp - (100 - hum) / 5"
] | [
"0.67288923",
"0.67288923",
"0.63658255",
"0.6330217",
"0.6249179",
"0.61254853",
"0.60804427",
"0.591038",
"0.5909975",
"0.58495927",
"0.58438057",
"0.58438057",
"0.57863986",
"0.57804346",
"0.5774944",
"0.5772292",
"0.5767601",
"0.5738072",
"0.5698512",
"0.56778646",
"0.56462497",
"0.56458294",
"0.5644073",
"0.56332135",
"0.56271404",
"0.5626142",
"0.561992",
"0.56010747",
"0.55888635",
"0.5588817"
] | 0.7517804 | 1 |
Returns the resistance of the sensor in kOhms, or -1 if no value could be read from the pin | def get_resistance(self):
adc = ADC(self.pin)
value = adc.read()
if value == 0:
return -1
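        # Sensor resistance from the load-resistor voltage divider: (full_scale / reading - 1) * RLOAD, where 4095 is the 12-bit full-scale count.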
return (4095./value - 1.) * self.RLOAD | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023",
"def get_distance():\n \n GPIO.output(pinTrigger, False) # pulse off\n time.sleep(0.2)\n\n GPIO.output(pinTrigger,True) # send 10us pulse\n time.sleep(10e-6)\n GPIO.output(pinTrigger,False)\n\n StartTime = time.time() # start timer\n\n while GPIO.input(pinEcho)==0: # keep timer reset\n StartTime = time.time()\n\n while GPIO.input(pinEcho) == 1:\n StopTime = time.time()\n\n if StopTime - StartTime >= 0.04:\n print(\"Too close!!!\")\n StopTime = StartTime\n break\n\n ElapsedTime = StopTime - StartTime\n\n distance = (ElapsedTime * 34326)/2\n\n print('{:2.1f} cm'.format(distance))\n #dots = int(distance/2)\n #print('.'*dots)\n\n return(distance)",
"def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))",
"def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1",
"def min_humidity(self):\n return 0",
"def READ_PRESSURE_SENSOR():\n return 15.246",
"def input_resistance(self):\n return None",
"def gpio_read_analogue(self, pin: int) -> float:\n return randint(0, 500) / 100",
"def get_meas_time_resistance(instrument):\n return float(instrument.query('SENSE:RESISTANCE:APER?'))",
"def get_resistance(self):\n\t\tdata = bus.read_byte_data(AD5259_DEFAULT_ADDRESS, AD5259_WORD_ADDR_RDAC)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 5.0\n\t\tresistance_wa = 5 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}",
"def moisture(self):\n if self.moisture_sensor is None:\n return None\n else:\n return self.moisture_sensor.percent",
"def set_resistance(self):\n\t\tself.resistance = int(input(\"Enter the Value from (0-256)= \"))\n\t\tif self.resistance > 256 :\n\t\t\tself.resistance = int(input(\"Enter the Value from (0-256)= \"))\n\t\t\n\t\treturn self.resistance",
"def get_on_resistance(self):\n is_nchannel = True\n stack = 4\n is_cell = False\n return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)",
"def set_resistance(self, value):\n self.write(\":RES {}OHM\".format(value))",
"def get_psi(data):\n max_sensor_psi = 100 # Also 30\n psi = (data - 0.51) * (max_sensor_psi / 4)\n psi = round(psi, 0)\n return psi",
"def nitrogen_dioxide(self) -> float | None:\n return round_state(self._get_sensor_value(API_NO2))",
"def get_resistance(self):\n\t\tif self.channel == 0 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC1_EEMEM1)\n\t\telif self.channel == 1 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC3_EEMEM3)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 1.0 + 0.075\n\t\tresistance_wa = 1.0 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}",
"def get_ir_sensor_temperature(self) -> float:\n self.serial.write(b\"T!\")\n ir_sensor_temp = self.__extract_int(self.__read_response(1)[0], b\"!2\")\n\n return round(ir_sensor_temp / 100, 2)",
"def unit_of_measurement(self):\r\n return self._sensor_cfg[1]",
"def unit_of_measurement(self):\r\n return self._sensor_cfg[1]",
"def state(self):\n result = self.probe.get_data(SENSOR_TYPES[self.sensor][2])\n round_to = SENSOR_TYPES[self.sensor][3].get(\"round\")\n if round_to is not None:\n result = round(result, round_to)\n return result",
"def distance_sensor(unit):\n\n\tsensor_name = \"baseBoard\"\n\treg_addr = 24\n\tdata_len = 56\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\tdata = rospy.wait_for_message(\"MediumSize/SensorHub/Range\", Range, 2)\n\tdistance = data.range\n\t# transfer sensor data to target unit\n\tif unit == \"cm\":\n\t\tresult = distance / 10.0\n\telse:\n\t\tresult = distance\n\n\tdelete_sensor(sensor_name)\n\treturn result",
"def get_analog(self,pin):\n try:\n cmd = protocol.GET_ANALOG.format(pin)\n response = self.__send_and_receive(cmd)\n values = response.split(' ')\n printf(values, type=DEBUG)\n val = values[1][1:]\n return int(float(val))\n except Exception as e:\n printf(\"Error {}\".format(e))\n return None",
"def temperature() -> float:",
"def task(node_dict):\n # always check that the sensor has been initialized\n if node_dict['pn'].heading == empty_value:\n # if sensor is not reading, return no motor command\n return 0\n # compute heading difference\n hdiff = heading_diff(r_target, node_dict['pn'].heading)\n # p-control\n hout = hdiff * P\n # limit output if necassary\n if abs(hout) > r_max:\n hout = copysign(r_max, hout)\n return hout",
"def read_odometer(self):\n msg = f\"Car has {self.odometer_reading} miles on it.\"\n return msg",
"def read_led(self, pin):\n value = 0 #Default to nowt\n if self.iface.connected:\n try:\n value = self.iface.get_PWM_dutycycle(pin)\n except (AttributeError, IOError, pigpio.error):\n logging.error(\" Cannot read PWM of pin #%s\" % (pin,))\n else:\n logging.error(\" Interface not connected. Cannot read PWM of pin #%s.\" % (pin,))\n return value",
"def read_odometer(self):\r\n print(\"This car has \"+str(self.odometer_reading)+\" miles on it.\")",
"def get_distance(self):\n\n # Activate trigger\n self.trigger()\n\n # Detect rising edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.RISING, timeout=2)\n if channel is None:\n # Timeout on wait of rising interrupt\n return None\n else:\n # Rising edge detected, save pulse start\n pulse_start = time.time()\n\n\n # Detect falling edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.FALLING, timeout=2)\n if channel is None:\n # Timeout on wait of falling interrupt\")\n return None\n else:\n # Falling edge detected, save pulse end\n pulse_end = time.time()\n\n # Calculated pulse width in microseconds (x1mln)\n pulse_width = (pulse_end - pulse_start)*1000000\n\n # Return distance in cm\n return pulse_width / 58",
"def state(self):\n value = getattr(self.coordinator.senec, self._sensor)\n try:\n rounded_value = round(float(value), 2)\n return rounded_value\n except ValueError:\n return value"
] | [
"0.7418249",
"0.64756715",
"0.63563746",
"0.62588537",
"0.6185685",
"0.6180163",
"0.61774236",
"0.5992633",
"0.59730357",
"0.59586394",
"0.5931926",
"0.5876045",
"0.58549595",
"0.58542717",
"0.5832019",
"0.5829989",
"0.58182263",
"0.5806874",
"0.5794576",
"0.5794576",
"0.576387",
"0.5760282",
"0.57326984",
"0.5732564",
"0.56976",
"0.5657945",
"0.561256",
"0.56043583",
"0.5603248",
"0.55982035"
] | 0.71054703 | 1 |
Gets the resistance of the sensor corrected for temperature/humidity | def get_corrected_resistance(self, temperature, humidity):
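        # Divide the raw resistance by the environmental correction factor.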
return self.get_resistance()/ self.get_correction_factor(temperature, humidity) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023",
"def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD",
"def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))",
"def get_ir_sensor_temperature(self) -> float:\n self.serial.write(b\"T!\")\n ir_sensor_temp = self.__extract_int(self.__read_response(1)[0], b\"!2\")\n\n return round(ir_sensor_temp / 100, 2)",
"def read_gas_resistance(self):\n #Declare global variables\n global calAmbTemp\n\n self._force_read(True)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n gasResADC = (self._read_register_1ubyte(self.BME680_GAS_R_MSB) << 2) | (self._read_register_1ubyte(self.BME680_GAS_R_LSB) >> 6)\n gasRange = self._read_register_1ubyte(self.BME680_GAS_R_LSB) & 0x0F\n\n calAmbTemp = self._compensate_temperature(tempADC)\n val = self._calculate_gas_resistance(gasResADC, gasRange)\n\n return float(val)",
"def get_meas_time_resistance(instrument):\n return float(instrument.query('SENSE:RESISTANCE:APER?'))",
"def get_rel_humidity(\n self, sensitivity: Optional[str] = None, rhel_sensor: Optional[int] = None\n ) -> float:\n if sensitivity is None or rhel_sensor is None:\n sensitivity, rhel_sensor = self.get_rel_humidity_sensor()\n if sensitivity == \"hh\":\n rh = rhel_sensor * 125 / 65536 - 6\n elif sensitivity == \"h\":\n rh = rhel_sensor * 125 / 100 - 6\n else:\n raise CloudWatcherException(f\"Unknown rhel sensor type {sensitivity}\")\n return rh",
"def temperature() -> float:",
"def _calculate_heater_resistance(self, target_temp):\n if target_temp > 400: #Maximum temperature\n target_temp = 400\n\n var1 = (calGH1 / 16.0) + 49.0\n var2 = ((calGH2 / 32768.0) * 0.0005) + 0.00235\n var3 = calGH3 / 1024.0\n var4 = var1 * (1.0 + (var2 * target_temp))\n var5 = var4 + (var3 * self.calAmbTemp)\n res_heat = 3.4 * ((var5 * (4 / (4 + calResHeatRange)) * (1 / (1 + (calResHeatVal * 0.002)))) - 25)\n\n return int(res_heat)",
"def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp",
"def humidity(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"humidity\"))\r\n return round(self._humidity * 100)",
"def temperature(self):\n return self.read_short(65) / 340.0 + 36.53",
"def get_temperature(self):\r\n\r\n\t# get current resolution\r\n\r\n\tconf = self.read_config()\r\n\tmask = 0x60 # 0110 0000\r\n\tres = conf & mask # extract resolution from config register\r\n\t# get temperature from register\r\n \r\n self.write('\\x00')\r\n data = self.read(2)\r\n t_raw = struct.unpack('>h', data)\r\n\tt_raw = t_raw[0]\r\n\r\n#\tmsb = 0b11110101\r\n#\tlsb = 0b11100000\r\n#\tdata = struct.pack('BB', msb, lsb)\r\n # t_raw = struct.unpack('>h', data)\r\n#\tt_raw = t_raw[0]\r\n#\tprint t_raw\r\n\t\r\n # return t_raw\r\n\t# t_raw = ((msb << 8) + lsb) # convert to 2 Byte Integer\r\n\r\n\tif (res == 0x00): # 9 bit resolution 0.5 degree\r\n\t print \"res: 0.5\"\r\n\t return (t_raw >> 7) * 0.5\r\n\r\n\tif (res == 0x20): # 10 bit resolution 0.25 degree\r\n\t print \"res: 0.25\"\r\n\t return (t_raw >> 6) * 0.25\r\n\r\n\tif (res == 0x40): # 11 bit resolution 0.125 degree\r\n\t print \"res: 0.125\"\r\n\t return (t_raw >> 5) * 0.125\r\n\r\n\tif (res == 0x60): # l2 bit resolution 0.0625 degree\r\n\t print \"res: 0.0625\"\r\n\t return (t_raw >> 4) * 0.0625",
"def relative_humidity(self):\n humidity_string = self._current_observation['relative_humidity']\n return float(humidity_string.strip('%'))",
"def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def humidity(self, update_temperature=True):\n if (self.t_fine is None) or update_temperature:\n self.temperature()\n\n adc_H = float(self.raw_humidity())\n var_H = self.t_fine - 76800.0\n var_H = (\n (adc_H - (self.dig_H4 * 64.0 + self.dig_H5 / 16384.0 * var_H)) *\n (self.dig_H2 / 65536.0 * (\n 1.0 + self.dig_H6 / 67108864.0 * var_H *\n (1.0 + self.dig_H3 / 67108864.0 * var_H)))\n )\n var_H = var_H * (1.0 - self.dig_H1 * var_H / 524288.0)\n\n if (var_H > 100.0):\n var_H = 100.0\n elif (var_H < 0.0):\n var_H = 0.0\n\n return round(var_H, 3)",
"def humidity(self):\r\n self._read_temperature()\r\n hum = self._read_register(_BME280_REGISTER_HUMIDDATA, 2)\r\n #print(\"Humidity data: \", hum)\r\n adc = float(hum[0] << 8 | hum[1])\r\n #print(\"adc:\", adc)\r\n\r\n # Algorithm from the BME280 driver\r\n # https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c\r\n var1 = float(self._t_fine) - 76800.0\r\n #print(\"var1 \", var1)\r\n var2 = (self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1)\r\n #print(\"var2 \",var2)\r\n var3 = adc - var2\r\n #print(\"var3 \",var3)\r\n var4 = self._humidity_calib[1] / 65536.0\r\n #print(\"var4 \",var4)\r\n var5 = (1.0 + (self._humidity_calib[2] / 67108864.0) * var1)\r\n #print(\"var5 \",var5)\r\n var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5\r\n #print(\"var6 \",var6)\r\n var6 = var3 * var4 * (var5 * var6)\r\n humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)\r\n\r\n if humidity > _BME280_HUMIDITY_MAX:\r\n return _BME280_HUMIDITY_MAX\r\n if humidity < _BME280_HUMIDITY_MIN:\r\n return _BME280_HUMIDITY_MIN\r\n # else...\r\n return humidity",
"def get_rel_humidity_sensor(self) -> Tuple[str, int]:\n self.serial.write(b\"h!\")\n rhel_sensor = self.__read_response(1)[0]\n if rhel_sensor[0:3] == b\"!hh\":\n rhel_sensor = self.__extract_int(rhel_sensor, b\"!hh\")\n # if we get 65536, the sensor is not connected\n if rhel_sensor == 65535:\n raise CloudWatcherException(\n \"High precision RHEL/temp sensor not connected\"\n )\n return \"hh\", rhel_sensor\n else:\n rhel_sensor = self.__extract_int(rhel_sensor, b\"!h\")\n # if we get 100, the sensor is not connected\n if rhel_sensor == 100:\n raise CloudWatcherException(\n \"Low precision RHEL/temp sensor not connected\"\n )\n return \"h\", rhel_sensor",
"def get_resistance(self):\n\t\tif self.channel == 0 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC1_EEMEM1)\n\t\telif self.channel == 1 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC3_EEMEM3)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 1.0 + 0.075\n\t\tresistance_wa = 1.0 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}",
"def READ_PRESSURE_SENSOR():\n return 15.246",
"def humidity(self):\n names = ['anc_air_relative_humidity']\n return self.sensor.get_with_fallback('humidity', names)",
"def get_temp(self):\n\t\traw_temp = self.read_i2c_word(self.TEMP_OUT0)\n\n\t\t# Get the actual temperature using the formule given in the\n\t\t# MPU-6050 Register Map and Descriptions revision 4.2, page 30\n\t\tactual_temp = (raw_temp / 340.0) + 36.53\n\n\t\treturn actual_temp",
"def get_resistance(self):\n\t\tdata = bus.read_byte_data(AD5259_DEFAULT_ADDRESS, AD5259_WORD_ADDR_RDAC)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 5.0\n\t\tresistance_wa = 5 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}",
"def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0",
"def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1",
"def getHumidity(self):\n return self.humidity",
"def humidity(self):\n return self._humidity",
"def read_humidity(self):\n self._force_read(False)\n\n humADC = (self._read_register_1ubyte(self.BME680_HUM_MSB) << 8) | (self._read_register_1ubyte(self.BME680_HUM_LSB))\n\n return float(self._compensate_humidity(humADC))",
"def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp"
] | [
"0.7635503",
"0.74764",
"0.69839656",
"0.68464166",
"0.6839649",
"0.67363864",
"0.6672957",
"0.663751",
"0.65953994",
"0.65851945",
"0.6578019",
"0.65570176",
"0.65442514",
"0.6527985",
"0.64984643",
"0.64984643",
"0.6488228",
"0.6481326",
"0.6471587",
"0.646373",
"0.639092",
"0.63794535",
"0.63506013",
"0.63406694",
"0.63393354",
"0.63317955",
"0.632059",
"0.630704",
"0.6301418",
"0.6294441"
] | 0.7932213 | 1 |
Returns the resistance RZero of the sensor (in kOhms) for calibration purposes | def get_rzero(self):
return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023",
"def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD",
"def r(self) -> float:\n return self._ohms.real",
"def _calculate_r0(self):\n\n self.r0 = self.coherence_cell_size * (np.cos(np.deg2rad(self.zenith_angle)))**(3/5)",
"def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))",
"def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number",
"def r0(self):\n return self.p[0] / self.p[1]",
"def get_meas_time_resistance(instrument):\n return float(instrument.query('SENSE:RESISTANCE:APER?'))",
"def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)",
"def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)",
"def omtrek(self):\n x = pi*self.r**2\n return x",
"def residual(us):\n return self.h_S(z0, us) - h_P",
"def get_radiation():\n sun_pos = get_sun_position()\n if sun_pos <= POSITION_MIN or sun_pos >= POSITION_MAX:\n return 0\n else:\n # Calculate a new delta.\n delta = random.randint(0, RADIATION_DELTA)\n if random.random() > 0.5:\n delta = -1 * delta\n # Calculate the radiation based on the sun position.\n new_radiation = round(-0.1279 * pow(sun_pos, 2) + 46.05 * sun_pos - 3100)\n # Apply the delta and return the value.\n return new_radiation + delta",
"def get_rel_humidity(\n self, sensitivity: Optional[str] = None, rhel_sensor: Optional[int] = None\n ) -> float:\n if sensitivity is None or rhel_sensor is None:\n sensitivity, rhel_sensor = self.get_rel_humidity_sensor()\n if sensitivity == \"hh\":\n rh = rhel_sensor * 125 / 65536 - 6\n elif sensitivity == \"h\":\n rh = rhel_sensor * 125 / 100 - 6\n else:\n raise CloudWatcherException(f\"Unknown rhel sensor type {sensitivity}\")\n return rh",
"def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp",
"def _get_R(self, net_r_amp):\n return np.abs(net_r_amp)**2",
"def rsr(self) -> float:\n return float(self.rmse() / np.std(self.true))",
"def Z_rms(self) -> np.float64:\n return np.sqrt(np.sum(self.ionic_fractions * self.charge_numbers**2))",
"def get_nuclear_potential(self, r):\n\n return -self.nuclear_charge/r",
"def get_R(self):\n return self.R_min * tf.exp(self.R_ * self.log_R_range)",
"def rrint(self):\n if len(self.data.peaks):\n return (np.diff(self.data._masked) / self.data.fs).compressed()",
"def get_resistance(self):\n\t\tif self.channel == 0 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC1_EEMEM1)\n\t\telif self.channel == 1 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC3_EEMEM3)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 1.0 + 0.075\n\t\tresistance_wa = 1.0 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}",
"def get_RM_K(vsini_kms, rp_Rearth, Rs_Rsun):\n D = (rp_Rearth * u.Rearth.to(u.m) / Rs_Rsun * u.Rsun.to(u.m)) ** 2\n return (vsini_kms * D / (1 - D)) * 1e3",
"def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1",
"def input_resistance(self):\n return None",
"def rae(self) -> float:\n return float(np.sum(self._ae()) / (np.sum(np.abs(self.true - np.mean(self.true))) + EPS))",
"def calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r",
"def rv_from_r0v0(mu, R0, V0, t):\n #...Magnitudes of R0 and V0:\n r0 = norm(R0)\n v0 = norm(V0)\n #...Initial radial velocity:\n vr0 = np.dot(R0, V0)/r0\n #...Reciprocal of the semimajor axis (from the energy equation):\n alpha = 2/r0 - pow(v0,2)/mu\n #...Compute the universal anomaly:\n x = kepler_U(mu, t, r0, vr0, alpha)\n #...Compute the f and g functions:\n f, g = calc_f_g(mu, x, t, r0, alpha)\n #...Compute the final position vector:\n R = f*R0 + g*V0\n #...Compute the magnitude of R:\n r = norm(R)\n #...Compute the derivatives of f and g:\n fdot, gdot = calc_fdot_gdot(mu, x, r, r0, alpha)\n #...Compute the final velocity:\n V = fdot*R0 + gdot*V0\n return R, V"
] | [
"0.75763285",
"0.75763285",
"0.72106373",
"0.70308226",
"0.6785906",
"0.6573991",
"0.6407301",
"0.62468076",
"0.61387134",
"0.60672534",
"0.6046186",
"0.6046186",
"0.59931934",
"0.5981607",
"0.5975342",
"0.5957084",
"0.59491",
"0.59407234",
"0.5909753",
"0.58624816",
"0.5808946",
"0.5779981",
"0.5773813",
"0.5759319",
"0.5757578",
"0.57565176",
"0.57490623",
"0.57175994",
"0.5717317",
"0.5715654"
] | 0.7964137 | 1 |
Returns the resistance RZero of the sensor (in kOhms) for calibration purposes corrected for temperature/humidity | def get_corrected_rzero(self, temperature, humidity):
return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023",
"def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD",
"def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)",
"def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)",
"def _calculate_r0(self):\n\n self.r0 = self.coherence_cell_size * (np.cos(np.deg2rad(self.zenith_angle)))**(3/5)",
"def r(self) -> float:\n return self._ohms.real",
"def calibrate_high(voltage, serial):\n # Based on the SONIC serial number, get the Krypton calibration coeffs\n if serial == 'Gill R2A 0043':\n coeffs = krypton_1199\n elif serial == 'Gill HS 000046':\n coeffs = krypton_1094\n\n # make a storage array\n rho = np.zeros_like(voltage)\n\n # see the percentage of wrong measurements\n num_corrupt_values = (voltage <= 0).sum() / len(voltage)\n # after the original script: set negative voltages to nan\n voltage[voltage <= 0] = 0.01\n # if too many values are corrupt, fill all with nans and return\n if num_corrupt_values > 0.2:\n rho.fill(np.nan)\n return rho\n # if enough values are okay:\n else:\n # get \"high range\" coefficients\n XKw = coeffs['path_len'] * coeffs['Kwh']\n logV0 = np.log(coeffs['V0h'])\n # calculate density\n rho = (np.log(voltage) - logV0) / XKw\n\n return rho",
"def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))",
"def get_rel_humidity(\n self, sensitivity: Optional[str] = None, rhel_sensor: Optional[int] = None\n ) -> float:\n if sensitivity is None or rhel_sensor is None:\n sensitivity, rhel_sensor = self.get_rel_humidity_sensor()\n if sensitivity == \"hh\":\n rh = rhel_sensor * 125 / 65536 - 6\n elif sensitivity == \"h\":\n rh = rhel_sensor * 125 / 100 - 6\n else:\n raise CloudWatcherException(f\"Unknown rhel sensor type {sensitivity}\")\n return rh",
"def read_calibrated(self):\n\n self.read_sensors()\n\n print(\"uncalibrated readings\")\n self.print_sensor_values(self.sensorValues)\n\n for i in range(0, self.NUM_SENSORS):\n denominator = self.calibratedMax[i] - self.calibratedMin[i]\n val = 0\n if denominator != 0:\n val = (self.sensorValues[i] - self.calibratedMin[i]) * 1000 / denominator\n if val < 0:\n val = 0\n elif val > 1000:\n val = 1000\n self.sensorValues[i] = val\n\n print(\"calibrated readings\")\n self.print_sensor_values(self.sensorValues)",
"def calibrate_decide(voltage, serial):\n # Based on the SONIC serial number, get the Krypton calibration coeffs\n if serial == 'Gill R2A 0043':\n coeffs = krypton_1199\n elif serial == 'Gill HS 000046':\n coeffs = krypton_1094\n\n # make a storage array\n rho = np.zeros_like(voltage)\n\n # see the percentage of wrong measurements\n num_corrupt_values = (voltage < 0).sum() / len(voltage)\n # after the original script: set negative voltages to nan\n voltage[voltage <= 0] = 0.01\n # if too many values are corrupt, fill all with nans and return\n if num_corrupt_values > 0.2:\n rho.fill(np.nan)\n return rho\n else:\n\n # get rho using full range coeffs\n XKw = coeffs['path_len'] * coeffs['Kwf']\n logV0 = np.log(coeffs['V0f'])\n rho_temp = (np.log(voltage) - logV0) / XKw\n\n # determine new coeffs based on the \"temporary\" values\n if np.mean(rho_temp) > 9:\n if verbose:\n print('high')\n XKw = coeffs['path_len'] * coeffs['Kwh']\n logV0 = np.log(coeffs['V0h'])\n else:\n if verbose:\n print('low')\n XKw = coeffs['path_len'] * coeffs['Kwl']\n logV0 = np.log(coeffs['V0l'])\n # re-calculate rho with these coefficients\n rho = (np.log(voltage) - logV0) / XKw\n\n return rho",
"def residual(us):\n return self.h_S(z0, us) - h_P",
"def rv_from_r0v0(mu, R0, V0, t):\n #...Magnitudes of R0 and V0:\n r0 = norm(R0)\n v0 = norm(V0)\n #...Initial radial velocity:\n vr0 = np.dot(R0, V0)/r0\n #...Reciprocal of the semimajor axis (from the energy equation):\n alpha = 2/r0 - pow(v0,2)/mu\n #...Compute the universal anomaly:\n x = kepler_U(mu, t, r0, vr0, alpha)\n #...Compute the f and g functions:\n f, g = calc_f_g(mu, x, t, r0, alpha)\n #...Compute the final position vector:\n R = f*R0 + g*V0\n #...Compute the magnitude of R:\n r = norm(R)\n #...Compute the derivatives of f and g:\n fdot, gdot = calc_fdot_gdot(mu, x, r, r0, alpha)\n #...Compute the final velocity:\n V = fdot*R0 + gdot*V0\n return R, V",
"def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1",
"def estimate_R0(self, model, disease=None, **kwargs) -> ValueStd:\n return self._estimate_R(fit.estimate_R0, model, disease, **kwargs)",
"def get_rx_calibrate (self, rx_calibrate):\n\t\treturn self._rx_calibrate",
"def get_meas_time_resistance(instrument):\n return float(instrument.query('SENSE:RESISTANCE:APER?'))",
"def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number",
"def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp",
"def r0(self):\n return self.p[0] / self.p[1]",
"def calibration(self) -> int:",
"def get_radiation():\n sun_pos = get_sun_position()\n if sun_pos <= POSITION_MIN or sun_pos >= POSITION_MAX:\n return 0\n else:\n # Calculate a new delta.\n delta = random.randint(0, RADIATION_DELTA)\n if random.random() > 0.5:\n delta = -1 * delta\n # Calculate the radiation based on the sun position.\n new_radiation = round(-0.1279 * pow(sun_pos, 2) + 46.05 * sun_pos - 3100)\n # Apply the delta and return the value.\n return new_radiation + delta",
"def min_humidity(self):\n return 0",
"def get_resistance(self):\n\t\tif self.channel == 0 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC1_EEMEM1)\n\t\telif self.channel == 1 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC3_EEMEM3)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 1.0 + 0.075\n\t\tresistance_wa = 1.0 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}",
"def calibrate(self):\n super().calibrate()\n dataH1 = self._bus.read_i2c_block_data(self.addr,\n self.CALIBRATION_H1, 1)\n dataHX = self._bus.read_i2c_block_data(self.addr,\n self.CALIBRATION_HX, 7)\n\n self.dig_H1 = float(c_ubyte(dataH1[0]).value)\n self.dig_H2 = float(c_short((dataHX[1] << 8) + dataHX[0]).value)\n self.dig_H3 = float(c_ubyte(dataHX[2]).value)\n self.dig_H4 = float(c_short(\n (dataHX[3] << 4) + (dataHX[4] & 0xf)).value)\n self.dig_H5 = float(c_short(\n (dataHX[5] << 4) + ((dataHX[4] & 0xf0) >> 4)).value)\n self.dig_H6 = float(c_byte(dataHX[6]).value)",
"def calculate_rh(self):\n # Check for existence of relative humidity and mixing ratio\n if self.data.get('Relative_Humidity') is None:\n if self.data.get('Mixing_Ratio') is None:\n raise KeyError('Calculate mixing ratio first!')\n else:\n # Convert mixing ratio to relative humidity\n sat_vapor = 6.11 * (10.0**((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] -\n sat_vapor))\n\n self.data['Relative_Humidity'] = ((self.data['Mixing_Ratio'] /\n sat_w) * 100.0)",
"def temperature() -> float:",
"def rsr(self) -> float:\n return float(self.rmse() / np.std(self.true))"
] | [
"0.7580876",
"0.7580876",
"0.6772141",
"0.6595775",
"0.627289",
"0.627289",
"0.6242239",
"0.62241274",
"0.5995406",
"0.5957256",
"0.59282094",
"0.5875282",
"0.58737624",
"0.57592624",
"0.5742674",
"0.5665439",
"0.56521314",
"0.5646416",
"0.5641885",
"0.56370544",
"0.56231505",
"0.55898166",
"0.5518144",
"0.54900455",
"0.5483646",
"0.54810745",
"0.547866",
"0.547111",
"0.5470423",
"0.54554224"
] | 0.78341985 | 1 |
Find and create a configuration for Boost. prefix -- Where to find sofia-sip, should contain sofia-sip/sip.h. | def __init__(self, prefix = None):
# Compute the search path.
if prefix is None:
test = [Path('/usr'), Path('/usr/local')]
else:
test = [Path(prefix)]
self.__prefix = self._search_all('include/sofia-sip-1.12/sofia-sip/sip.h', test)[0]
self.__config = drake.cxx.Config()
self.__config.add_system_include_path(self.__prefix / 'include/sofia-sip-1.12')
self.__config.lib_path(self.__prefix / 'lib') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. Aborting.\"\n\t\treturn\n\n\tread_config_file()",
"def getapxs_location():\n return getconfigure_option(\"APXS\")",
"def provide_felix_config(self):\n # First read the config values, so as to avoid unnecessary\n # writes.\n prefix = None\n ready = None\n iface_pfx_key = key_for_config('InterfacePrefix')\n try:\n prefix = self.client.read(iface_pfx_key).value\n ready = self.client.read(READY_KEY).value\n except etcd.EtcdKeyNotFound:\n LOG.info('%s values are missing', CONFIG_DIR)\n\n # Now write the values that need writing.\n if prefix != 'tap':\n LOG.info('%s -> tap', iface_pfx_key)\n self.client.write(iface_pfx_key, 'tap')\n if ready != 'true':\n # TODO Set this flag only once we're really ready!\n LOG.info('%s -> true', READY_KEY)\n self.client.write(READY_KEY, 'true')",
"def test_replace_namespaced_build_config(self):\n pass",
"def includeme(root):\n configure.scan(\"backend.services\")\n configure.scan(\"backend.content\")\n configure.scan(\"backend.install\")",
"def config_locator():\n print(pkgrs.resource_filename('latools', 'latools.cfg'))\n return",
"def compose_defines():\n return \"\"\"\nLIBPBDATA_INC ?=../pbdata\nLIBPBIHDF_INC ?=../hdf\nLIBBLASR_INC ?=../alignment\nLIBPBDATA_LIB ?=%(thisdir)s/pbdata/\nLIBPBIHDF_LIB ?=%(thisdir)s/hdf/\nLIBBLASR_LIB ?=%(thisdir)s/alignment/\nnohdf ?=1\n\"\"\"%(dict(thisdir=thisdir))",
"def locate_config(confname, app_name, prefix='etc', verbose=False):\n candidates = []\n app_config_dir = ('%s_CONFIG_DIR' % app_name).upper()\n if app_config_dir in os.environ:\n candidate = os.path.normpath(\n os.path.join(os.environ[app_config_dir], confname))\n if os.path.isfile(candidate):\n candidates += [candidate]\n candidate = os.path.normpath(os.path.join(\n os.path.dirname(os.path.dirname(sys.executable)),\n prefix, app_name, confname))\n if os.path.isfile(candidate):\n candidates += [candidate]\n candidate = os.path.normpath('/%s/%s/%s' % (prefix, app_name, confname))\n if os.path.isfile(candidate):\n candidates += [candidate]\n candidate = os.path.normpath(os.path.join(os.getcwd(), confname))\n if os.path.isfile(candidate):\n candidates += [candidate]\n if candidates:\n if verbose:\n LOGGER.info(\"config loaded from '%s'\", candidates[0])\n return candidates[0]\n else:\n LOGGER.warning(\"config '%s' was not found.\", confname)\n return None",
"def configure(conf):\n conf.find_program(\"doxygen\", var=\"DOXYGEN\")\n conf.find_program(\"dot\", var=\"DOT\")",
"def insert_package_path():\n sys.path.insert(0, ospdn(ospdn(ospdn(ospap(__file__)))))",
"def setup_lib(CLIB):\n # {{ SETUP_LIB }}",
"def base_install():\n # scwrl\n scwrl = {}\n print('{BOLD}{HEADER}Generating configuration files for ISAMBARD.{END_C}\\n'\n 'All required input can use tab completion for paths.\\n'\n '{BOLD}Setting up SCWRL 4.0 (Recommended){END_C}'.format(**text_colours))\n scwrl_path = get_user_path('Please provide a path to your SCWRL executable', required=False)\n scwrl['path'] = str(scwrl_path)\n pack_mode = get_user_option(\n 'Please choose your packing mode (flexible is significantly slower but is more accurate).',\n ['flexible', 'rigid'])\n if pack_mode == 'rigid':\n scwrl['rigid_rotamer_model'] = True\n else:\n scwrl['rigid_rotamer_model'] = False\n settings['scwrl'] = scwrl\n\n # dssp\n print('{BOLD}Setting up DSSP (Recommended){END_C}'.format(**text_colours))\n dssp = {}\n dssp_path = get_user_path('Please provide a path to your DSSP executable.', required=False)\n dssp['path'] = str(dssp_path)\n settings['dssp'] = dssp\n\n # buff\n print('{BOLD}Setting up BUFF (Required){END_C}'.format(**text_colours))\n buff = {}\n ffs = []\n ff_dir = isambard_path / 'buff' / 'force_fields'\n for ff_file in os.listdir(str(ff_dir)):\n ff = pathlib.Path(ff_file)\n ffs.append(ff.stem)\n force_field_choice = get_user_option(\n 'Please choose the default BUFF force field, this can be modified during runtime.',\n ffs)\n buff['default_force_field'] = force_field_choice\n settings['buff'] = buff\n return",
"def includeme(config):",
"def configure(self, spec, prefix):\n options = getattr(self, \"configure_flag_args\", [])\n options += [\"--prefix={0}\".format(prefix)]\n options += self.configure_args()\n\n with working_dir(self.build_directory, create=True):\n inspect.getmodule(self).configure(*options)",
"def _configure_namespaces(api):\n\t#{{cookiecutter.app_name}}_namespace\n\tapi.add_namespace({{cookiecutter.app_name}}_namespace)",
"def configure(self):",
"def configure(self):",
"def configure(self):",
"def configure(self):",
"def test_patch_namespaced_build_config(self):\n pass",
"def configure(_workdir):\n\n global workdir\n workdir = _workdir\n\n from os.path import join\n from ConfigParser import ConfigParser\n config = ConfigParser(dict(here=workdir))\n config.read(join(workdir, 'rnaseqlyze.ini'))\n\n for name, value in config.items(\"rnaseqlyze\"):\n globals()[name] = value\n\n import Bio.Entrez\n Bio.Entrez.email = admin_email",
"def test_create_namespaced_build_config(self):\n pass",
"def pibooth_configure(cfg):",
"def setup(conf, products, build=False):\n import distutils.sysconfig\n paths = {\n \"CPPPATH\": distutils.sysconfig.get_python_inc().split(),\n \"LIBPATH\": [],\n }\n libs = []\n dir = distutils.sysconfig.get_config_var(\"LIBPL\")\n if not dir in paths[\"LIBPATH\"]:\n paths[\"LIBPATH\"] += [dir]\n pylibrary = distutils.sysconfig.get_config_var(\"LIBRARY\")\n mat = re.search(\"(python.*)\\.(a|so|dylib)$\", pylibrary)\n if mat:\n libs.append(mat.group(1)) \n for w in (\" \".join([distutils.sysconfig.get_config_var(\"MODLIBS\"),\n distutils.sysconfig.get_config_var(\"SHLIBS\")])).split():\n mat = re.search(r\"^-([Ll])(.*)\", w)\n if mat:\n lL = mat.group(1)\n arg = mat.group(2)\n if lL == \"l\":\n if not arg in libs:\n libs.append(arg)\n else:\n if os.path.isdir(arg) and not arg in paths[\"LIBPATH\"]:\n paths[\"LIBPATH\"].append(arg)\n conf.env.PrependUnique(**paths)\n for lib in libs:\n if lib not in conf.env.libs[\"python\"]:\n conf.env.libs[\"python\"].append(lib)\n return {\"paths\": paths, \"libs\": {\"python\": libs}}",
"def build_config(\n *, quiet: bool, release: str, sp_osi: str | None, tag_suffix: str | None\n) -> Config:\n\n def osi_version() -> str:\n \"\"\"Determine the sp-osi version to use; parse \"wip\" in a special way.\"\"\"\n if sp_osi is None:\n return find.find_sp_osi_version()\n\n if sp_osi == \"wip\":\n return find.find_sp_osi_version() + defs.VERSION_WIP_SUFFIX\n\n return sp_osi\n\n return Config(\n topdir=find.find_topdir(),\n release=release,\n sp_osi_version=osi_version(),\n tag_suffix=tag_suffix if tag_suffix is not None else _build_tag_suffix(),\n verbose=not quiet,\n )",
"def phone_config(self, sip_server: str = \"\") -> None:",
"def _setup_applications(self):\n if 'host_nfs_path' in self.config['settings'] and 'guest_nfs_path' in self.config['settings']:\n self.settings['nfs'] = NFSSettings(host_vm_nfs_path=self.config['settings']['host_nfs_path'],\n guest_vm_nfs_path=self.config['settings']['guest_nfs_path'])\n\n self._setup_printer()",
"def configure(self) -> None:",
"def extractBINS( configPy, var ):\n\n\t#TODO: Better a temporary file\n\ttry:\n\t\tshutil.copy( configPy, '_tmpPy.py')\n\texcept IOError:\n\t\tmessage = '\\033[1;31mError: There is no config File named %s\\033[1;m' % configPy\n\t\traise IOError, message\n\t# To be sure the first import is FWCore.ParameterSet.Config \n\t# in order to extract BINS\n\t_file = open('_tmpPy.py','r')\n\t_lines = _file.readlines()\n\t_file.close()\n\t_lines.insert(0,'import FWCore.ParameterSet.Config as cms\\n')\n\t_file = open('_tmpPy.py','w')\n\t_file.writelines(_lines)\n\t_file.close()\n\t# Append the working directory to do the import\n\tsys.path.append( os.getcwd() )\n\t#------------------------------------------------------------ \n\t\n\ttry:\n\t\tfrom _tmpPy import BINS\n\texcept ImportError:\n\t\tmessage = '\\033[1;31mError: There is no BINS in %s file. Are you sure this is a config python to do the fit?\\033[1;m' % configPy\n\t\tos.remove('_tmpPy.py')\n\t\traise ImportError, message\n\n\tvariables = BINS.parameterNames_()\n\t# Check if the variables introduced by the user are inside\n\t# the fit config python\n\tfor i in var:\n\t\tif i not in variables:\n\t\t\tos.remove('_tmpPy.py')\n\t\t\tmessage = \"\"\"\\033[1;31mError: The variable %s is not in the parameter BINS of the config python %s. \nCheck your config or change your input variable with --var option\\033[1;m \"\"\" % ( i, configPy)\n\t\t print message\n raise KeyError\n\n\t# All was fine. Remember: first variable is the pt-like (construct the weights respect it)\n\tPT = var[0]\n\tETA = var[1]\n\n\t#bins = BINS\n\ttry:\n\t\tos.remove( '_tmpPy.py' )\n\t\tos.remove( '_tmpPy.pyc' )\n\texcept OSError:\n\t\tpass\n\n\treturn BINS,PT,ETA",
"def test_read_namespaced_build_config(self):\n pass"
] | [
"0.5301707",
"0.52438116",
"0.51499075",
"0.5118139",
"0.50677866",
"0.5059345",
"0.5040578",
"0.50159454",
"0.49063885",
"0.48804155",
"0.48284978",
"0.4827175",
"0.4806287",
"0.4769773",
"0.47503495",
"0.47349274",
"0.47349274",
"0.47349274",
"0.47349274",
"0.4719681",
"0.47054222",
"0.4700979",
"0.46805796",
"0.4662344",
"0.46299243",
"0.46288693",
"0.46256593",
"0.46201238",
"0.46129155",
"0.4596091"
] | 0.67315805 | 0 |
Transliterate and clean username by removing any unsupported character | def clean_username(value):
if NO_ASCII_REGEX.search(value):
value = unidecode(value)
value = NO_ASCII_REGEX.sub('', value)
value = NO_SPECIAL_REGEX.sub('', value)
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalize_username(username):\n\n regex = compile(UnicodeUsernameValidator.regex)\n normalized_username = \"\"\n for char in username:\n if not regex.match(char):\n continue\n normalized_username += char\n return normalized_username",
"def clean_username(self, username):\n return username.lower()",
"def prepare_username(username):\n username = username.upper()\n\n if not username.startswith('\\-'):\n return username\n\n return username.replace('\\-', '-', 1)",
"def normalize_username(value):\n return value.lower()",
"def raw_username(username):\n sitewide_domain = settings.HQ_ACCOUNT_ROOT\n username = str(username or '')\n username = username.lower()\n try:\n u, d = username.split(\"@\")\n except Exception:\n return username\n if d.endswith('.' + sitewide_domain):\n return u\n else:\n return username",
"def clean(name):\n name = remove_extra(name)\n name = unidecode.unidecode(name) # Remove diacritics\n name = \"\".join(\n list(filter(lambda c: c in (string.ascii_letters + string.digits + \" \"), name))\n )\n name = name.lower().strip()\n return name",
"def normalize_name(word):\n return word.strip(\"0123456789!@#$%^&*_() +=\\/?<>,.`~;:\").lower().replace(\" \",\"_\")",
"def _username_from_name(self, name):\r\n return name.replace(' ', '_')",
"def clean_username(self):\n if self.edit_user is None:\n # checks for alnum and that this user doesn't already exist\n return super(RegisterUserForm, self).clean_username()\n # just checks for alnum\n if not self.cleaned_data['username'].isalnum():\n raise forms.ValidationError(_(u'Please enter a username containing only letters and numbers.'))\n return self.cleaned_data['username']",
"def _sanitizeName(name):\n\n name = name.lower() # lower.\n name = name.replace('.','') # remove periods.\n name = name.replace('-','') # remove dashes.\n name = name.replace(\"'\",'') # remove apostrophies.\n # return it.\n return name",
"def clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")",
"def make_name2(u):\n\treturn re.sub(r'\\s+', '', u).lower()",
"def clean_name(s):\n return re.sub('[\\W_]+', '', s).lower()",
"def clean_username(self):\n data = self.cleaned_data['username']\n if '@' in data or '|' in data or ' ' in data or '+' in data:\n raise forms.ValidationError(_(u'Usernames should not have special characters.'))\n try:\n user = User.objects.get(username__exact=self.cleaned_data['username'])\n except User.DoesNotExist:\n return self.cleaned_data['username']\n raise forms.ValidationError(_(u'This username is already taken. Please choose another.'))",
"def _scrub(self, string):\n if not string.isalnum():\n raise ValueError(\"Table name cannot include non-alphanumerics.\")\n return string",
"def sanitize(value):\n from re import sub\n from unicodedata import normalize\n value = normalize('NFKD', value).encode('ascii', 'ignore')\n value = sub('[^\\w\\s\\.-]', '', value.decode('utf-8')).strip().lower()\n return sub('[-_\\s]+', '_', value)",
"def clean(tweet):\n #Separates the contractions and the punctuation\n\n\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<user>\", \"\")\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<url>\", \"\")\n tweet = correct_spell(tweet)\n return tweet.strip().lower()",
"def clean_unicode(text):\n clean_text = text.encode(\"ascii\", errors=\"replace\").strip().decode(\"ascii\")\n clean_text = clean_text.replace(\"?\", ' ')\n return clean_text",
"def clean_user_input(self, user_input):\n legal_chars = re.compile(r'^[a-z0-9]$')\n return filter(lambda c: re.match(legal_chars, c), user_input.lower())",
"def processword(word):\n word = word.lower()\n word = word.strip('()?,!`.-:\\\"\\n \\'')\n return word",
"def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)",
"def to_clean_str(s: str) -> str:\n return re.sub(\"[^a-zA-Z0-9]\", \"\", s).lower()",
"def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()",
"def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text",
"def clean_string(value):\n\treturn re.sub(r'[^a-zA-Z0-9_.]', '', str(value))",
"def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()",
"def invalid_username(username):\n word_letters = re.sub('[^a-zA-Z-0-9]+', '', str(username))\n if any(item.isalpha() for item in word_letters):\n return False\n return True",
"def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval",
"def clean(s):\n punctuations = \"-,.?!;:\\n\\t()[]\\\"-\"\n return s.translate(None, string.punctuation).lower()",
"def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet"
] | [
"0.8276948",
"0.7640557",
"0.748814",
"0.7462814",
"0.6971219",
"0.6806881",
"0.6668218",
"0.6659604",
"0.6582828",
"0.6548192",
"0.6532645",
"0.65318185",
"0.65207565",
"0.6505552",
"0.6473751",
"0.64688873",
"0.6456",
"0.64483285",
"0.64419836",
"0.64402777",
"0.6427039",
"0.6384738",
"0.6369296",
"0.6352192",
"0.63462204",
"0.63444954",
"0.63430846",
"0.6333575",
"0.6319038",
"0.62954724"
] | 0.8559825 | 0 |
Replacement of ore.alchemist.container.stringKey. The difference is that here the primary_key is not determined by sqlalchemy.orm.mapper.primary_key_from_instance(obj) but by doing the logically equivalent (but a little more laborious) [ getattr(instance, c.name) for c in mapper.primary_key ]. This is because, in some hard-to-debug cases, the previous was returning None for all pk values, e.g. for objects on which checkPermission() has not been called. Using this version, the primary_key is correctly determined irrespective of whether checkPermission() had previously been called on the object. | def stringKey(obj):
unproxied = proxy.removeSecurityProxy(obj)
mapper = orm.object_mapper(unproxied)
#primary_key = mapper.primary_key_from_instance(unproxied)
identity_values = [ getattr(unproxied, c.name) for c in mapper.primary_key ]
identity_key = "-".join(map(str, identity_values))
return "obj-%s" % (identity_key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def key(self):\n def validate(name):\n '''Compute the key if necessary and validate'''\n found = getattr(self, name)\n value = found() if callable(found) else found\n if value is None:\n raise BadKeyError(\"The key for %s cannot be None\" % self)\n return str(value) \n if self.__key is None:\n namespace, kind, key = Schema.Get(self)\n self.__id = key\n value = validate(key)\n self.__key = Key(namespace, kind, value)\n else:\n self.__key.id = validate(self.__id)\n return self.__key",
"def _get_obj_pk(self, obj):\n if self.use_natural_keys and hasattr(obj, 'natural_key'):\n raw_nat_key = obj.natural_key()\n obj_pk = smart_text(NATURAL_KEY_JOINER.join(raw_nat_key))\n keytype = 'natural'\n else:\n obj_pk = obj._get_pk_val()\n keytype = 'pk'\n\n return obj_pk, keytype",
"def primary_key(cls):\n has_multiple_pk = len(class_keys(cls)) > 1\n\n if has_multiple_pk:\n # guess the pk\n pk = cls.__name__.lower() + '_id'\n else:\n for key in class_keys(cls):\n pk = key\n break\n\n if not pk in cls.__dict__:\n # could not find pk field in class, now check\n # whether it has been explicitly specified\n if 'pk_field' in cls.__dict__:\n pk = cls.__dict__['pk_field']\n else:\n raise KeyNotFoundException(\"Could not figure out primary key field\"\n \"for %s model. Tried to first use %s as\"\n \" field name,and then looked for\"\n \" pk_field attr which was also missing\"\n % (cls.__name__, pk))\n\n return pk",
"def get_primary_key(cls) -> str:\n return inspect(cls).primary_key[0].name",
"def primary_key(cls):\n\n if cls.__from_class__:\n cls = cls.__from_class__\n return cls.__table__.primary_key.columns.values()[0].name",
"def get_key_id(self):",
"def parent_model_object_to_key(self, parent_object: 'Any') -> 'Any':\n key = tuple(getattr(parent_object, pk) for pk in self.parent_model_pks)\n return key",
"def test_primary_key(self):\r\n\r\n # This should just work.\r\n class AutoFieldKey(models.Model):\r\n key = models.AutoField(primary_key=True)\r\n AutoFieldKey.objects.create()\r\n\r\n # This one can be exactly represented.\r\n class CharKey(models.Model):\r\n id = models.CharField(primary_key=True, max_length=10)\r\n CharKey.objects.create(id='a')\r\n\r\n # Some rely on unstable assumptions or have other quirks and\r\n # should warn.\r\n\r\n# # TODO: Warning with a range limitation.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class IntegerKey(models.Model):\r\n# id = models.IntegerField(primary_key=True)\r\n# IntegerKey.objects.create(id=1)\r\n\r\n# # TODO: date/times could be resonably encoded / decoded as\r\n# # strings (in a reversible manner) for key usage, but\r\n# # would need special handling and continue to raise an\r\n# # exception for now\r\n# with self.assertRaises(Warning):\r\n#\r\n# class DateKey(models.Model):\r\n# id = models.DateField(primary_key=True, auto_now=True)\r\n# DateKey.objects.create()\r\n\r\n# # TODO: There is a db.Email field that would be better to\r\n# # store emails, but that may prevent them from being\r\n# # used as keys.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class EmailKey(models.Model):\r\n# id = models.EmailField(primary_key=True)\r\n# EmailKey.objects.create(id='[email protected]')\r\n\r\n# # TODO: Warn that changing field parameters breaks sorting.\r\n# # This applies to any DecimalField, so should belong to\r\n# # the docs.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class DecimalKey(models.Model):\r\n# id = models.DecimalField(primary_key=True, decimal_places=2,\r\n# max_digits=5)\r\n# DecimalKey.objects.create(id=1)\r\n\r\n # Some cannot be reasonably represented (e.g. binary or string\r\n # encoding would prevent comparisons to work as expected).\r\n with self.assertRaises(DatabaseError):\r\n\r\n class FloatKey(models.Model):\r\n id = models.FloatField(primary_key=True)\r\n FloatKey.objects.create(id=1.0)\r\n\r\n # TODO: Better fail during validation or creation than\r\n # sometimes when filtering (False = 0 is a wrong key value).\r\n with self.assertRaises(DatabaseError):\r\n\r\n class BooleanKey(models.Model):\r\n id = models.BooleanField(primary_key=True)\r\n BooleanKey.objects.create(id=True)\r\n len(BooleanKey.objects.filter(id=False))",
"def primary_key(self) -> str:\n return self.model._meta.pk.name # type: ignore",
"def object_pk(self):\n\n if self._wrapped not in (None, empty):\n return str(self._wrapped.pk)\n\n if '_object_pk' in self.__dict__:\n return self.__dict__['_object_pk']\n\n identifier = self._get_identifier()\n if identifier:\n # noinspection PyBroadException\n try:\n object_pk = identifier.split('.', 2)[-1]\n if object_pk == 'None':\n object_pk = None\n self.__dict__['_object_pk'] = object_pk\n return object_pk\n except Exception:\n pass\n\n raise AttributeError()",
"def primary(self):\n primary_k = self.__class__.get_primary()\n return getattr(self, primary_k)",
"def pk(self):\n return getattr(self, self.schema.pk.name, None)",
"def keyify(content_type_pk, pk):\n return '%s:%s' % (content_type_pk, pk)",
"def get_datastore_key(model, pk):\n\n kind = get_top_concrete_parent(model)._meta.db_table\n return Key.from_path(kind, pk)",
"def _primary_key_names(obj):\n return [key.name for key in _get_mapper(obj).primary_key]",
"def keyify_obj(o):\n return keyify(o.content_type.pk, o.pk)",
"def _get_key(key_or_id, key_cls):\n return (\n key_cls.from_string(key_or_id)\n if isinstance(key_or_id, str)\n else key_or_id\n )",
"def get_primary_id(self):",
"def persistent_cache_key_adapter(obj):\n # pylint: disable=protected-access\n if obj._p_oid:\n return hex(int.from_bytes(obj._p_oid, byteorder='big'))[2:]\n return hex(id(obj))[2:]",
"def key(self):\n return self._key if self._key else self.factory().key",
"def get_key(self, obj):\n if hasattr(obj, \"id\"):\n hashed_id = hashlib.md5(str(obj.id).encode(\"utf-8\")).hexdigest()\n return hashed_id\n else:\n return None",
"def _cache_key(cls, pk, db):\r\n key_parts = ('o', cls._meta, pk, db)\r\n return ':'.join(map(encoding.smart_unicode, key_parts))",
"def pk(self, ctx):\n\n #if (self._pk == False):\n if True:\n pk_cols = []\n for col in self.columns:\n if col.pk:\n pk_cols.append(col)\n\n if (len(pk_cols) > 1):\n raise Exception(\"Table %s has multiple primary keys: %s\" % (self.name, pk_cols))\n elif (len(pk_cols) == 1):\n self._pk = pk_cols[0]\n else:\n self._pk = None\n\n return self._pk",
"def instance_key(model, instance_or_pk):\r\n\r\n return '%s.%s:%d' % (\r\n model._meta.app_label,\r\n model._meta.module_name,\r\n getattr(instance_or_pk, 'pk', instance_or_pk),\r\n )",
"def _get_raw_key(self, key_id):",
"def get_key(self):\n return self._determine_key()",
"def get_object_id(self, key):\n try:\n return self.key_object[key]\n except KeyError:\n return None",
"def get_pk(self):\n return getattr(self, self.get_pk_name(), None)",
"def keyify(self):\n return keyify_obj(self)",
"def test_primary_key_coercing(self):\r\n CharKey.objects.create(id=1)\r\n CharKey.objects.create(id='a')\r\n CharKey.objects.create(id=1.1)\r\n CharKey.objects.get(id='1')\r\n CharKey.objects.get(id='a')\r\n CharKey.objects.get(id='1.1')\r\n\r\n IntegerKey.objects.create(id=1)\r\n with self.assertRaises(ValueError):\r\n IntegerKey.objects.create(id='a')\r\n IntegerKey.objects.create(id=1.1)\r\n IntegerKey.objects.get(id='1')\r\n with self.assertRaises(ValueError):\r\n IntegerKey.objects.get(id='a')\r\n IntegerKey.objects.get(id=1.1)"
] | [
"0.69380665",
"0.67975974",
"0.6636642",
"0.6627335",
"0.64218795",
"0.6354907",
"0.6309344",
"0.62846273",
"0.622991",
"0.62281317",
"0.61777174",
"0.61190313",
"0.6101327",
"0.6091566",
"0.6083226",
"0.6061366",
"0.6051213",
"0.6004258",
"0.5950747",
"0.5949102",
"0.59490883",
"0.59411365",
"0.5940988",
"0.59337777",
"0.5926337",
"0.59218293",
"0.5921642",
"0.59214526",
"0.5879429",
"0.5867949"
] | 0.7004091 | 0 |
Updates the puzzle state based on the provided move string | def update_puzzle(self, move_string):
zero_row, zero_col = self.current_position(0, 0)
for direction in move_string:
if direction == "l":
assert zero_col > 0, "move off grid: " + direction
self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]
self._grid[zero_row][zero_col - 1] = 0
zero_col -= 1
elif direction == "r":
assert zero_col < self._width - 1, "move off grid: " + direction
self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]
self._grid[zero_row][zero_col + 1] = 0
zero_col += 1
elif direction == "u":
assert zero_row > 0, "move off grid: " + direction
self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]
self._grid[zero_row - 1][zero_col] = 0
zero_row -= 1
elif direction == "d":
assert zero_row < self._height - 1, "move off grid: " + direction
self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]
self._grid[zero_row + 1][zero_col] = 0
zero_row += 1
else:
assert False, "invalid direction: " + direction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print",
"def handle_move(self, move_string):\n def map_move(move):\n col = int(ascii_lowercase.find(move[0])) + 1 # dummy col\n row = int(move[1:])\n # if not 0 < col <= game[\"board_width\"]:\n # raise ValueError('bad coord; invalid col in ' + coord)\n # if not 0 < row <= game[\"board_height\"]:\n # raise ValueError('bad coord; invalid row in ' + coord)\n return row*(self.rules[\"row_len\"]) + col\n move = list(map(map_move,move_string.split(' ')))\n self.turn[\"board\"][move[0]].make_move(*move[1:])\n self.turn[\"half_move_clock\"] += 1\n if self.turn[\"active_player\"] == 1:\n self.turn[\"full_move_clock\"] += 1\n self.turn[\"active_player\"] = (self.turn[\"active_player\"] + 1) % 2\n # self.turn[\"board\"][move_start].make_move(move_end)",
"def move(self, move):\n out = ''\n for val in self.moves[move]:\n out += self.state[val]\n self.state = out",
"def make_move(state: str, section_num: int, move: str) -> str:\n if move == wf.CHECK:\n check_result = wf.check_section(state, section_num)\n if check_result:\n print('The section is correct')\n else:\n print('The section is incorrect')\n else:\n state = wf.change_state(state, section_num, move) \n return state",
"def setBoard( self, moveString ): \n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.__width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'",
"def set_board(self, move_string):\n next_side = \"X\"\n for col_string in move_string:\n col = int(col_string)\n if col >= 0 and col <= self.width:\n self.add_move(col, next_side)\n if next_side == \"X\":\n next_side = \"O\"\n else:\n next_side = \"X\"",
"def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'",
"def setBoard(self, moveString):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X':\n nextCh = 'O'\n else:\n nextCh = 'X'",
"def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.__width:\n self.addMove(col, nextCh)\n if nextCh == 'X': \n nextCh = 'O'\n else: nextCh = 'X'",
"def respond_to_move(self, move):\n\n # this will get the piece at the queried position,\n # will notify user if there is no piece there\n current_algebraic, new_algebraic = move\n row, column = self.algebraic_mapped_to_position[current_algebraic]\n if self.board[row][column] == empty_square:\n print(\"There is no piece at %s\" % (current_algebraic,))\n return\n piece, location = self.board[row][column]\n\n # this will get all possible moves from this position\n # and will make the move if the new position is a\n # valid move\n piece_name = self.piece_names[piece]\n moves = self.moves[piece_name]((row, column))\n \n new_row, new_column = self.algebraic_mapped_to_position[new_algebraic]\n print(\"old position %s, %s\" % (row, column))\n print(\"new algebraic %s\" % new_algebraic)\n print(\"new position %s, %s\" % (new_row, new_column))\n print(\"moves %s\" % moves)\n if (new_row, new_column) in moves:\n # this will change the game board to reflect the move\n self.board[row][column] = empty_square\n self.board[new_row][new_column] = piece+location",
"def sim_move(self, state, move):\n out = ''\n for val in self.moves[move]:\n out += state[val]\n return out",
"def _ai_move(self):\n move = self.AI_MOVES[self.game_board.get_string_board()][0]\n self.game_board.move_pieces(start=move[\"start\"], end=move[\"end\"])\n\n self.turn_value_text = \"You (Black)\"\n self.selected_piece_value_text = f\"N/A\"\n self.selected_move = -1\n\n self._sync_gui()",
"def move(state=None, actual_move=None):\n copy = state.copy()\n copy.push(chess.Move.from_uci(uci=actual_move))\n return copy",
"def update_game_state(self):\n # if board is not filled out, returns a valid move message\n for row in self.board:\n if 0 in row:\n return \"Valid input\"\n\n # if board is filled out, verifies if solution is valid and updates game state\n self.game_state = alg.check_solution(self.board)\n return self.game_state",
"def apply_move(self, move):\r\n next_board = copy.deepcopy(self.board)\r\n next_board.place(self.next_player, move)\r\n return GameState(next_board, self.next_player.other, move)",
"def apply_move(self, move):\n if self.check_move(move=move):\n self.board_list[move] = self.current_player.marker # changes value in the board to player which is either X or O\n self.moves_made += str(move) # keeps track of all moves\n return True\n else:\n return False",
"def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move",
"def execute_move(self, game_state):\n game_state.pacs_pos[self.pac_id] = self.next_move",
"def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun",
"def change_move_state(self, new_state):\n\n if new_state != self.move_state:\n print(\"Changing move state from \", states[self.move_state],\n \" to \", states[new_state])\n self.move_state = new_state\n print(\"move_state is now\", self.move_state)",
"def apply_move(self, move, state):\n x, y , heading, grid_data = state\n map_data = [row[:] for row in grid_data]\n if move == self.MOVE_FORWARD:\n # get coordinates for next cell\n if heading == self.UP:\n next_y = y - 1\n next_x = x\n elif heading == self.DOWN:\n next_y = y + 1\n next_x = x\n elif heading == self.LEFT:\n next_y = y\n next_x = x - 1\n else:\n next_y = y\n next_x = x + 1\n\n # handle special tile types\n if map_data[next_y][next_x] == self.ICE_SYMBOL:\n # handle ice tile - slide until first non-ice tile or blocked\n if heading == self.UP:\n for i in range(next_y, -1, -1):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i + 1\n break\n else:\n next_y = i\n break\n elif heading == self.DOWN:\n for i in range(next_y, self.y_size):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i - 1\n break\n else:\n next_y = i\n break\n elif heading == self.LEFT:\n for i in range(next_x, -1, -1):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i + 1\n break\n else:\n next_x = i\n break\n else:\n for i in range(next_x, self.x_size):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i - 1\n break\n else:\n next_x = i\n break\n if map_data[next_y][next_x] == self.TELEPORT_SYMBOL:\n # handle teleport - find the other teleporter\n tpy, tpx = (None, None)\n for i in range(self.y_size):\n for j in range(self.x_size):\n if map_data[i][j] == self.TELEPORT_SYMBOL and (i != next_y or j != next_x):\n tpy, tpx = (i, j)\n break\n if tpy is not None:\n break\n if tpy is None:\n raise Exception(\"LaserTank Map Error: Unmatched teleport symbol\")\n next_y, next_x = (tpy, tpx)\n else:\n # if not ice or teleport, perform collision check\n if self.cell_is_blocked(next_y, next_x, map_data):\n return self.COLLISION\n\n # check for game over conditions\n if self.cell_is_game_over(next_y, next_x, map_data):\n return self.GAME_OVER\n\n # no collision and no game over - update player position\n y = next_y\n x = next_x\n return (x, y, heading, map_data)\n\n elif move == self.TURN_LEFT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.LEFT\n elif heading == self.DOWN:\n heading = self.RIGHT\n elif heading == self.LEFT:\n heading = self.DOWN\n else:\n heading = self.UP\n return (x, y, heading, map_data)\n\n elif move == self.TURN_RIGHT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.RIGHT\n elif heading == self.DOWN:\n heading = self.LEFT\n elif heading == self.LEFT:\n heading = self.UP\n else:\n heading = self.DOWN\n return (x, y, heading, map_data)\n\n elif move == self.SHOOT_LASER:\n # set laser direction\n if heading == self.UP:\n laserheading = self.UP\n dy, dx = (-1, 0)\n elif heading == self.DOWN:\n laserheading = self.DOWN\n dy, dx = (1, 0)\n elif heading == 
self.LEFT:\n laserheading = self.LEFT\n dy, dx = (0, -1)\n else:\n laserheading = self.RIGHT\n dy, dx = (0, 1)\n\n # loop until laser blocking object reached\n ly, lx = (y, x)\n while True:\n ly += dy\n lx += dx\n\n # handle boundary and immovable obstacles\n if ly < 0 or ly >= self.y_size or \\\n lx < 0 or lx >= self.x_size or \\\n map_data[ly][lx] == self.OBSTACLE_SYMBOL:\n # laser stopped without effect\n return self.COLLISION\n\n # handle movable objects\n elif self.cell_is_laser_movable(ly, lx, laserheading, map_data):\n # check if tile can be moved without collision\n if self.cell_is_blocked(ly + dy, lx + dx, map_data) or \\\n map_data[ly + dy][lx + dx] == self.ICE_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.TELEPORT_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.FLAG_SYMBOL or \\\n (ly + dy == y and lx + dx == x):\n # tile cannot be moved\n return self.COLLISION\n else:\n old_symbol = map_data[ly][lx]\n map_data[ly][lx] = self.LAND_SYMBOL\n if map_data[ly + dy][lx + dx] == self.WATER_SYMBOL:\n # if new bridge position is water, convert to land tile\n if old_symbol == self.BRIDGE_SYMBOL:\n map_data[ly + dy][lx + dx] = self.LAND_SYMBOL\n # otherwise, do not replace the old symbol\n else:\n # otherwise, move the tile forward\n map_data[ly + dy][lx + dx] = old_symbol\n break\n\n # handle bricks\n elif map_data[ly][lx] == self.BRICK_SYMBOL:\n # remove brick, replace with land\n map_data[ly][lx] = self.LAND_SYMBOL\n break\n\n # handle facing anti-tanks\n elif (map_data[ly][lx] == self.ANTI_TANK_UP_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.ANTI_TANK_DOWN_SYMBOL and laserheading == self.UP) or \\\n (map_data[ly][lx] == self.ANTI_TANK_LEFT_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.ANTI_TANK_RIGHT_SYMBOL and laserheading == self.LEFT):\n # mark anti-tank as destroyed\n map_data[ly][lx] = self.ANTI_TANK_DESTROYED_SYMBOL\n break\n\n # handle player laser collision\n elif ly == y and lx == x:\n return self.GAME_OVER\n\n # handle facing mirrors\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.LEFT):\n # new direction is up\n dy, dx = (-1, 0)\n laserheading = self.UP\n elif (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.RIGHT) or \\\n (self.grid_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.LEFT):\n # new direction is down\n dy, dx = (1, 0)\n laserheading = self.DOWN\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.UP):\n # new direction is left\n dy, dx = (0, -1)\n laserheading = self.LEFT\n elif (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.UP):\n # new direction is right\n dy, dx = (0, 1)\n laserheading = self.RIGHT\n # do not terminate laser on facing mirror - keep looping\n\n # check for game over condition after effect of laser\n if self.cell_is_game_over(y, x, map_data):\n return self.GAME_OVER\n return (x, y, heading, map_data)\n return self.SUCCESS",
"def update_game_states(player_move, values):\n moves = ['a', 's', 'd']\n ordered = sorted([n for n in game_states[values].values()\n if type(n) != str])[::-1]\n for i in range(3):\n opt_a_letter = moves[i]\n opt_a_number = game_states[values][opt_a_letter]\n\n opt_b_letter = moves[i-2]\n opt_b_number = game_states[values][opt_b_letter]\n\n if player_move == moves[i]:\n if type(opt_b_number) == int:\n if not (opt_b_number == ordered[0] and\n (opt_b_number-ordered[1]) >= 10):\n game_states[values][opt_b_letter] += 1\n\n elif type(opt_a_number) == int:\n if not (opt_a_number == ordered[0] and\n (opt_a_number-ordered[1]) >= 10):\n game_states[values][opt_a_letter] += 1",
"def make_move(self, move: Any) -> 'StonehengeState':\n if type(move) == str:\n new_state = StonehengeState(not self.p1_turn, self.side_length)\n # copy the board information from current state\n # make copy of current state information\n hori_lst_copy = []\n for lst in self.hori_lst:\n temp = []\n for item in lst:\n temp.append(item)\n hori_lst_copy.append(temp)\n left_lst_copy = []\n for lst in self.left_lst:\n temp = []\n for item in lst:\n temp.append(item)\n left_lst_copy.append(temp)\n right_lst_copy = []\n for lst in self.right_lst:\n temp = []\n for item in lst:\n temp.append(item)\n right_lst_copy.append(temp)\n\n hori_result_copy = []\n for item in self.hori_result:\n hori_result_copy.append(item)\n left_result_copy = []\n for item in self.left_result:\n left_result_copy.append(item)\n right_result_copy = []\n for item in self.right_result:\n right_result_copy.append(item)\n\n new_state.hori_lst = hori_lst_copy\n new_state.hori_result = hori_result_copy\n new_state.left_lst = left_lst_copy\n new_state.left_result = left_result_copy\n new_state.right_lst = right_lst_copy\n new_state.right_result = right_result_copy\n # update the new state with str move\n # parallel nested list data structure\n lst = [new_state.hori_lst, new_state.left_lst, new_state.right_lst]\n result = [new_state.hori_result, new_state.left_result, new_state.right_result]\n # update the cell\n for i in range(len(lst)):\n for j in range(len(lst[i])):\n for k in range(len(lst[i][j])):\n if lst[i][j][k] == move:\n # should use the player name of last state, so opposite names\n if new_state.p1_turn:\n lst[i][j][k] = \"2\"\n else:\n lst[i][j][k] = \"1\"\n # update ley-line marks\n # the ley-line may belong to a player after this move\n p1_taken = 0\n p2_taken = 0\n if result[i][j] != \"@\":\n continue\n for item in lst[i][j]:\n if item == \"1\":\n p1_taken += 1\n if item == \"2\":\n p2_taken += 1\n if float(p1_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"1\"\n if float(p2_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"2\"\n ###### CHECK FOR SHALLOW COPY PROBLEM, IF ATTRIBUTE IS UPDATE IN NEW STATE\n return new_state",
"def make_move(self, move):\n if type(move) == str:\n move = int(move)\n\n new_state = SubtractSquareState(not self.p1_turn,\n self.current_total - move)\n return new_state",
"def move(self, state):\n raise NotImplementedError(\"Need to implement this method\")",
"def make_move(self, board):\n user_input = self.get_user_input(\n 'coordinates of next move (x,y): '\n )\n move = self.transform_user_input(user_input)\n\n valid = board.move_is_valid(move)\n while not valid:\n user_input = self.get_user_input(\n 'Invalid move, coordinate of next move: '\n )\n move = self.transform_user_input(user_input)\n valid = board.move_is_valid(move)\n board.set_piece(move, color=self.color)"
] | [
"0.8120185",
"0.8120185",
"0.8120185",
"0.8101825",
"0.8077781",
"0.7200039",
"0.70454454",
"0.6661715",
"0.6583404",
"0.6582128",
"0.65785265",
"0.65703756",
"0.6560121",
"0.6289467",
"0.6268087",
"0.61335135",
"0.61134064",
"0.60638404",
"0.605805",
"0.6051816",
"0.60436565",
"0.6034461",
"0.60166997",
"0.6004741",
"0.59758765",
"0.5964111",
"0.5961281",
"0.5947658",
"0.5941391",
"0.59357363"
] | 0.81526875 | 1 |
Check whether the puzzle satisfies the row zero invariant at the given column (col > 1). Returns a boolean | def row0_invariant(self, target_col):
result = True
if self._grid[0][target_col] != 0:
result = False
if self._grid[1][target_col] != (target_col + self._width * 1):
result = False
for row in range(2, self._height):
for col in range(self._width):
solved_value = (col + self._width * row)
if solved_value != self._grid[row][col]:
result = False
for row in (0, 1):
for col in range(target_col+1, self._width):
solved_value = (col + self._width * row)
if solved_value != self._grid[row][col]:
result = False
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def row1_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[1][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row1_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 2:\r\n # print 'All conditions are correct!'\r\n return True",
"def row0_invariant(self, target_col):\r\n # asserts that curr_tile is in target_col\r\n if self.get_number(0, target_col) != 0:\r\n return False\r\n # asserts that tile (0,j) is solved, the grid below (0,j) and to the right is solved \r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(0, self.get_height()):\r\n if dummy_i > 1 or (dummy_i == 0 and dummy_j > target_col) or (dummy_i == 1 and dummy_j >= target_col):\r\n if (dummy_i, dummy_j) != self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True",
"def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False",
"def checkSafe(Board, rows, column):\n for x in range(rows):\n if (Board[x] == column or\n Board[x] + rows - x == column or\n Board[x] + x - rows == column):\n return False\n return True",
"def row0_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[0][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row0_invariant' \r\n return False\r\n conditions += 1\r\n \r\n\r\n for ind in range(len(self._grid[1][target_col:])):\r\n if self.current_position(1, ind+target_col) != (1, ind+target_col):\r\n # print 'Some tile in the lower row does not in correct place in row0_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 3:\r\n # print 'All conditions are cprrect!'\r\n return True",
"def row1_invariant(self, target_col):\n result = True\n if self._grid[1][target_col] != 0:\n result = False\n for row in range(2, self._height):\n for col in range(self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n for row in (0, 1):\n for col in range(target_col+1, self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n return result",
"def check(chessboard, row, col, n):\n for i in range(col):\n if chessboard[row][i] == 1:\n return False\n\n for j, i in zip(range(row, -1, -1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n \n for j, i in zip(range(row, n, 1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n\n return True",
"def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])",
"def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])",
"def square_empty(column, row):\n if np.flipud(STATE)[row][column] == '-':\n return True\n else:\n return False",
"def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True",
"def row1_invariant(self, target_col):\r\n # assert that row 1 is solved\r\n if not self.lower_row_invariant(1, target_col):\r\n return False\r\n # asserts that tile proceeded to (1,j), the grid below (1,j) and to the right is solved\r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(2, self.get_height()):\r\n if not (dummy_i, dummy_j) == self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True",
"def check_pivot_row(self, row):\r\n all_zeros = True\r\n for i in range(self.SIZE):\r\n if self.matrix[row][i] != 0:\r\n all_zeros = False\r\n break\r\n\r\n if all_zeros:\r\n self.check_solvability(0, self.matrix[row][-1])",
"def isComplete(grid):\n for row in range(0,9):\n for col in range(0,9):\n if grid[row][col]==0:\n return False\n return True",
"def row0_invariant(self, target_col):\n # replace with your code\n if self.get_number(0, target_col) != 0:\n return False\n current = 0\n for row in range(2, self.get_height()):\n if target_col == self.get_width() - 1:\n current = self._grid[row][0]\n else:\n current = self._grid[row - 1][-1] + 1\n column = self._grid[row]\n for grid in column:\n if grid != current:\n print 'Error 4'\n return False\n current += 1\n current = self._grid[1][target_col]\n for grid in self._grid[1][target_col:]:\n if grid != current:\n print 'Error 5'\n return False\n current += 1\n return True",
"def check_if_column_full(self, board, x):\n for y in reversed(range(self.height // 80)):\n if board[x, 0] != 0:\n return True\n elif board[(x, y)] == 0:\n return False\n else:\n y -= y\n continue",
"def solve_one(board: Board, col: int) -> bool:\n #Completed board found\n if col >= board.size:\n return True\n for row in range(board.size):\n #check if position is valid\n if check_constraints(board=board, row=row, col=col):\n #update board and continue BFS\n board.mark_tile(row=row, col=col)\n if solve_one(col=col+1, board=board):\n return True\n board.unmark_tile(row=row, col=col)\n #no valid solutions for current board position\n return False",
"def check_grid(grid: List):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return False\n return True",
"def valid(n, board, row, col):\n for i in range(col):\n if board[row][i] == 1:\n return False\n x = row\n y = col\n while x >= 0 and y >= 0:\n if board[x][y] == 1:\n return False\n x -= 1\n y -= 1\n x = row\n y = col\n while x < n and y >= 0:\n if board[x][y] == 1:\n return False\n x += 1\n y -= 1\n return True",
"def row0_invariant(self, target_col):\n \n # Returns False if zero tile is NOT in target position (0, target_col).\n if self.get_number(0, target_col) != 0:\n return False\n \n # Returns False if tiles to the right of target_col are NOT positioned correctly.\n if target_col < self.get_width():\n for col in range(target_col + 1, self.get_width()):\n if self.get_number(0, col) != col:\n return False\n \n # Returns False if tiles to the right of target_col in row 1 are NOT positioned correctly.\n for col in range(target_col, self.get_width()):\n if self.get_number(1, col) != col + self.get_width():\n return False\n\n # Returns False if tiles in rows 2 and below are NOT positioned correctly.\n if 1 < self.get_height():\n for row in range(2, self.get_height()):\n for col in range(self.get_width()):\n if self.get_number(row, col) != col + (row * self.get_width()):\n return False\n\n return True",
"def is_solvable(self, row=0, col=0):\n if row == self.sl-1 and col == self.sl: \n return True\n\n # If column is the side length, mvoe indices to next row\n if col == self.sl:\n return self.is_solvable(row+1, 0)\n\n # If square has a value already, move to next column\n if self.puzzle[row][col] != 0: \n return self.is_solvable(row, col + 1)\n\n # If empty square, try each value in that square\n for value in range(1, self.sl+1): \n # If a valid value, recurse with that value and attempt to solve \n if self.valid_square(row, col, value): \n self.puzzle[row][col] = value\n solved = self.is_solvable(row, col + 1) \n self.puzzle[row][col] = 0\n\n # If value solves puzzle, return solved\n if solved:\n return solved\n\n return False",
"def is_solved(self, grid: list):\n # Iterates over rows\n for i in range(9):\n\n if 0 in grid[i]: # Looks for 0s\n return False\n for j in range(9):\n if not self.validate_cell(grid, i, j): # validates each cell\n return False\n return True",
"def check_tile_availability(self, row, col):\n return self.board[row][col] == 0",
"def isSafe(board, row, col, n):\n\n \"\"\" veriying the row on left side \"\"\"\n for i in range(col):\n if board[row][i] == 1:\n return False\n\n \"\"\" veriying upper diagonal on left side \"\"\"\n for i,j in zip(range(row,-1,-1), range(col,-1,-1)):\n if board[i][j] == 1:\n return False\n\n \"\"\" veriying upper diagonal on left side \"\"\"\n for i,j in zip(range(row, n, 1), range(col,-1,-1)):\n if board[i][j] == 1:\n return False\n\n return True",
"def is_in_chessboard(row_or_col):\n\n nonzero, = row_or_col.nonzero()\n\n # compute the approximate number of crossed squares\n squares = 0\n for i, j in zip(nonzero, nonzero[1:]):\n if j - i >= min_square_dim:\n squares += 1\n\n return squares >= 8",
"def is_posssible_col(self,col,user_value):\n for row in range(9):\n if self.arr[row][col] == user_value:\n logging.debug(f\"is_posssible_col row(): (False) row: {row} col: {col} arr{self.arr[row][col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_col row(): (True) row: {row} col: {col} arr{self.arr[row][col]} != {user_value}\")\n return True",
"def valid_coverage_cell(self, row, column):\n\n if (row < self.cov_grid.shape[0] and\n row >= 0) and \\\n (column < self.cov_grid.shape[1] and\n column >= 0):\n return True\n else:\n return False",
"def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols",
"def check_if_solvable(self):\n\n self.solvable=True #status of sudoku\n for i in range(0, 9):\n for j in range(0, 9):\n if self.a[i][j]==0:\n continue\n if self.check(i, j)[self.a[i][j]]==0:\n self.solvable=False\n return False",
"def is_complete(sudoku_board):\n BoardArray = sudoku_board.CurrentGameBoard\n size = len(BoardArray)\n subsquare = int(math.sqrt(size))\n\n #check each cell on the board for a 0, or if the value of the cell\n #is present elsewhere within the same row, column, or square\n for row in range(size):\n for col in range(size):\n if BoardArray[row][col]==0:\n return False\n for i in range(size):\n if ((BoardArray[row][i] == BoardArray[row][col]) and i != col):\n return False\n if ((BoardArray[i][col] == BoardArray[row][col]) and i != row):\n return False\n #determine which square the cell is in\n SquareRow = row // subsquare\n SquareCol = col // subsquare\n for i in range(subsquare):\n for j in range(subsquare):\n if((BoardArray[SquareRow*subsquare+i][SquareCol*subsquare+j]\n == BoardArray[row][col])\n and (SquareRow*subsquare + i != row)\n and (SquareCol*subsquare + j != col)):\n return False\n return True"
] | [
"0.7548881",
"0.752049",
"0.75068325",
"0.74679226",
"0.742282",
"0.7359223",
"0.7341293",
"0.73344994",
"0.7321091",
"0.73030216",
"0.7264205",
"0.7223349",
"0.7205626",
"0.7195732",
"0.7169341",
"0.7164632",
"0.71609145",
"0.71587044",
"0.71222657",
"0.7111231",
"0.7105816",
"0.7091535",
"0.7042283",
"0.70242876",
"0.70187825",
"0.70182234",
"0.6997958",
"0.6971268",
"0.6920552",
"0.6917151"
] | 0.76055276 | 0 |
Check whether the puzzle satisfies the row one invariant at the given column (col > 1). Returns a boolean | def row1_invariant(self, target_col):
result = True
if self._grid[1][target_col] != 0:
result = False
for row in range(2, self._height):
for col in range(self._width):
solved_value = (col + self._width * row)
if solved_value != self._grid[row][col]:
result = False
for row in (0, 1):
for col in range(target_col+1, self._width):
solved_value = (col + self._width * row)
if solved_value != self._grid[row][col]:
result = False
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def row1_invariant(self, target_col):\r\n # assert that row 1 is solved\r\n if not self.lower_row_invariant(1, target_col):\r\n return False\r\n # asserts that tile proceeded to (1,j), the grid below (1,j) and to the right is solved\r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(2, self.get_height()):\r\n if not (dummy_i, dummy_j) == self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True",
"def row1_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[1][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row1_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 2:\r\n # print 'All conditions are correct!'\r\n return True",
"def checkSafe(Board, rows, column):\n for x in range(rows):\n if (Board[x] == column or\n Board[x] + rows - x == column or\n Board[x] + x - rows == column):\n return False\n return True",
"def check(chessboard, row, col, n):\n for i in range(col):\n if chessboard[row][i] == 1:\n return False\n\n for j, i in zip(range(row, -1, -1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n \n for j, i in zip(range(row, n, 1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n\n return True",
"def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True",
"def solve_one(board: Board, col: int) -> bool:\n #Completed board found\n if col >= board.size:\n return True\n for row in range(board.size):\n #check if position is valid\n if check_constraints(board=board, row=row, col=col):\n #update board and continue BFS\n board.mark_tile(row=row, col=col)\n if solve_one(col=col+1, board=board):\n return True\n board.unmark_tile(row=row, col=col)\n #no valid solutions for current board position\n return False",
"def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])",
"def valid(n, board, row, col):\n for i in range(col):\n if board[row][i] == 1:\n return False\n x = row\n y = col\n while x >= 0 and y >= 0:\n if board[x][y] == 1:\n return False\n x -= 1\n y -= 1\n x = row\n y = col\n while x < n and y >= 0:\n if board[x][y] == 1:\n return False\n x += 1\n y -= 1\n return True",
"def row1_invariant(self, target_col):\n \n # Returns False if zero tile is NOT in target position (1, target_col).\n if self.get_number(1, target_col) != 0:\n return False\n \n # Returns False if tiles to the right of target_col are NOT positioned correctly.\n if target_col < self.get_width():\n for col in range(target_col + 1, self.get_width()):\n if self.get_number(1, col) != col + (1 * self.get_width()):\n return False\n\n # Returns False if tiles in rows 2 and below are NOT positioned correctly.\n if 1 < self.get_height():\n for row in range(2, self.get_height()):\n for col in range(self.get_width()):\n if self.get_number(row, col) != col + (row * self.get_width()):\n return False\n\n return True",
"def row0_invariant(self, target_col):\n result = True\n if self._grid[0][target_col] != 0:\n result = False\n if self._grid[1][target_col] != (target_col + self._width * 1):\n result = False\n for row in range(2, self._height):\n for col in range(self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n for row in (0, 1):\n for col in range(target_col+1, self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n return result",
"def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False",
"def row_constraint(board: Board, row: int, col: int) -> bool:\n for i in range(col):\n if board.is_queen(row=row, col=i):\n return False\n return True",
"def isSafe(board, row, col, n):\n\n \"\"\" veriying the row on left side \"\"\"\n for i in range(col):\n if board[row][i] == 1:\n return False\n\n \"\"\" veriying upper diagonal on left side \"\"\"\n for i,j in zip(range(row,-1,-1), range(col,-1,-1)):\n if board[i][j] == 1:\n return False\n\n \"\"\" veriying upper diagonal on left side \"\"\"\n for i,j in zip(range(row, n, 1), range(col,-1,-1)):\n if board[i][j] == 1:\n return False\n\n return True",
"def check_col(sudoku):\r\n for col in range(9):\r\n for row in range(8):\r\n test = sudoku[row][col]\r\n for i in range(row+1,9):\r\n if sudoku[i][col] == test:\r\n return True #returns True is there is more than two of the same numbers in a column\r",
"def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])",
"def row0_invariant(self, target_col):\r\n # asserts that curr_tile is in target_col\r\n if self.get_number(0, target_col) != 0:\r\n return False\r\n # asserts that tile (0,j) is solved, the grid below (0,j) and to the right is solved \r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(0, self.get_height()):\r\n if dummy_i > 1 or (dummy_i == 0 and dummy_j > target_col) or (dummy_i == 1 and dummy_j >= target_col):\r\n if (dummy_i, dummy_j) != self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True",
"def check_row(sudoku):\r\n for row in range(len(sudoku)):\r\n for col in range(len(sudoku)):\r\n if sudoku[row].count(sudoku[row][col]) != 1:\r\n return True #returns True is there is more than two of the same numbers in a row\r",
"def is_posssible_col(self,col,user_value):\n for row in range(9):\n if self.arr[row][col] == user_value:\n logging.debug(f\"is_posssible_col row(): (False) row: {row} col: {col} arr{self.arr[row][col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_col row(): (True) row: {row} col: {col} arr{self.arr[row][col]} != {user_value}\")\n return True",
"def check_constraints(board: Board, row: int, col: int) -> bool:\n if not row_constraint(board=board, row=row, col=col):\n return False\n if not upper_diagonal_constraint(board=board, row=row, col=col):\n return False\n if not lower_diagonal_constraint(board, row, col):\n return False\n return True",
"def is_valid(row, peg):\n return (\n (row < TRI_SIZE) and\n (row >= 0) and\n (peg < TRI_SIZE) and\n (peg >= 0) and\n (peg <= row)\n )",
"def valid_coverage_cell(self, row, column):\n\n if (row < self.cov_grid.shape[0] and\n row >= 0) and \\\n (column < self.cov_grid.shape[1] and\n column >= 0):\n return True\n else:\n return False",
"def is_solvable(self, row=0, col=0):\n if row == self.sl-1 and col == self.sl: \n return True\n\n # If column is the side length, mvoe indices to next row\n if col == self.sl:\n return self.is_solvable(row+1, 0)\n\n # If square has a value already, move to next column\n if self.puzzle[row][col] != 0: \n return self.is_solvable(row, col + 1)\n\n # If empty square, try each value in that square\n for value in range(1, self.sl+1): \n # If a valid value, recurse with that value and attempt to solve \n if self.valid_square(row, col, value): \n self.puzzle[row][col] = value\n solved = self.is_solvable(row, col + 1) \n self.puzzle[row][col] = 0\n\n # If value solves puzzle, return solved\n if solved:\n return solved\n\n return False",
"def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True",
"def in_col(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x!= row and n == grid[x][col]:\n return True\n return False",
"def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0",
"def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0",
"def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols",
"def valid_guess(self, row, col):\n # if row nor col is at an edge space, returns False\n if not isinstance(row, int) or not isinstance(col, int):\n return False\n # ensures no corner spaces have been selected\n if row < 1 or row > 8:\n return False\n if col < 1 or col > 8:\n return False\n return True",
"def row0_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[0][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row0_invariant' \r\n return False\r\n conditions += 1\r\n \r\n\r\n for ind in range(len(self._grid[1][target_col:])):\r\n if self.current_position(1, ind+target_col) != (1, ind+target_col):\r\n # print 'Some tile in the lower row does not in correct place in row0_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 3:\r\n # print 'All conditions are cprrect!'\r\n return True",
"def row1_invariant(self, target_col):\r\n solved_lower_right = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[1][target_col] == 0:\r\n solved_lower_right = True\r\n \r\n for row in range(1 + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n for row in range(0, 1):\r\n for col in range(target_col + 1, self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n return solved_lower_right"
] | [
"0.7744749",
"0.7643967",
"0.7540741",
"0.7405464",
"0.7363063",
"0.7331644",
"0.73298305",
"0.72720087",
"0.7253807",
"0.71994525",
"0.7157826",
"0.71566737",
"0.71124315",
"0.7103953",
"0.7086645",
"0.70836926",
"0.7064152",
"0.7060724",
"0.7032539",
"0.7031373",
"0.7020518",
"0.70196646",
"0.69543797",
"0.6903697",
"0.68981075",
"0.68981075",
"0.68949056",
"0.68874323",
"0.6883886",
"0.6859798"
] | 0.77575016 | 0 |
Generate a solution string for a puzzle. Updates the puzzle and returns a move string | def solve_puzzle(self):
cur0_row, cur0_col = self.current_position(0, 0)
move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)
self.update_puzzle(move_str)
for row in range(self._height-1, 1, -1):
for col in range(self._width-1, -1, -1):
assert self.lower_row_invariant(row, col)
if col != 0:
move_str += self.solve_interior_tile(row, col)
else:
move_str += self.solve_col0_tile(row)
for col in range(self._width-1, 1, -1):
assert self.row1_invariant(col)
move_str += self.solve_row1_tile(col)
assert self.row0_invariant(col)
move_str += self.solve_row0_tile(col)
move_str += self.solve_2x2()
return move_str | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str",
"def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string",
"def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move",
"def gen_solve_to_text(self):\n\n count = 0\n self.url = \"scramble: \\n\"\n for move in self.scramble.split():\n self.url += \"{} \".format(move)\n self.url += \"\\n\\nsolve:\\n\"\n\n for move in self.solve_stats:\n if self.comms_unparsed_bool:\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}\\n//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"\\n\") != -1:\n alg = self.url[self.url.rfind(\"\\n\") + 1:]\n self.url = self.url[:self.url.rfind(\"\\n\") + 1] + \"\\n//{}\\n\".format(piece) + alg\n self.url += self.comms_unparsed[count]\n count += 1\n self.url += \"// {} \\n\".format(move[\"comment\"])\n else:\n if \"move\" in move:\n if move[\"move\"] != \"\":\n self.url += \"{} \".format(move[\"move\"])\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}\\n//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"\\n\") != -1:\n alg = self.url[self.url.rfind(\"\\n\") + 1:]\n self.url = self.url[:self.url.rfind(\"\\n\") + 1] + \"//{}\\n\".format(piece) + alg\n\n self.url += \"// {} \\n\".format(move[\"comment\"])\n else:\n self.url += \"// {} \\n\".format(move[\"comment\"])",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print",
"def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. \r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). \r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction",
"def __str__(self):\n puzzle_string = '—' * 13 + '\\n'\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n puzzle_string += '│{0: >2}'.format(str(self.position[i][j]))\n if j == self.PUZZLE_NUM_COLUMNS - 1:\n puzzle_string += '│\\n'\n\n puzzle_string += '—' * 13 + '\\n'\n return puzzle_string",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction",
"def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print 
solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r",
"def solve_2x2(self):\n cur_row, cur_col = self.current_position(0, 0)\n move_str = 'u' * cur_row + 'l' * cur_col\n self.update_puzzle(move_str)\n if self.check_2x2_solved():\n return move_str\n else:\n while not self.check_2x2_solved():\n move_str += 'rdlu'\n self.update_puzzle('rdlu')\n return move_str",
"def solve_2x2(self):\n # replace with your code\n string = ''\n num1 = self.get_number(0, 0)\n num2 = self.get_number(0, 1)\n num3 = self.get_number(1, 0)\n max_num = max([num1, num2, num3])\n min_num = min([num1, num2, num3])\n if num1 == min_num and num2 == max_num:\n string += 'ul'\n elif num1 == max_num and num3 == min_num:\n string += 'ul'\n string += 'rdlu' * 2\n elif num2 == min_num and num3 == max_num:\n string += 'ul'\n string += 'rdlu'\n print '2x2 Path', string\n self.update_puzzle(string)\n return string",
"def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")",
"def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str",
"def generate_strings(self, new_puzzle):\n return new_puzzle._start",
"def shuffle_puzzle(solution: str) -> str:\r\n shuffled_solution = solution[:-1]\r\n\r\n # Do more shuffling for bigger puzzles.\r\n swaps = len(solution) * 2\r\n for _ in range(swaps):\r\n # Pick two indices in the puzzle randomly.\r\n index1, index2 = random.sample(range(len(shuffled_solution)), k=2)\r\n shuffled_solution = swap_position(shuffled_solution, index1, index2)\r\n\r\n return shuffled_solution + EMPTY",
"def solution_to_string(self):\n solution_vector_index_format = [index+1 if elem == 1 else -index-1 for index, elem in enumerate(self.solution_vector)]\n return \" \".join(map(str, solution_vector_index_format))",
"def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements",
"def __str__(self):\n value = str(self.puzzle) + str(\" \") + str(self.g) + str(\" \") + str(self.h)\n return value",
"def solution(self) -> str:\n\n # \"Starting after the cup labeled 1, collect the other cups' labels clockwise into a single string with no\n # extra characters.\"\n\n self.current = 1\n eight_cups = self.pick_up_cups(8) # 9 cups in the circle, so all cups except '1' is 8 cups.\n\n answer = ''\n for cup in eight_cups:\n answer += str(cup)\n return answer",
"def __str__(self):\r\n\t\toutStr = \"\"\r\n\t\toutStr += \"Heuristic Level: \" + str(self.heuristic)\r\n\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\t\tfor row in self.board:\r\n\t\t\ttempStr = (\"\\n|\" + \" %2d |\" * self.n)\r\n\t\t\toutStr += tempStr % tuple(row)\r\n\t\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\r\n\t\treturn outStr",
"def solve_col0_tile(self, target_row):\n # replace with your code\n string = ''\n target = self.current_position(target_row, 0)\n row_difference = target_row - target[0]\n col_difference = target[1]\n string += 'u' * row_difference\n if col_difference > 0:\n string += 'r' * (col_difference - 1)\n if row_difference > 1:\n string += 'druld' * (row_difference - 1)\n string += 'rulld' * (col_difference - 1)\n string += 'ruldrdlurdluurddlu'\n elif col_difference == 0:\n string += 'rddlu' * (row_difference - 2)\n if row_difference > 1:\n string += 'rd'\n string += 'l'\n string += 'ruldrdlurdluurddlu'\n string += 'r' * (self._width - 1)\n print 'Col 0 Path', string\n self.update_puzzle(string)\n assert self.lower_row_invariant(target_row - 1, self._width -1), 'False string'\n return string",
"def part1_2(puzzle_input):\n [initial_state_string, configurations] = puzzle_input.split('\\n\\n')\n initial_state = re.sub('initial state: ', '', initial_state_string)\n rules_arr = configurations.split('\\n')\n rules = [re.split(' => ', line) for line in rules_arr]\n rules = {t[0]: t[1] for t in rules}\n current_state = '..........' + initial_state + '...............................................................................................................................................'\n for i in range(100): # After 100th cycle, the only change is that there is a '#' that shifts right\n next_generation_string = \"\"\n for index, pot in enumerate(current_state):\n if index == 0:\n temp_string = '..' + current_state[:3]\n elif index == 1:\n temp_string = '.' + current_state[:4]\n elif index == len(current_state) - 2:\n temp_string = current_state[-4:] + '.'\n elif index == len(current_state) - 1:\n temp_string = current_state[-3:] + '..'\n else:\n temp_string = current_state[index-2:index+3]\n if temp_string in rules:\n next_generation_string += rules[temp_string]\n else:\n next_generation_string += pot\n current_state = next_generation_string\n\n # For part 1\n part1_sum = 0\n if i == 19:\n for index, pot in enumerate(current_state):\n if pot == '#':\n part1_sum += index - 10\n print(part1_sum)\n\n # Part 2\n part2_sum = 0\n for index, pot in enumerate(current_state):\n if pot == '#':\n part2_sum += index - 10 + 50000000000 - 100\n print(part2_sum)",
"def solve_row0_tile(self, target_col):\n # replace with your code\n string = ''\n assert self.row0_invariant(target_col), 'False precondition'\n target = self.current_position(0, target_col)\n row_difference = target[0]\n col_difference = target_col - target[1]\n if row_difference == 0:\n if col_difference == 1:\n string += 'ld'\n elif col_difference > 1:\n string += 'l' * col_difference\n string += 'drrul' * (col_difference - 2)\n string += 'druld'\n string += 'urdlurrdluldrruld'\n elif row_difference == 1:\n if col_difference == 1:\n string += 'lld'\n string += 'urdlurrdluldrruld'\n elif col_difference > 1:\n string += 'ld'\n string += 'l' * (col_difference - 1)\n string += 'urrdl' * (col_difference - 2)\n string += 'urdlurrdluldrruld'\n print 'Row 0 Path', string\n self.update_puzzle(string)\n assert self.row1_invariant(target_col - 1), 'False string'\n return string",
"def __str__(self):\n def align_column(grid):\n board = \"\"\n for i in range(self.n):\n board += str(grid[i]) + \"\\n\"\n return board.strip()\n return (\"===Current Stage===\\n\"\n \"{}\\n\"\n \"====Goal Board=====\\n\"\n \"{}\".format(align_column(self.from_grid),\n align_column(self.to_grid)))",
"def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\""
] | [
"0.7559481",
"0.73133874",
"0.69816166",
"0.66144603",
"0.6516676",
"0.63399726",
"0.6325148",
"0.6325148",
"0.6299051",
"0.6299051",
"0.6299051",
"0.6297025",
"0.62952787",
"0.6252764",
"0.6244446",
"0.6211515",
"0.61357105",
"0.6083045",
"0.6063561",
"0.6049944",
"0.602346",
"0.60146224",
"0.5877995",
"0.58751774",
"0.58524394",
"0.5840316",
"0.5820368",
"0.5808467",
"0.5736328",
"0.57171214"
] | 0.7348655 | 1 |
Run a reaction and combine the products in a single string. Makes errors somewhat more readable. | def _reactAndSummarize(rxn_smarts, *smiles):
rxn = rdChemReactions.ReactionFromSmarts(rxn_smarts)
mols = [Chem.MolFromSmiles(s) for s in smiles]
products = []
for prods in rxn.RunReactants(mols):
products.append(' + '.join(map(_getProductCXSMILES, prods)))
products = ' OR '.join(products)
return products | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reaction_str(self):\n\n def format(number):\n return str(number).rstrip(\".0\") + \" \"\n\n reactant_bits = []\n product_bits = []\n for met in sorted(self._metabolites, key=attrgetter(\"id\")):\n coefficient = self._metabolites[met]\n if coefficient >= 0:\n product_bits.append(format(coefficient) + met.id)\n else:\n reactant_bits.append(format(abs(coefficient)) + met.id)\n\n reaction_string = ' + '.join(reactant_bits)\n if self.gapfill_direction == '=':\n reaction_string += ' <=> '\n elif self.gapfill_direction == '<':\n reaction_string += ' <-- '\n elif self.gapfill_direction == '>':\n reaction_string += ' --> '\n reaction_string += ' + '.join(product_bits)\n return reaction_string",
"def rxn(self, string, k = 1, rtype='condensed'):\n reactants, products = string.split('->')\n reactants = reactants.split('+')\n products = products.split('+')\n\n reactants = [self.cplx(x) for x in reactants]\n products = [self.cplx(x) for x in products]\n self.reactions.add(PepperReaction(reactants, products, rtype.strip(), rate=k))",
"async def send_react(self, reactions, *args, **kwargs):\n message = await self.send(*args, **kwargs)\n if isinstance(reactions, str): # Handle two-character emojis\n reactions = (reactions,)\n for reaction in reactions:\n await self.add_reaction(message, reaction)\n return message",
"def get_reaction(reaction_type):\n\n if reaction_type == \"neg\":\n speechcon = \"<say-as interpret-as='interjection'>\" \\\n + random.choice(NEG_SPEECHCONS) + \"</say-as>\"\n ans = random.choice(NEG_ANS)\n elif reaction_type == \"pos\":\n speechcon = random.choice(POS_SPEECHCONS)\n ans = random.choice(POS_ANS)\n else:\n raise ValueError\n\n return speechcon + ans",
"def chain(self):\n commodity = self.commodity\n reactions = set()\n reaction_count = 0\n\n for comm in commodity:\n\n n = len(comm)\n repeated = r2_index(comm)\n inloop_r_count = 0\n\n for x in range(0, n - 1):\n\n if self.recombination == Recomb_1:\n\n i = x + 1\n\n if comm[x] != comm[x + 1]:\n reaction_count = reaction_count + 1\n inloop_r_count = inloop_r_count + 1\n\n if inloop_r_count == 1: # inital reaction\n left1 = [comm[x] for i in range(0, n)]\n right1 = [comm[x + 1] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n # react_str_.append(reaction_n, str(r))\n reactions.add(r)\n continue\n\n else:\n left1 = left2\n right1 = [comm[x + 1] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n reactions.add(r)\n\n\n elif self.recombination == Recomb_2:\n\n reaction_count = reaction_count + 1\n inloop_r_count = inloop_r_count + 1\n\n if inloop_r_count == 1: # inital reaction\n left1 = [repeated[0][0] for i in range(0, n)]\n right1 = [repeated[1][0] for i in range(0, n)]\n i = repeated[1][1]\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n # react_str_.append(reaction_n, str(r))\n reactions.add(r)\n continue\n\n else:\n if right2 == comm:\n break\n else:\n left1 = right2\n right1 = [repeated[inloop_r_count][0] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n i = repeated[inloop_r_count][1]\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n reactions.add(r)\n\n # all same char in comm\n elif comm == n * comm[0]:\n left1 = [comm[x] for i in range(0, n)]\n right1 = [comm[x + 1] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n # react_str_.append(reaction_n, str(r))\n reactions.add(r)\n break\n\n # reaction_comm[comm] = reactions\n self.reactions = reactions\n return self.reactions",
"def addReaction(\n self, reactants, products, expression, local_params=None, rxn_id=\"\"\n ):\n\n r1 = self.model.createReaction()\n self.check(r1, \"create reaction\")\n if len(rxn_id) == 0:\n rxn_id = \"v\" + str(self.model.getNumReactions())\n self.check(r1.setId(rxn_id), \"set reaction id\")\n self.check(r1.setReversible(False), \"set reaction reversibility flag\")\n self.check(r1.setFast(False), 'set reaction \"fast\" attribute')\n\n for re in reactants:\n if re is not None and \"$\" in re:\n re.translate(None, \"$\")\n re_split = re.split()\n if len(re_split) == 1:\n sto = 1.0\n re_id = re\n elif len(re_split) == 2 and re_split[0].isdigit():\n sto = float(re_split[0])\n re_id = re_split[1]\n else:\n err_msg = (\n \"Error: reactants must be listed in format 'S' or '(float)' S'\"\n )\n raise SystemExit(err_msg)\n s1 = self.model.getSpecies(re_id)\n species_ref1 = r1.createReactant()\n self.check(species_ref1, \"create reactant\")\n self.check(species_ref1.setSpecies(s1.getId()), \"assign reactant species\")\n self.check(\n species_ref1.setStoichiometry(sto), \"assign reactant stoichiometry\"\n )\n if self.document.getLevel() == 3:\n self.check(\n species_ref1.setConstant(True), 'set \"constant\" on species ref 1'\n )\n\n for pro in products:\n if pro is not None and \"$\" in pro:\n pro.translate(None, \"$\")\n pro_split = pro.split()\n if len(pro_split) == 1:\n sto = 1.0\n pro_id = pro\n elif len(pro_split) == 2:\n sto = float(pro_split[0])\n pro_id = pro_split[1]\n else:\n err_msg = \"Error: products must be listed in format 'S' or '(float)' S'\"\n raise SystemExit(err_msg)\n s2 = self.model.getSpecies(pro_id)\n species_ref2 = r1.createProduct()\n self.check(species_ref2, \"create product\")\n self.check(species_ref2.setSpecies(s2.getId()), \"assign product species\")\n self.check(species_ref2.setStoichiometry(sto), \"set product stoichiometry\")\n if self.document.getLevel() == 3:\n self.check(\n species_ref2.setConstant(True), 'set \"constant\" on species ref 2'\n )\n\n math_ast = libsbml.parseL3Formula(expression)\n self.check(math_ast, \"create AST for rate expression\")\n\n kinetic_law = r1.createKineticLaw()\n self.check(kinetic_law, \"create kinetic law\")\n self.check(kinetic_law.setMath(math_ast), \"set math on kinetic law\")\n if local_params is not None:\n for param in local_params.keys():\n val = local_params.get(param)\n if self.document.getLevel() == 3:\n p = kinetic_law.createLocalParameter()\n else:\n p = kinetic_law.createParameter()\n self.check(p, \"create local parameter\")\n self.check(p.setId(param), \"set id of local parameter\")\n self.check(p.setValue(val), \"set value of local parameter\")\n return r1",
"def __str__(self):\n reprStr = 'Help Mario build Iron Man suit!'+'\\n' +'To make the ' + self._name + ',you need:'+'\\n'\n for part in self._supplies:\n reprStr = reprStr + str(part.getCount()) + ' ' + part.getData() + '\\n'\n return reprStr",
"def _GetReactionSideString(side):\n sdata = []\n for c_w_coeff in side:\n if c_w_coeff.coeff == 1:\n sdata.append(c_w_coeff.GetName())\n else:\n sdata.append('%d %s' % (c_w_coeff.coeff,\n c_w_coeff.GetName()))\n return ' + '.join(sdata)",
"def __str__(self):\n s = \"\"\n for e in self._sub_effects:\n s += str(e) + \" ^ \"\n return s[0:-3] if len(self._sub_effects) > 0 else \"Void\"",
"def test_react(self):\n procnum = 1\n\n spc_a = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spc_a, spc), ['H_Abstraction']) for spc in spcs]\n\n reaction_list = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n self.assertIsNotNone(reaction_list)\n self.assertEqual(len(reaction_list), 3)\n self.assertTrue(all([isinstance(rxn, TemplateReaction) for rxn in reaction_list]))",
"def genReactionAntString(self, revTag = \"RevRe__\",\n iRevTag = \"IrRevRe__\"):\n \n lines = self.antString.splitlines()\n lines = [line.split(\"#\")[0] for line in lines]\n rLines = [line.split(\":\") for line in lines if\n len(line.split(\":\"))==2]\n rLines = [[line[0]]+line[1].split(\";\") for line in rLines\n if len(line[1].split(\";\"))>=2]\n rLines = [[part.strip() for part in line] for line in rLines]\n rLines = [line for line in rLines if (\"->\" in line[1]) or\n (\"=>\" in line[1])]\n rLines = [[line[0], \"->\" in line[1], line[2]] for line in rLines]\n rLines = [[revTag+line[0], line[1], line[2]] if line[1] else\n [iRevTag+line[0], line[1], line[2]] for line in rLines]\n rLines = [line[0]+\" := \"+line[2]+\";\" for line in rLines]\n primed = False\n for i, line in zip(range(len(lines)),lines):\n if line.strip().startswith(\"model\"):\n primed = True\n if (line.strip() == \"end\") and primed:\n break\n print(\"line \"+str(i))\n indent = \"\"\n while indent == \"\" and i>0:\n i = i-1\n indent = re.search(r'^\\s*', lines[i]).group()\n rLines = [indent+line for line in rLines]\n self.reactionAntString = \"\\n\".join(lines[:i+1]+rLines+lines[i+1:])",
"def get_reaction_label(rmg_reaction):\n reactants = rmg_reaction.reactants\n products = rmg_reaction.products\n if len(reactants) > 1:\n reactants_string = '+'.join([reactant.molecule[0].toSMILES() for reactant in reactants])\n else:\n reactants_string = reactants[0].molecule[0].toSMILES()\n if len(products) > 1:\n products_string = '+'.join([product.molecule[0].toSMILES() for product in products])\n else:\n products_string = products[0].molecule[0].toSMILES()\n reaction_label = '_'.join([reactants_string, products_string])\n return reaction_label",
"def tex_reaction_scheme(self):\n \n if self.reaction_matrix is None or self.input_params is None:\n return 'undefined'\n \n species = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n eqn = []\n \n reactants, products = self.reaction_matrix.nonzero()\n for r,p,k in zip(reactants, products,self.input_params.keys()):\n eqn.append( species[r] + r'\\xrightarrow{{' + k + '}}' + species[p])\n \n latex_eqn = r'$' + ','.join(eqn) + r'$'\n return latex_eqn",
"def idea(self,irc,msg,args):\n company = self.pick(self.vPrefix) + \\\n self.pick(self.vMidfix) + \\\n self.pick(self.vPostfix)\n product = self.pick(self.vBased) + \" \" + \\\n self.pick(self.vProd) + \" \" + \\\n self.pick(self.vVia) \n irc.reply(\"%s - %s\" % (company,product))",
"def RecipeToText(recipe):\n\n\tout = []\n\tworld = None\n\tfor (annotation, next_world) in recipe[1:]:\n\t\tcommand = annotation[0]\n\t\targuments = annotation[1]\n\n\t\trecipe_text = ''\n\t\tif command == 'create_ing':\n\t\t\t# TODO: When computing BLEU score, we may wish to ignore create_ing\n\t\t\t# commands since they are trivially translated\n\t\t\trecipe_text += '%s.' % arguments[1]\n\n\t\telif command == 'create_tool':\n\t\t\t# TODO: This is a horrible hack but we need some way to make sure that the\n\t\t\t# length of the outputted string is equal to that of the list of original\n\t\t\t# texts.\n\t\t\trecipe_text = '<create_tool>'\n\n\t\telif command == 'combine':\n\t\t\trecipe_text += 'Combine '\n\n\t\t\trecipe_text += ', '.join([world.I_d[ing] for ing in arguments[0]])\n\n\t\t\tif not IsNull(arguments[3]):\n\t\t\t\trecipe_text += ', %s' % arguments[3]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'separate':\n\t\t\trecipe_text += 'Separate '\n\t\t\trecipe_text += '%s and %s' % (world.I_d[arguments[0]], next_world.I_d[arguments[1]])\n\n\t\t\tif not IsNull(arguments[5]):\n\t\t\t\trecipe_text += ', %s' % arguments[5]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'put':\n\t\t\trecipe_text += 'Put %s in %s. ' % (world.I_d[arguments[0]], world.T_d[arguments[1]])\n\n\t\telif command == 'remove':\n\t\t\trecipe_text += 'Remove %s from %s. ' % (world.I_d[arguments[0]], world.T_d[arguments[1]])\n\n\t\telif command == 'cut':\n\t\t\trecipe_text += 'Chop %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'mix':\n\t\t\trecipe_text += 'Mix %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'cook':\n\t\t\trecipe_text += 'Cook %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'do':\n\t\t\trecipe_text += 'Taking %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'serve':\n\t\t\trecipe_text += 'Serve %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ', %s' % arguments[1]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'set':\n\t\t\trecipe_text += 'Set %s on %s. ' % (world.T_d[arguments[0]], arguments[1])\n\n\t\telif command == 'leave':\n\t\t\trecipe_text += 'Leave %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ', %s' % arguments[1]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'chefcheck':\n\t\t\trecipe_text += 'Check %s for %s. ' % (world.I_d[arguments[0]], arguments[1])\n\n\t\tworld = next_world\n\t\tout.append(recipe_text)\n\n\treturn out",
"def test_reaction_inverts_stereo(self):\n reaction = '[C@:1]>>[C@@:1]'\n\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br |o1:1|')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@H](Cl)Br |&1:1|')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')",
"async def on_reaction_add(reaction, user):\n #Before doing anything\n #Check to see if the reaction was a karma emoji\n if reaction.emoji == initKarma.goodKarma:\n consoleMessage = 'Writing to karmaData file :: Increasing '\n consoleMessage += reaction.message.author.name\n consoleMessage += '\\'s karma by 1!'\n print(consoleMessage)\n await karmaUpdate(client, reaction.message, '+1')\n if reaction.emoji == initKarma.badKarma:\n consoleMessage = 'Writing to karmaData file :: Decreasing '\n consoleMessage += reaction.message.author.name\n consoleMessage += '\\'s karma by 1!'\n print(consoleMessage)\n await karmaUpdate(client, reaction.message, '-1')",
"def test_reaction_ignores_stereo(self):\n reaction = '[C:1]>>[C:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)",
"def generate_msg(props, alert=False, user_pref=None, past=False):\n\t\tmessage = emojize(\":rocket:\", use_aliases=True)\n\t\tif past:\n\t\t\tmessage += ' Launch was held on: ' + props['when'].format('YYYY-MM-DD HH:mm:ss ZZ') + '.\\n'\n\t\t\tif props['holdreason']:\n\t\t\t\tmessage += 'The launch has been *held*. Reason: ' + props['holdreason'] + '\\n'\n\t\t\tif props['failreason']:\n\t\t\t\tmessage += 'Unfortunately, the launch *failed*. Reason: ' + props['failreason'] + '\\n'\n\t\telse:\n\t\t\tif alert:\n\t\t\t\tmessage += ' *Launch is going to happen in some minutes!* '\n\t\tmessage += ' *' + props['name'] + '*' + '\\n'\n\n\t\tif not alert and not past:\n\t\t\tmessage += 'A launch will happen _' + props['when'].humanize() + '_! \\n'\n\t\t\tmessage += 'I mean ' + props['when'].format('YYYY-MM-DD HH:mm:ss ZZ') + '\\n'\n\n\t\tif past:\n\t\t\tmessage += 'Taken from *'\n\t\telse:\n\t\t\tmessage += 'Taking from *'\n\n\t\tmessage += props['location'] + '*.\\n'\n\t\tdescr = Interface.generate_description(props['missions'])\n\t\tmessage += '*Mission description*\\n' + descr + '\\n' if descr else ''\n\t\tmessage += '\\n'\n\n\t\tif props['urls']:\n\t\t\tmessage += 'Watch it here: \\n' if not past else 'You could have watched it here: \\n'\n\t\t\tfor url in props['urls']:\n\t\t\t\tmessage += ' • [' + url + '](' + url +')\\n'\n\t\telse:\n\t\t\tmessage += 'Unfortunately there '\n\t\t\tmessage += 'are' if not past else 'were'\n\t\t\tmessage += ' no reported webcasts ' \\\n\t\t\t\t\t + emojize(':disappointed_relieved:', use_aliases=True)\n\n\t\treturn message",
"def clue(self):\n if self.item == \"receipt\":\n print(\"The receipt reads that Jay bought 'diltiazem' medication 4 days ago.\")\n print(\"Diltiazem: medication for high blood pressure, when \"\n \"consumed by an individual in large quantities without high blood\"\n \"pressure, can cause heart failure.\")\n else:\n print(\"That is the wrong item!\")",
"def supercombiner(bot, ev):\n # ported from jenni\n s = 'u'\n for i in iter(range(1, 3000)):\n if unicodedata.category(chr(i)) == \"Mn\":\n s += chr(i)\n if len(s) > 100:\n break\n bot.say(s)",
"def concatenate(strings: List[str]) -> str:\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"4\")\n # END OF SOLUTION",
"def addProduct(self, *args):\n return _libsbml.Reaction_addProduct(self, *args)",
"def run(self):\n logging.debug('Displaying Info: ' + self.recipe.name)\n\n msg = PREFIX[1:] + PREFIX.join(self.recipe.info().split('\\n'))\n print(msg)\n return msg",
"async def react_with_text(\n pre_command, message: Message, is_private: bool, guild_id: int, author_id: int\n):\n if (\n (is_private or is_whitelisted(\"s_to_ringel_s\", guild_id))\n and author_id == constants.POLYid\n and \"s\" in message.content\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"s_to_ringel_s\",\n delete_message=False,\n )\n ):\n return {TEXT: \"*\" + message.content.replace(\"s\", \"ß\")}\n\n if (\n (\n is_private\n or guild_id not in constants.ayy_lmao_blacklist\n or author_id == constants.NYAid\n )\n and (message.content.lower() == \"ayy\")\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"ayy\",\n delete_message=False,\n )\n ):\n return {TEXT: \"Lmao\"}\n\n if (\n author_id in [constants.NYAid, constants.TRISTANid]\n and message.content.lower() == \"qyy\"\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"qyy\",\n delete_message=False,\n )\n ):\n return {TEXT: \"Kmao\"}\n\n if (\n message.content.lower() == \"lmao\"\n and author_id == constants.NYAid\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"lmao\",\n delete_message=False,\n )\n ):\n return {TEXT: \"Ayy\"}\n\n if (\n (is_private or guild_id not in constants.lenny_blacklist)\n and \"lenny\" in message.content.split(\" \")\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"response_lenny\",\n delete_message=False,\n )\n ):\n return {TEXT: \"( ͡° ͜ʖ ͡°)\"}\n\n if (\n is_private or guild_id not in constants.ded_blacklist\n ) and \"ded\" == message.content:\n ten_mins_ago = datetime.utcnow() - timedelta(minutes=10)\n try:\n history = message.channel.history(limit=2, after=ten_mins_ago)\n await history.next()\n await history.next()\n except NoMoreItems:\n if await pre_command(\n message=message,\n channel=message.channel,\n command=\"response_ded\",\n delete_message=False,\n ):\n return {TEXT: random.choice(command_text.ded)}\n\n if (\n (is_private or guild_id not in constants.table_unflip_blacklist)\n and message.content == \"(╯°□°)╯︵ ┻━┻\"\n and await pre_command(\n message=message,\n channel=message.channel,\n command=\"tableflip\",\n delete_message=False,\n )\n ):\n return {TEXT: \"┬─┬ ノ( ゜-゜ノ)\"}\n return {}",
"def test_react_parallel(self):\n import rmgpy.rmg.main\n rmgpy.rmg.main.maxproc = 2\n procnum = 2\n\n spc_a = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spc_a, spc), ['H_Abstraction']) for spc in spcs]\n\n reaction_list = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n self.assertIsNotNone(reaction_list)\n self.assertEqual(len(reaction_list), 3)\n self.assertTrue(all([isinstance(rxn, TemplateReaction) for rxn in reaction_list]))\n\n # Reset module level maxproc back to default\n rmgpy.rmg.main.maxproc = 1",
"def bot_failed_comprehension(error_message=None):\n result = \"\"\n\n if error_message:\n result += error_message + \"\\n\"\n\n result += \"Please see [here]\"\n result += \"(https://www.reddit.com/r/NHL_Stats/comments/74skjv/bot_details/do0tjzz/) \"\n result += \"for tips on proper usage.\\n\\n\"\n return result",
"def nuclear_reaction_energy(*args, **kwargs) -> u.J: # noqa: C901, PLR0915\n\n # TODO: Allow for neutrinos, under the assumption that they have no mass.\n\n # TODO: Add check for lepton number conservation; however, we might wish\n # to have violation of lepton number issuing a warning since these are\n # often omitted from nuclear reactions when calculating the energy since\n # the mass is tiny.\n\n errmsg = \"Invalid nuclear reaction.\"\n\n def process_particles_list(\n unformatted_particles_list: list[Union[str, Particle]]\n ) -> list[Particle]:\n \"\"\"\n Take an unformatted list of particles and puts each\n particle into standard form, while allowing an integer and\n asterisk immediately preceding a particle to act as a\n multiplier. A string argument will be treated as a list\n containing that string as its sole item.\n \"\"\"\n\n if isinstance(unformatted_particles_list, str):\n unformatted_particles_list = [unformatted_particles_list]\n\n if not isinstance(unformatted_particles_list, (list, tuple)):\n raise TypeError(\n \"The input to process_particles_list should be a \"\n \"string, list, or tuple.\"\n )\n\n particles = []\n\n for original_item in unformatted_particles_list:\n try:\n item = original_item.strip()\n\n if item.count(\"*\") == 1 and item[0].isdigit():\n multiplier_str, item = item.split(\"*\")\n multiplier = int(multiplier_str)\n else:\n multiplier = 1\n\n try:\n particle = Particle(item)\n except InvalidParticleError as exc:\n raise ParticleError(errmsg) from exc\n\n if particle.element and not particle.isotope:\n raise ParticleError(errmsg)\n\n particles += [particle] * multiplier\n\n except ParticleError:\n raise ParticleError(\n f\"{original_item} is not a valid reactant or \"\n \"product in a nuclear reaction.\"\n ) from None\n\n return particles\n\n def total_baryon_number(particles: list[Particle]) -> int:\n \"\"\"\n Find the total number of baryons minus the number of\n antibaryons in a list of particles.\n \"\"\"\n return sum(particle.baryon_number for particle in particles)\n\n def total_charge(particles: list[Particle]) -> int:\n \"\"\"\n Find the total charge number in a list of nuclides\n (excluding bound electrons) and other particles.\n \"\"\"\n total_charge = 0\n for particle in particles:\n if particle.isotope:\n total_charge += particle.atomic_number\n elif not particle.element:\n total_charge += particle.charge_number\n return total_charge\n\n def add_mass_energy(particles: list[Particle]) -> u.Quantity:\n \"\"\"\n Find the total mass energy from a list of particles, while\n taking the masses of the fully ionized isotopes.\n \"\"\"\n total_mass_energy = 0.0 * u.J\n for particle in particles:\n total_mass_energy += particle.mass_energy\n return total_mass_energy.to(u.J)\n\n input_err_msg = (\n \"The inputs to nuclear_reaction_energy should be either \"\n \"a string representing a nuclear reaction (e.g., \"\n \"'D + T -> He-4 + n') or the keywords 'reactants' and \"\n \"'products' as lists with the nucleons or particles \"\n \"involved in the reaction (e.g., reactants=['D', 'T'] \"\n \"and products=['He-4', 'n'].\"\n )\n\n reaction_string_is_input = args and not kwargs and len(args) == 1\n\n reactants_products_are_inputs = kwargs and not args and len(kwargs) == 2\n\n if reaction_string_is_input == reactants_products_are_inputs:\n raise ParticleError(input_err_msg)\n\n if reaction_string_is_input:\n reaction = args[0]\n\n if not isinstance(reaction, str):\n raise TypeError(input_err_msg)\n elif \"->\" not in reaction:\n raise ParticleError(\n f\"The 
reaction '{reaction}' is missing a '->'\"\n \" or '-->' between the reactants and products.\"\n )\n\n try:\n LHS_string, RHS_string = re.split(\"-+>\", reaction)\n LHS_list = re.split(r\" \\+ \", LHS_string)\n RHS_list = re.split(r\" \\+ \", RHS_string)\n reactants = process_particles_list(LHS_list)\n products = process_particles_list(RHS_list)\n except ParticleError as ex:\n raise ParticleError(f\"{reaction} is not a valid nuclear reaction.\") from ex\n\n elif reactants_products_are_inputs:\n try:\n reactants = process_particles_list(kwargs[\"reactants\"])\n products = process_particles_list(kwargs[\"products\"])\n except TypeError as t:\n raise TypeError(input_err_msg) from t\n except ParticleError as e:\n raise ParticleError(errmsg) from e\n\n if total_baryon_number(reactants) != total_baryon_number(products):\n raise ParticleError(\n f\"The baryon number is not conserved for {reactants = } and {products = }.\"\n )\n\n if total_charge(reactants) != total_charge(products):\n raise ParticleError(\n f\"Total charge is not conserved for {reactants = } and {products = }.\"\n )\n\n return add_mass_energy(reactants) - add_mass_energy(products)",
"def madlibs(a, b, c, d='hyena', e='butt heads'):\n str1 = f'{a} went out to find {b}. It was {c}.'\n str1 += f' A {d} was around, trying to {e}.'\n return str1",
"def test_make_new_reaction(self):\n\n procnum = 2\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n\n cerm = CoreEdgeReactionModel()\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n \"\"\"\n 3 expected H-abstraction reactions:\n OH + CC = H2O + C[CH2]\n OH + [CH3] = H2O + [CH2]\n OH + [CH3] = [O] + C\n \"\"\"\n\n # count no. of entries in reactionDict:\n counter = 0\n for fam, v1 in cerm.reaction_dict.items():\n for key2, v2 in v1.items():\n for key3, rxnList in v2.items():\n counter += len(rxnList)\n\n self.assertEquals(counter, 3)"
] | [
"0.6317691",
"0.59467554",
"0.57581663",
"0.5728929",
"0.5590028",
"0.5556531",
"0.5528526",
"0.55182624",
"0.5469046",
"0.54264945",
"0.5413973",
"0.5307061",
"0.5297744",
"0.5281674",
"0.5246843",
"0.52160555",
"0.51447445",
"0.51412046",
"0.5139743",
"0.511896",
"0.50641066",
"0.5057844",
"0.50475806",
"0.5043912",
"0.5030848",
"0.5024914",
"0.502204",
"0.50214237",
"0.5020959",
"0.5020217"
] | 0.5951013 | 1 |
StereoGroup atoms are in the reaction, and the reaction destroys the specified chirality at the stereo centers -> invalidate stereo center, preserve the rest of the stereo group. | def test_reaction_destroys_stereo(self):
reaction = '[C@:1]>>[C:1]'
products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')
self.assertEqual(products, 'FC(Cl)Br')
products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')
self.assertEqual(products, 'FC(Cl)Br')
products = _reactAndSummarize(reaction, 'FC(Cl)Br')
self.assertEqual(products, 'FC(Cl)Br')
reaction = '[C@:1]F>>[C:1]F'
# Reaction destroys stereo (but preserves unaffected group
products = _reactAndSummarize(reaction,
'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')
self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')
# Reaction destroys stereo (but preserves the rest of the group
products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')
self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')",
"def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')",
"def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )",
"def test_reaction_ignores_stereo(self):\n reaction = '[C:1]>>[C:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)",
"def test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)",
"def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)",
"def test_reaction_preserves_stereo(self):\n reaction = '[C@:1]>>[C@:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)",
"def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False",
"def group_group_collide(sprite_group, o_sprite_group):\n sprites = set(sprite_group)\n for sprite in sprites:\n if group_collide(o_sprite_group, sprite):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False",
"def _kill_group(self, x, y):\n if self[x, y] not in self.TURNS:\n raise BoardError('Can only kill black or white group')\n\n group = self.get_group(x, y)\n score = len(group)\n\n for x1, y1 in group:\n self[x1, y1] = self.EMPTY\n\n return score",
"def group_collide(sprite_group, other_object):\n sprites = set(sprite_group)\n for sprite in sprites:\n if sprite.collide(other_object):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False",
"def DeMorgan_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ ((neg, A), disj, (neg, B)), (neg, (A, conj, B)) ],\n\t\t\t[ (neg, (A, conj, B)), ((neg, A), disj, (neg, B)) ],\n\t\t\t[ ((neg, A), conj, (neg, B)), (neg, (A, disj, B)) ],\n\t\t\t[ (neg, (A, disj, B)), ((neg, A), conj, (neg, B)) ]\n\t\t]))",
"async def nogroup(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n author = ctx.message.author\n roles = author.roles\n for role in roles:\n if role.name.lower() in changeable_groups:\n roles.remove(role)\n await amor_manager.replace_roles(author, *roles)\n await amor_manager.say('{0} removed from color groups'.format(author.name))",
"def replace(self):\n if self.removed:\n self.coordinates = [[(self.player * 15 - 15), 0], [(self.player * 15 - 15), 1],\n [(self.player * 15 - 15), 2], [(self.player * 15 - 15), 3]]\n for i in self.coordinates:\n self.collision_boxes.append(rect.Rect(i[0] * 64, i[1] * 64, 64, 64))\n self.removed=False",
"def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)",
"def remove_as_subgroup(self, other_groups):\r\n symbols_to_exclude = reduce(lambda alphabet, cell: alphabet.union(cell.get_possible_symbols()),\r\n self.cells, set())\r\n my_cells = set(self.cells)\r\n\r\n for group in other_groups:\r\n if my_cells.issubset(group.cells) and self is not group:\r\n # Remove my cells from the other group\r\n for cell in self.cells:\r\n cell.remove_group(group)\r\n group.cells.remove(cell)\r\n\r\n # Update the alphabets in the other group\r\n for cell in group.cells:\r\n cell.remove_possible_symbols(symbols_to_exclude)",
"def concerted_unimolecular_elimination(rct_zmas, prd_zmas):\n\n # Initialize info for the returns\n ret = None, None, None, None, None\n finish_build = True\n\n # Attempt to build appropriate z-matrix\n prd_zmas, prd_gras = shifted_standard_zmas_graphs(\n prd_zmas, remove_stereo=True)\n if len(rct_zmas) == 1:\n count = 1\n while True:\n rct_zmas, rct_gras = shifted_standard_zmas_graphs(\n rct_zmas, remove_stereo=True)\n init_zma, = rct_zmas\n\n tras, _, _ = automol.graph.reac.elimination(rct_gras, prd_gras)\n if tras is not None:\n if len(tras[0]) == 1:\n tras = [tras]\n min_dist = 100.\n frm_bnd_key = None\n for tra_i in tras:\n # Get the bond formation and breaking keys\n bnd_key, = automol.graph.trans.formed_bond_keys(tra_i)\n geo = automol.zmatrix.geometry(rct_zmas[0])\n dist = automol.geom.distance(geo, *list(bnd_key))\n if dist < min_dist:\n min_dist = dist\n frm_bnd_key = bnd_key\n tra = tra_i\n brk_keys = automol.graph.trans.broken_bond_keys(tra)\n brk_bnd_key1, brk_bnd_key2 = brk_keys\n init_zma, = rct_zmas\n\n\n # Get index for migrating atom (or bond-form atom in group)\n for bnd_key in (brk_bnd_key1, brk_bnd_key2):\n if bnd_key & frm_bnd_key:\n mig_key = next(iter(bnd_key & frm_bnd_key))\n for key in frm_bnd_key:\n if key != mig_key:\n a1_idx = key\n\n # Get chain for redefining the rc1_atm1_key z-matrix entries\n _, gras = shifted_standard_zmas_graphs(\n [init_zma], remove_stereo=True)\n gra = functools.reduce(automol.graph.union, gras)\n xgr1, = automol.graph.connected_components(gra)\n atm1_neighbors = _atom_neighbor_keys(xgr1)[a1_idx]\n for idx in atm1_neighbors:\n num_keys = len(_atom_neighbor_keys(xgr1)[idx])\n if idx != mig_key and num_keys > 1:\n a2_idx = idx\n atm2_neighbors = _atom_neighbor_keys(xgr1)[a2_idx]\n for idx in atm2_neighbors:\n if idx not in (mig_key, a1_idx):\n a3_idx = idx\n\n mig_redef_keys = (a1_idx, a2_idx, a3_idx)\n\n # determine if the zmatrix needs to be rebuilt by x2z\n # determines if the hydrogen atom is used to define other atoms\n rebuild = False\n if any(idx > mig_key for idx in mig_redef_keys):\n rebuild = True\n\n # rebuild zmat and go through while loop again if needed\n # shift order of cartesian coords & rerun x2z to get a new zmat\n # else go to next stage\n if rebuild:\n reord_zma = reorder_zmatrix_for_migration(\n init_zma, a1_idx, mig_key)\n rct_zmas = [reord_zma]\n count += 1\n if count == 3:\n finish_build = False\n break\n else:\n rct_zma = init_zma\n finish_build = True\n break\n else:\n finish_build = False\n\n # If z-mat with good order found, finish building it\n if finish_build:\n\n # determine the new coordinates\n rct_geo = automol.zmatrix.geometry(rct_zma)\n distance = automol.geom.distance(\n rct_geo, mig_key, a1_idx)\n angle = automol.geom.central_angle(\n rct_geo, mig_key, a1_idx, a2_idx)\n dihedral = automol.geom.dihedral_angle(\n rct_geo, mig_key, a1_idx, a2_idx, a3_idx)\n # Reset the keys for the migrating H atom\n new_idxs = (a1_idx, a2_idx, a3_idx)\n key_dct = {mig_key: new_idxs}\n ts_zma = automol.zmatrix.set_keys(rct_zma, key_dct)\n\n # Reset the values in the value dict\n mig_names = automol.zmatrix.name_matrix(ts_zma)[mig_key]\n ts_zma = automol.zmatrix.set_values(\n ts_zma, {mig_names[0]: distance,\n mig_names[1]: angle,\n mig_names[2]: dihedral}\n )\n\n # standardize the ts zmat and get tors and dist coords\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n dist_coo_key = tuple(reversed(sorted(frm_bnd_key)))\n dist_name = next(coo_name for coo_name, coo_keys in coo_dct.items()\n if dist_coo_key in coo_keys)\n 
ts_name_dct = automol.zmatrix.standard_names(ts_zma)\n dist_name = ts_name_dct[dist_name]\n ts_zma = automol.zmatrix.standard_form(ts_zma)\n\n # Get the name of the coordinate of the other bond that is breaking\n brk_dist_name = None\n for brk_key in (brk_bnd_key1, brk_bnd_key2):\n if not brk_key.intersection(frm_bnd_key):\n brk_dist_name = automol.zmatrix.bond_key_from_idxs(\n ts_zma, brk_key)\n\n # Add second attempt to get brk_dist_name\n if brk_dist_name is None:\n brk_dist_names = [\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key1),\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key2)\n ]\n # Grab the name that is not None\n for name in brk_dist_names:\n if name is not None:\n brk_dist_name = name\n\n # get full set of potential torsional coordinates\n pot_tors_names = automol.zmatrix.torsion_coordinate_names(rct_zma)\n\n # remove the torsional coordinates that would break reaction coordinate\n gra = automol.zmatrix.graph(ts_zma, remove_stereo=True)\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n tors_names = []\n for tors_name in pot_tors_names:\n axis = coo_dct[tors_name][0][1:3]\n grp1 = [axis[1]] + (\n list(automol.graph.branch_atom_keys(gra, axis[0], axis) -\n set(axis)))\n grp2 = [axis[0]] + (\n list(automol.graph.branch_atom_keys(gra, axis[1], axis) -\n set(axis)))\n if not ((mig_key in grp1 and a1_idx in grp2) or\n (mig_key in grp2 and a1_idx in grp1)):\n tors_names.append(tors_name)\n\n # Get reactants graph\n _, rct_gras = shifted_standard_zmas_graphs(\n [rct_zma], remove_stereo=True)\n rcts_gra = automol.graph.union_from_sequence(rct_gras)\n\n brk_bnd_key1 = shift_vals_from_dummy(brk_bnd_key1, ts_zma)\n brk_bnd_key2 = shift_vals_from_dummy(brk_bnd_key2, ts_zma)\n brk_bnd_keys = frozenset({brk_bnd_key1, brk_bnd_key2})\n frm_bnd_key = shift_vals_from_dummy(frm_bnd_key, ts_zma)\n\n ret = ts_zma, dist_name, brk_dist_name, brk_bnd_keys, frm_bnd_key, tors_names, rcts_gra\n\n return ret",
"def handle_collisions():\n for sprite in sprite_group:\n for other in pygame.sprite.spritecollide(sprite, sprite_group, False):\n if sprite is not other and DO_KILL:\n sprite.kill()\n other.kill()",
"def remove_clashes(self):\n dihe_parameters = self.myGlycosylator.builder.Parameters.parameters['DIHEDRALS']\n vwd_parameters = self.myGlycosylator.builder.Parameters.parameters['NONBONDED']\n \n static_glycans = None\n for k in self.original_glycanMolecules:\n if k not in self.linked_glycanMolecules:\n if static_glycans is not None:\n static_glycans += self.original_glycanMolecules[k].atom_group\n else:\n static_glycans = self.original_glycanMolecules[k].atom_group.copy()\n \n environment = self.myGlycosylator.protein.copy() \n environment += static_glycans\n \n #Build topology\n self.myGlycosylator.build_glycan_topology(glycanMolecules = self.linked_glycanMolecules, build_all = False)\n sampler = glc.Sampler(self.linked_glycanMolecules.values(), environment, dihe_parameters, vwd_parameters)\n sampler.remove_clashes_GA()",
"def unsetReversible(self):\n return _libsbml.Reaction_unsetReversible(self)",
"def remove_from_group(self, org, contact, group):\n pass",
"def unsetReaction(self):\n return _libsbml.GeneAssociation_unsetReaction(self)",
"def test_check_for_existing_reaction_eliminates_identical_reactions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction')",
"def pseudopotentialise_molecule(self, sysargs=None, execute_deletion=True):\n\n # Find atoms to replace\n deletion_list = []\n if len(sysargs) > 2:\n if 'del' in sysargs:\n deletion_list = self.parse_coord_list(sysargs[4])\n replacement_list = self.parse_coord_list(sysargs[2])\n atoms_to_potentialise = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n else:\n atoms_to_potentialise = (item for item in self.coord_list if item[\"el\"] == 'c')\n deletion_list = (item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising carbon atoms %s ...' % [atom['#'] for atom in atoms_to_potentialise])\n\n potential_coords_list = []\n\n for atom in atoms_to_potentialise:\n distanced_atom_list = self.order_atoms_by_distance_from(atom['#'])\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n if len(distanced_carbon_list) == 1:\n primary_vector = None\n for non_c_atom in distanced_atom_list[1:4]:\n if non_c_atom['el'] != 'h':\n primary_vector = self.vectorise_atom(non_c_atom['#']) - self.vectorise_atom(atom['#'])\n if primary_vector is None:\n primary_vector = self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#'])\n else:\n primary_vector = self.vectorise_atom(distanced_carbon_list[1]['#']) - self.vectorise_atom(atom['#'])\n\n normal_vector = numpy.cross(\n self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#']),\n self.vectorise_atom(distanced_atom_list[2]['#']) - self.vectorise_atom(atom['#'])\n )\n\n primary_potential_vector = self.lengtherise_vector(primary_vector, self.atom_potential_set_distance)\n potential_set_split_vector = self.lengtherise_vector(normal_vector, self.potential_set_split_distance)\n\n relative_potential_vectors = [\n primary_potential_vector + potential_set_split_vector,\n primary_potential_vector - potential_set_split_vector\n ]\n\n for potential_set in range(self.no_potential_sets_per_atom-1):\n\n pps_positive = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-2],\n )\n pps_negative = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-1]\n )\n\n relative_potential_vectors.append(pps_positive)\n relative_potential_vectors.append(pps_negative)\n\n if self.add_primary_vector_potentials_as_coords is False:\n del relative_potential_vectors[0]\n del relative_potential_vectors[0]\n\n # potential coords are still relative to their atom, now make them real.\n for vector in relative_potential_vectors:\n potential_coords_list.append(\n {'#': 0, 'el': self.sp2_pseudo_element, 'x': vector[0]+atom['x'], 'y': vector[1]+atom['y'], 'z': vector[2]+atom['z']},\n )\n\n # Now add potentials to coord list, after removing the 'real' hydrogen atoms.\n if execute_deletion is True:\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)",
"def cull(self) -> None:\n for player in self.players:\n to_remove = [creature for creature in player.battle_line if creature.damage_taken >= creature.power()]\n for creature in to_remove:\n player.battle_line.remove(creature)\n to_remove.destroyed(self, creature)",
"def release_atoms(self):\r\n\t\thole_size = self.box_size/2\r\n\t\thole_left = self.box_size/2 - hole_size/2\r\n\t\thole_right = self.box_size/2 + hole_size/2\r\n\r\n\t\tx_vals = (self.pos.x > hole_left) & (self.pos.x < hole_right)\r\n\t\ty_vals = (self.pos.y > hole_left) & (self.pos.y < hole_right)\r\n\t\tindices = (self.pos.z < 0) & x_vals & y_vals\r\n\r\n\t\tescaped_count = np.sum(indices)\r\n\t\tlost_momentum = self.atom_mass*np.sum(self.vel.z)\r\n\r\n\t\t# this would look bettes as self.vel.values[:, indices] = ... , but that is actualy noticeably slower\r\n\t\tself.pos.x[indices], self.pos.y[indices], self.pos.z[indices] = *generator.uniform(hole_left, hole_right, size=(2, escaped_count)), np.full(escaped_count, self.box_size)\r\n\t\tif self.change_velocities:\r\n\t\t\t# changing the velocity makes the temperature decrease over time\r\n\t\t\tself.vel.x[indices], self.vel.y[indices], self.vel.z[indices] = generator.uniform(0, self.box_size, size=(3, escaped_count))\r\n\r\n\t\treturn escaped_count, lost_momentum",
"def _ignore_collision(self):\n # The legacy version only ignores collision of child links of active joints.\n for link in self.cabinet.get_links():\n for s in link.get_collision_shapes():\n g0, g1, g2, g3 = s.get_collision_groups()\n s.set_collision_groups(g0, g1, g2 | 1 << 31, g3)",
"def _alienCollide(self):\n for b in self._bolts:\n if self._ship != None and self._ship.collides(b):\n self._ship = None\n self._bolts = []\n self._key = False\n self._lives -= 1",
"def test_check_for_existing_reaction_eliminates_identical_reactions_without_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=False)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=False)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to eliminate reactions without duplicate tag')",
"def remove_mass(self, *focal_elements):\n for focal in focal_elements:\n if focal[0] in self.focals:\n self.focals[focal[0]] -= focal[1]\n else:\n self.focals[focal[0]] = -focal[1]"
] | [
"0.6586952",
"0.58681905",
"0.5560465",
"0.5431528",
"0.5087718",
"0.49389872",
"0.49223432",
"0.4907192",
"0.48812112",
"0.4859121",
"0.48425356",
"0.48171473",
"0.48001352",
"0.47328418",
"0.46946502",
"0.46811792",
"0.46665424",
"0.46157676",
"0.4614911",
"0.4604623",
"0.45941934",
"0.45920017",
"0.45833287",
"0.45755368",
"0.4573333",
"0.456913",
"0.45406634",
"0.45349336",
"0.45329696",
"0.45296168"
] | 0.6826048 | 0 |
StereoGroup atoms are in the reaction, and the reaction creates the specified chirality at the stereo centers -> remove the stereo center from the StereoGroup -> invalidate stereo group | def test_reaction_defines_stereo(self):
products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')
self.assertEqual(products, 'F[C@@H](Cl)Br')
products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')
self.assertEqual(products, 'F[C@@H](Cl)Br')
products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')
self.assertEqual(products, 'F[C@@H](Cl)Br')
# Remove group with defined stereo
products = _reactAndSummarize('[C:1]F>>[C@@:1]F',
'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')
self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')
# Remove atoms with defined stereo from group
products = _reactAndSummarize('[C:1]F>>[C@@:1]F',
'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')
self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')",
"def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')",
"def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )",
"def test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)",
"def test_reaction_ignores_stereo(self):\n reaction = '[C:1]>>[C:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)",
"def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)",
"def test_reaction_preserves_stereo(self):\n reaction = '[C@:1]>>[C@:1]'\n reactants = ['F[C@H](Cl)Br |o1:1|', 'F[C@@H](Cl)Br |&1:1|', 'FC(Cl)Br']\n for reactant in reactants:\n products = _reactAndSummarize(reaction, reactant)\n self.assertEqual(products, reactant)",
"def replace(self):\n if self.removed:\n self.coordinates = [[(self.player * 15 - 15), 0], [(self.player * 15 - 15), 1],\n [(self.player * 15 - 15), 2], [(self.player * 15 - 15), 3]]\n for i in self.coordinates:\n self.collision_boxes.append(rect.Rect(i[0] * 64, i[1] * 64, 64, 64))\n self.removed=False",
"def atomisticSphere (flag, filin, filout, max_distance = 15, analysis = 1, atom_central = \"mean_point\", debug = 1):\n \n list_atom_pocket = parsePDB.loadCoordSectionPDB(filin)\n dico_stock_count = tool.generateStructCompositionAtomistic (max_distance, 3)\n \n if atom_central == \"mean_point\" : \n central_point = generateMeansPointPocket (list_atom_pocket)\n # else append barycenter pocket calculated by RADI\n \n for atom in list_atom_pocket : \n distance = parsePDB.distanceTwoatoms(central_point, atom)\n # print distance\n element = atom[\"element\"]\n name_atom = atom[\"name\"]\n residue = tool.transformAA(atom[\"resName\"])\n \n for distance_key in dico_stock_count.keys() : \n if distance <= distance_key or distance > max_distance : \n dico_stock_count [distance_key] [\"atom\"] = dico_stock_count [distance_key] [\"atom\"] + 1\n if element == \"C\" : \n dico_stock_count [distance_key] [\"carbon\"] = dico_stock_count [distance_key] [\"carbon\"] + 1\n elif element == \"N\" : \n dico_stock_count [distance_key] [\"nitrogen\"] = dico_stock_count [distance_key] [\"nitrogen\"] + 1\n elif element == \"S\" : \n dico_stock_count [distance_key] [\"sulfur\"] = dico_stock_count [distance_key] [\"sulfur\"] + 1\n elif element == \"O\" : \n dico_stock_count [distance_key] [\"oxygen\"] = dico_stock_count [distance_key] [\"oxygen\"] + 1\n elif element == \"H\" : \n dico_stock_count [distance_key] [\"hydrogen\"] = dico_stock_count [distance_key] [\"hydrogen\"] + 1\n \n if residue in dico_Hacceptor.keys () : \n if name_atom in dico_Hacceptor[residue] : \n dico_stock_count [distance_key] [\"hbond_acceptor\"] = dico_stock_count [distance_key] [\"hbond_acceptor\"] + 1\n \n if residue in dico_atom_Car : \n if name_atom in dico_atom_Car[residue] : \n dico_stock_count [distance_key] [\"aromatic\"] = dico_stock_count [distance_key] [\"aromatic\"] + 1\n \n if residue in dico_atom_hydrophobic : \n if name_atom in dico_atom_hydrophobic[residue] : \n dico_stock_count [distance_key] [\"hydrophobic\"] = dico_stock_count [distance_key] [\"hydrophobic\"] + 1\n \n if residue in dico_atom_Carg : \n if name_atom in dico_atom_Carg[residue] : \n dico_stock_count [distance_key] [\"alcool\"] = dico_stock_count [distance_key] [\"alcool\"] + 1\n \n \n if residue in dico_Hdonor.keys () : \n if name_atom in dico_Hdonor[residue] : \n dico_stock_count [distance_key] [\"hbond_donor\"] = dico_stock_count [distance_key] [\"hbond_donor\"] + 1\n \n if name_atom == \"CA\" or name_atom == \"O\" or name_atom == \"C\" or name_atom == \"N\" or name_atom == \"H\" or name_atom == \"HA\" :\n dico_stock_count [distance_key] [\"main_chain\"] = dico_stock_count [distance_key] [\"main_chain\"] + 1\n else : \n dico_stock_count [distance_key] [\"side_chain\"] = dico_stock_count [distance_key] [\"side_chain\"] + 1\n \n for distance_key in dico_stock_count.keys () : \n nb_atom = float(dico_stock_count [distance_key] [\"atom\"])\n if nb_atom == 0 : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + 
\"_hydrogen_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + \"0\" + \"\\n\")\n \n else : \n filout.write (flag + \"_atom_\" + str(distance_key) +\"\\t\" + str(nb_atom) + \"\\n\")\n filout.write (flag + \"_side_chain_\"+ str(distance_key) + \"\\t\" + str (dico_stock_count [distance_key] [\"side_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_main_chain_\" + str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"main_chain\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_sulfur_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"sulfur\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_carbone_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"carbon\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_nitrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"nitrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_oxygen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"oxygen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrogen_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrogen\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_acceptor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hbond_acceptor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hbond_donor_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hbond_donor\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_alcool_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"alcool\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_hydrophobic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"hydrophobic\"] / nb_atom) + \"\\n\")\n filout.write (flag + \"_aromatic_\"+ str(distance_key) +\"\\t\" + str (dico_stock_count [distance_key] [\"aromatic\"] / nb_atom) + \"\\n\")",
"def pick(self, inv, pl, group):\r\n if self.rect.colliderect(pl):\r\n group.remove(self)\r\n if inv.count('key') == 0:\r\n inv += ['key']\r\n music_acceptor.activatedPortalSound()",
"def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)",
"def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False",
"def cleaveSurfAtom(entry,max_bonds=1,supercell=2,group_structs=True):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # If the crystal is 3D\n if results[0]=='conventional':\n struct = copy.deepcopy(entry[0])\n og_binary_matrix = getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(og_binary_matrix)))\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n bonds = []\n \n # Get list of bonded atoms\n for i in range(len(og_binary_matrix)):\n for pair in [(i,j) for j in range(i+1,len(og_binary_matrix)) \n if og_binary_matrix[i][j]==1]:\n bonds.append(pair)\n allCombos = []\n combNum = 0\n \n # Get list of all combinations of bonds\n for i in range(max_bonds+1):\n for com in list(itertools.combinations(bonds,i)):\n allCombos.append(com)\n combNum+=1\n\n combos = allCombos\n jjj=0\n all_structs = []\n \n # For each bond combination\n for combo in combos:\n broken=0\n jjj+=1\n modified_matrix = np.array(binary_matrix)\n for pair in combo:\n i,j = pair\n i=i*supercell**3\n j=j*supercell**3\n # Break bonds in the loop\n for shift in range(supercell**3):\n for shift2 in range(supercell**3):\n modified_matrix[i+shift][j+shift2]=0\n modified_matrix[j+shift][i+shift2]=0\n\n new_num_bonds=sum(sum(modified_matrix))\n broken=int(og_num_bonds-new_num_bonds) \n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n # If the new set of atoms is not empty\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n # If formula of new network matches the original cell\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n else:\n print('Material is does not have a 3D motiif')\n print('Try increasing radii tolerance if appropriate')\n return([])",
"def pseudopotentialise_molecule(self, sysargs=None, execute_deletion=True):\n\n # Find atoms to replace\n deletion_list = []\n if len(sysargs) > 2:\n if 'del' in sysargs:\n deletion_list = self.parse_coord_list(sysargs[4])\n replacement_list = self.parse_coord_list(sysargs[2])\n atoms_to_potentialise = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n else:\n atoms_to_potentialise = (item for item in self.coord_list if item[\"el\"] == 'c')\n deletion_list = (item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising carbon atoms %s ...' % [atom['#'] for atom in atoms_to_potentialise])\n\n potential_coords_list = []\n\n for atom in atoms_to_potentialise:\n distanced_atom_list = self.order_atoms_by_distance_from(atom['#'])\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n if len(distanced_carbon_list) == 1:\n primary_vector = None\n for non_c_atom in distanced_atom_list[1:4]:\n if non_c_atom['el'] != 'h':\n primary_vector = self.vectorise_atom(non_c_atom['#']) - self.vectorise_atom(atom['#'])\n if primary_vector is None:\n primary_vector = self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#'])\n else:\n primary_vector = self.vectorise_atom(distanced_carbon_list[1]['#']) - self.vectorise_atom(atom['#'])\n\n normal_vector = numpy.cross(\n self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#']),\n self.vectorise_atom(distanced_atom_list[2]['#']) - self.vectorise_atom(atom['#'])\n )\n\n primary_potential_vector = self.lengtherise_vector(primary_vector, self.atom_potential_set_distance)\n potential_set_split_vector = self.lengtherise_vector(normal_vector, self.potential_set_split_distance)\n\n relative_potential_vectors = [\n primary_potential_vector + potential_set_split_vector,\n primary_potential_vector - potential_set_split_vector\n ]\n\n for potential_set in range(self.no_potential_sets_per_atom-1):\n\n pps_positive = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-2],\n )\n pps_negative = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-1]\n )\n\n relative_potential_vectors.append(pps_positive)\n relative_potential_vectors.append(pps_negative)\n\n if self.add_primary_vector_potentials_as_coords is False:\n del relative_potential_vectors[0]\n del relative_potential_vectors[0]\n\n # potential coords are still relative to their atom, now make them real.\n for vector in relative_potential_vectors:\n potential_coords_list.append(\n {'#': 0, 'el': self.sp2_pseudo_element, 'x': vector[0]+atom['x'], 'y': vector[1]+atom['y'], 'z': vector[2]+atom['z']},\n )\n\n # Now add potentials to coord list, after removing the 'real' hydrogen atoms.\n if execute_deletion is True:\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)",
"def test_parameterize_mol_missing_stereo_rdkit(self, force_field):\n toolkit_registry = ToolkitRegistry(\n toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n )\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )",
"def update(self):\n #self.model.states[Polymerase].molecules\n\n DNA_obj = self.model.states[DNA].get_molecules(\"DNA\")[0]\n\n for i in range(1): #500\n DNA_obj.bind_polymerase()\n \n for i in range(50): #50\n DNA_obj.move_polymerase()\n #print(DNA_obj.poly_transcript)\n \n\n\n\n #print(self.test.poly_status)\n #print(DNA_obj.poly_pos)",
"def mutate_residue(pose, mutant_position, mutant_aa,\n pack_radius = 0.0, pack_scorefxn = '' ):\n #### a MutateResidue Mover exists similar to this except it does not pack\n #### the area around the mutant residue (no pack_radius feature)\n #mutator = MutateResidue(mutant_position, mutant_aa)\n #mutator.apply(test_pose)\n\n if pose.is_fullatom() == False:\n IOError( 'mutate_residue only works with fullatom poses' )\n\n\n # create a standard scorefxn by default\n if not pack_scorefxn:\n pack_scorefxn = rosetta.core.scoring.get_score_function()\n\n task = pyrosetta.standard_packer_task(pose)\n\n # the Vector1 of booleans (a specific object) is needed for specifying the\n # mutation, this demonstrates another more direct method of setting\n # PackerTask options for design\n aa_bool = rosetta.utility.vector1_bool()\n # PyRosetta uses several ways of tracking amino acids (ResidueTypes)\n # the numbers 1-20 correspond individually to the 20 proteogenic amino acids\n # aa_from_oneletter returns the integer representation of an amino acid\n # from its one letter code\n # convert mutant_aa to its integer representation\n mutant_aa = rosetta.core.chemical.aa_from_oneletter_code(mutant_aa)\n\n # mutation is performed by using a PackerTask with only the mutant\n # amino acid available during design\n # to do this, construct a Vector1 of booleans indicating which amino acid\n # (by its numerical designation, see above) to allow\n for i in range(1, 21):\n # in Python, logical expression are evaluated with priority, thus the\n # line below appends to aa_bool the truth (True or False) of the\n # statement i == mutant_aa\n aa_bool.append( i == int(mutant_aa) )\n\n # modify the mutating residue's assignment in the PackerTask using the\n # Vector1 of booleans across the proteogenic amino acids\n task.nonconst_residue_task(mutant_position\n ).restrict_absent_canonical_aas(aa_bool)\n\n # prevent residues from packing by setting the per-residue \"options\" of\n # the PackerTask\n restrict_non_nbrs_from_repacking(pose, mutant_position, task, pack_radius)\n\n # apply the mutation and pack nearby residues\n #print task\n packer = rosetta.protocols.simple_moves.PackRotamersMover(pack_scorefxn, task)\n packer.apply(pose)",
"def concerted_unimolecular_elimination(rct_zmas, prd_zmas):\n\n # Initialize info for the returns\n ret = None, None, None, None, None\n finish_build = True\n\n # Attempt to build appropriate z-matrix\n prd_zmas, prd_gras = shifted_standard_zmas_graphs(\n prd_zmas, remove_stereo=True)\n if len(rct_zmas) == 1:\n count = 1\n while True:\n rct_zmas, rct_gras = shifted_standard_zmas_graphs(\n rct_zmas, remove_stereo=True)\n init_zma, = rct_zmas\n\n tras, _, _ = automol.graph.reac.elimination(rct_gras, prd_gras)\n if tras is not None:\n if len(tras[0]) == 1:\n tras = [tras]\n min_dist = 100.\n frm_bnd_key = None\n for tra_i in tras:\n # Get the bond formation and breaking keys\n bnd_key, = automol.graph.trans.formed_bond_keys(tra_i)\n geo = automol.zmatrix.geometry(rct_zmas[0])\n dist = automol.geom.distance(geo, *list(bnd_key))\n if dist < min_dist:\n min_dist = dist\n frm_bnd_key = bnd_key\n tra = tra_i\n brk_keys = automol.graph.trans.broken_bond_keys(tra)\n brk_bnd_key1, brk_bnd_key2 = brk_keys\n init_zma, = rct_zmas\n\n\n # Get index for migrating atom (or bond-form atom in group)\n for bnd_key in (brk_bnd_key1, brk_bnd_key2):\n if bnd_key & frm_bnd_key:\n mig_key = next(iter(bnd_key & frm_bnd_key))\n for key in frm_bnd_key:\n if key != mig_key:\n a1_idx = key\n\n # Get chain for redefining the rc1_atm1_key z-matrix entries\n _, gras = shifted_standard_zmas_graphs(\n [init_zma], remove_stereo=True)\n gra = functools.reduce(automol.graph.union, gras)\n xgr1, = automol.graph.connected_components(gra)\n atm1_neighbors = _atom_neighbor_keys(xgr1)[a1_idx]\n for idx in atm1_neighbors:\n num_keys = len(_atom_neighbor_keys(xgr1)[idx])\n if idx != mig_key and num_keys > 1:\n a2_idx = idx\n atm2_neighbors = _atom_neighbor_keys(xgr1)[a2_idx]\n for idx in atm2_neighbors:\n if idx not in (mig_key, a1_idx):\n a3_idx = idx\n\n mig_redef_keys = (a1_idx, a2_idx, a3_idx)\n\n # determine if the zmatrix needs to be rebuilt by x2z\n # determines if the hydrogen atom is used to define other atoms\n rebuild = False\n if any(idx > mig_key for idx in mig_redef_keys):\n rebuild = True\n\n # rebuild zmat and go through while loop again if needed\n # shift order of cartesian coords & rerun x2z to get a new zmat\n # else go to next stage\n if rebuild:\n reord_zma = reorder_zmatrix_for_migration(\n init_zma, a1_idx, mig_key)\n rct_zmas = [reord_zma]\n count += 1\n if count == 3:\n finish_build = False\n break\n else:\n rct_zma = init_zma\n finish_build = True\n break\n else:\n finish_build = False\n\n # If z-mat with good order found, finish building it\n if finish_build:\n\n # determine the new coordinates\n rct_geo = automol.zmatrix.geometry(rct_zma)\n distance = automol.geom.distance(\n rct_geo, mig_key, a1_idx)\n angle = automol.geom.central_angle(\n rct_geo, mig_key, a1_idx, a2_idx)\n dihedral = automol.geom.dihedral_angle(\n rct_geo, mig_key, a1_idx, a2_idx, a3_idx)\n # Reset the keys for the migrating H atom\n new_idxs = (a1_idx, a2_idx, a3_idx)\n key_dct = {mig_key: new_idxs}\n ts_zma = automol.zmatrix.set_keys(rct_zma, key_dct)\n\n # Reset the values in the value dict\n mig_names = automol.zmatrix.name_matrix(ts_zma)[mig_key]\n ts_zma = automol.zmatrix.set_values(\n ts_zma, {mig_names[0]: distance,\n mig_names[1]: angle,\n mig_names[2]: dihedral}\n )\n\n # standardize the ts zmat and get tors and dist coords\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n dist_coo_key = tuple(reversed(sorted(frm_bnd_key)))\n dist_name = next(coo_name for coo_name, coo_keys in coo_dct.items()\n if dist_coo_key in coo_keys)\n 
ts_name_dct = automol.zmatrix.standard_names(ts_zma)\n dist_name = ts_name_dct[dist_name]\n ts_zma = automol.zmatrix.standard_form(ts_zma)\n\n # Get the name of the coordinate of the other bond that is breaking\n brk_dist_name = None\n for brk_key in (brk_bnd_key1, brk_bnd_key2):\n if not brk_key.intersection(frm_bnd_key):\n brk_dist_name = automol.zmatrix.bond_key_from_idxs(\n ts_zma, brk_key)\n\n # Add second attempt to get brk_dist_name\n if brk_dist_name is None:\n brk_dist_names = [\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key1),\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key2)\n ]\n # Grab the name that is not None\n for name in brk_dist_names:\n if name is not None:\n brk_dist_name = name\n\n # get full set of potential torsional coordinates\n pot_tors_names = automol.zmatrix.torsion_coordinate_names(rct_zma)\n\n # remove the torsional coordinates that would break reaction coordinate\n gra = automol.zmatrix.graph(ts_zma, remove_stereo=True)\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n tors_names = []\n for tors_name in pot_tors_names:\n axis = coo_dct[tors_name][0][1:3]\n grp1 = [axis[1]] + (\n list(automol.graph.branch_atom_keys(gra, axis[0], axis) -\n set(axis)))\n grp2 = [axis[0]] + (\n list(automol.graph.branch_atom_keys(gra, axis[1], axis) -\n set(axis)))\n if not ((mig_key in grp1 and a1_idx in grp2) or\n (mig_key in grp2 and a1_idx in grp1)):\n tors_names.append(tors_name)\n\n # Get reactants graph\n _, rct_gras = shifted_standard_zmas_graphs(\n [rct_zma], remove_stereo=True)\n rcts_gra = automol.graph.union_from_sequence(rct_gras)\n\n brk_bnd_key1 = shift_vals_from_dummy(brk_bnd_key1, ts_zma)\n brk_bnd_key2 = shift_vals_from_dummy(brk_bnd_key2, ts_zma)\n brk_bnd_keys = frozenset({brk_bnd_key1, brk_bnd_key2})\n frm_bnd_key = shift_vals_from_dummy(frm_bnd_key, ts_zma)\n\n ret = ts_zma, dist_name, brk_dist_name, brk_bnd_keys, frm_bnd_key, tors_names, rcts_gra\n\n return ret",
"def release_atoms(self):\r\n\t\thole_size = self.box_size/2\r\n\t\thole_left = self.box_size/2 - hole_size/2\r\n\t\thole_right = self.box_size/2 + hole_size/2\r\n\r\n\t\tx_vals = (self.pos.x > hole_left) & (self.pos.x < hole_right)\r\n\t\ty_vals = (self.pos.y > hole_left) & (self.pos.y < hole_right)\r\n\t\tindices = (self.pos.z < 0) & x_vals & y_vals\r\n\r\n\t\tescaped_count = np.sum(indices)\r\n\t\tlost_momentum = self.atom_mass*np.sum(self.vel.z)\r\n\r\n\t\t# this would look bettes as self.vel.values[:, indices] = ... , but that is actualy noticeably slower\r\n\t\tself.pos.x[indices], self.pos.y[indices], self.pos.z[indices] = *generator.uniform(hole_left, hole_right, size=(2, escaped_count)), np.full(escaped_count, self.box_size)\r\n\t\tif self.change_velocities:\r\n\t\t\t# changing the velocity makes the temperature decrease over time\r\n\t\t\tself.vel.x[indices], self.vel.y[indices], self.vel.z[indices] = generator.uniform(0, self.box_size, size=(3, escaped_count))\r\n\r\n\t\treturn escaped_count, lost_momentum",
"def build_reactive_complex(self, settings_manager: SettingsManager):\n import scine_database as db\n import scine_utilities as utils\n\n start_structure_ids = self._calculation.get_structures()\n start_structures = [db.Structure(sid, self._structures) for sid in start_structure_ids]\n self.save_initial_graphs_and_charges(settings_manager, start_structures)\n if len(start_structures) == 1:\n # For an intramolecular structure it is sufficient to provide one\n # structure that is both, start structure and reactive complex\n structure = start_structures[0]\n atoms = structure.get_atoms()\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n if len(start_structures) == 2:\n # Intermolecular reactions reactions require in situ generation of the reactive complex\n s0 = start_structures[0]\n s1 = start_structures[1]\n\n # Get coordinates\n atoms1 = s0.get_atoms()\n atoms2 = s1.get_atoms()\n elements1 = atoms1.elements\n elements2 = atoms2.elements\n coordinates1 = atoms1.positions\n coordinates2 = atoms2.positions\n # Calculate reactive center mean position\n if self.exploration_key + \"_lhs_list\" in self.settings[self.exploration_key]:\n sites1 = self.settings[self.exploration_key][self.exploration_key + \"_lhs_list\"]\n sites2 = self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"]\n self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"] = list(\n idx + len(elements1) for idx in sites2\n )\n elif \"nt_associations\" in self.settings[self.exploration_key]:\n sites1 = []\n sites2 = []\n nAtoms1 = len(atoms1.elements)\n for i in range(0, len(self.settings[self.exploration_key][\"nt_associations\"]), 2):\n at1 = self.settings[self.exploration_key][\"nt_associations\"][i]\n at2 = self.settings[self.exploration_key][\"nt_associations\"][i + 1]\n if at1 >= nAtoms1 > at2:\n sites1.append(at2)\n sites2.append(at1 - nAtoms1)\n if at2 >= nAtoms1 > at1:\n sites1.append(at1)\n sites2.append(at2 - nAtoms1)\n else:\n self.raise_named_exception(\n \"Reactive complex can not be build: missing reactive atoms list(s).\"\n )\n reactive_center1 = np.mean(coordinates1[sites1], axis=0)\n reactive_center2 = np.mean(coordinates2[sites2], axis=0)\n # Place reactive center mean position into origin\n coord1 = coordinates1 - reactive_center1\n coord2 = coordinates2 - reactive_center2\n positions = self._orient_coordinates(coord1, coord2)\n atoms = utils.AtomCollection(elements1 + elements2, positions)\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n # should not be reachable\n self.raise_named_exception(\n \"Reactive complexes built from more than 2 structures are not supported.\"\n )",
"def remove_mass(self, *focal_elements):\n for focal in focal_elements:\n if focal[0] in self.focals:\n self.focals[focal[0]] -= focal[1]\n else:\n self.focals[focal[0]] = -focal[1]",
"def remove_clashes(self):\n dihe_parameters = self.myGlycosylator.builder.Parameters.parameters['DIHEDRALS']\n vwd_parameters = self.myGlycosylator.builder.Parameters.parameters['NONBONDED']\n \n static_glycans = None\n for k in self.original_glycanMolecules:\n if k not in self.linked_glycanMolecules:\n if static_glycans is not None:\n static_glycans += self.original_glycanMolecules[k].atom_group\n else:\n static_glycans = self.original_glycanMolecules[k].atom_group.copy()\n \n environment = self.myGlycosylator.protein.copy() \n environment += static_glycans\n \n #Build topology\n self.myGlycosylator.build_glycan_topology(glycanMolecules = self.linked_glycanMolecules, build_all = False)\n sampler = glc.Sampler(self.linked_glycanMolecules.values(), environment, dihe_parameters, vwd_parameters)\n sampler.remove_clashes_GA()",
"def stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam): \n h, w, u = curImageInfo.originalImageResized.shape # shape after resize\n K = 1\n N = h * w * K\n gr = np.mean(curImageInfo.originalImageResized, 2) # not 3 as it is zero based :3\n grs = cv2.GaussianBlur(gr, (5, 5), 1)\n \n # One heuristic for converting depth to disparity\n disparity0 = imnormalize(1/(1+imnormalize(curImageInfo.depthResized)))*conversionParam.maxDisp - conversionParam.maxDisp/2;\n \n if conversionParam.spatialSmoothnessSwitch == True:\n # Smoothing the depth spatially according to adjacent pixels by using Gx, Gy gradients\n # Vertical and Horizontal Edges\n dx = cv2.filter2D(grs, -1, np.transpose(np.array([[-1, 1, 0]])))\n dy = cv2.filter2D(grs, -1, np.array([[-1, 1, 0]]))\n \n W = ( imnormalize(disparity0) + sigmoid(np.sqrt(np.power(dx, 2) + np.power(dy, 2)), 0.01, 500) ) / 2 \n \n A = np.transpose(spdiags(np.transpose(W).flatten(), 0, N, N, \"csc\") \\\n + (conversionParam.spatialSmoothCoeff_x * globalParam.Gx.transpose() * globalParam.Gx) \\\n + (conversionParam.spatialSmoothCoeff_y * globalParam.Gy.transpose() * globalParam.Gy))\n \n b = np.transpose(W).flatten() * np.transpose(disparity0).flatten()\n \n [x, flag] = cg(A, b, np.transpose(disparity0).flatten(), 5e-1, 50)\n \n disparity = np.transpose(np.reshape(x, (w, h))) # remove (h, w, 1, K)\n else:\n disparity = disparity0\n \n curImageInfo.leftImage = curImageInfo.originalImage\n \n # The -ve sign to convert the white to black and black to white \n warpright = -disparity\n \n # only the warping interp2 is done on the original size image with no resizing to have good estimation\n warpright = cv2.resize(warpright, (curImageInfo.originalImage.shape[1], curImageInfo.originalImage.shape[0]), \n interpolation=cv2.INTER_LINEAR)\n \n curImageInfo.rightImage = (clip(warpImage_v2((curImageInfo.originalImage), (warpright), \n conversionParam.resizeFactor, globalParam.xx, globalParam.yy, globalParam.YY)))\n \n return disparity",
"def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))",
"async def nogroup(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n author = ctx.message.author\n roles = author.roles\n for role in roles:\n if role.name.lower() in changeable_groups:\n roles.remove(role)\n await amor_manager.replace_roles(author, *roles)\n await amor_manager.say('{0} removed from color groups'.format(author.name))",
"def remove_from_group(self, org, contact, group):\n pass",
"def test_clashing_atoms():\n benzene_path = examples_paths()['benzene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = get_template_script(tmp_dir, keep_openeye=True)\n system_id = 'explicit-system'\n system_description = yaml_content['systems'][system_id]\n system_description['pack'] = True\n system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])\n\n # Sanity check: at the beginning molecules clash\n toluene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(toluene_path, molecule_idx=0))\n benzene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(benzene_path, molecule_idx=0))\n assert pipeline.compute_min_dist(toluene_pos, benzene_pos) < pipeline.SetupDatabase.CLASH_THRESHOLD\n\n exp_builder = ExperimentBuilder(yaml_content)\n\n for sys_id in [system_id + '_vacuum', system_id + '_PME']:\n system_dir = os.path.dirname(\n exp_builder._db.get_system(sys_id)[0].position_path)\n\n # Get positions of molecules in the final system\n prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 'complex.prmtop'))\n inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))\n positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)\n topography = Topography(prmtop.topology, ligand_atoms='resname TOL')\n benzene_pos2 = positions.take(topography.receptor_atoms, axis=0)\n toluene_pos2 = positions.take(topography.ligand_atoms, axis=0)\n # atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')\n # benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)\n # toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)\n\n # Test that clashes are resolved in the system\n min_dist, max_dist = pipeline.compute_min_max_dist(toluene_pos2, benzene_pos2)\n assert min_dist >= pipeline.SetupDatabase.CLASH_THRESHOLD\n\n # For solvent we check that molecule is within the box\n if sys_id == system_id + '_PME':\n assert max_dist <= exp_builder._db.solvents['PME']['clearance'].value_in_unit(unit.angstrom)",
"def terminate(self, atoms):\n\n c = list(atoms.keys())[0] # name of carbon atom being terminated\n c_ndx = list(atoms.values())[0] # serial index of carbon begin terminated\n\n chain = self.determine_chains(c)[0] # which chain carbon atom is on\n c_name = self.monomer.chains[chain][c]\n\n # to get indexing right\n c_ndx -= self.monomer.indices[chain][c_name]\n\n # types after reaction. Keeping this dictionary format so it integrates easily with xlinking algorithm\n types = {'chain': {self.monomer.chains[chain][c]: 'c3', self.monomer.dummy_connectivity[chain][c]: 'hc'}}\n\n for i in self.monomer.hydrogen_connectivity[c]: # turn already attached carbon(s) to c3\n types['chain'][i] = 'hc'\n\n # update types\n reacted_types = {'chain': {c_ndx + self.monomer.indices[chain][a]: types['chain'][a]\n for a in types['chain'].keys()}}\n\n # add dummy atom bond\n bonds = [[c_ndx + self.monomer.indices[chain]['C2'], c_ndx + self.monomer.indices[chain]['D2'], 'dummy']]\n\n radicals = []\n\n rm_improper = [[c_ndx + self.monomer.indices[chain][x] for x in self.monomer.impropers[chain][c_name]]]\n\n # define terminated atoms\n terminated = [c_ndx + self.monomer.indices[chain][c_name]]\n\n return reacted_types, bonds, radicals, rm_improper, terminated",
"def pseudopotentialise_ethane_like_molecule(self, sysargs, execute_deletion=True):\n\n # Find atoms to replace\n deletion_list = []\n potential_coords_list = []\n if len(sysargs) > 2:\n if 'del' in sysargs:\n deletion_list = self.parse_coord_list(sysargs[4])\n replacement_list = self.parse_coord_list(sysargs[2])\n atoms_to_replace = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n else:\n atoms_to_replace = (item for item in self.coord_list if item[\"el\"] == 'c')\n deletion_list = (item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising atoms %s ...' % [atom['#'] for atom in atoms_to_replace])\n\n # Option to place a potential on the *opposite* side of the carbon as well.\n dipolar_potentials = False\n if 'dipole' in sysargs:\n print('Dipolar potentialisation activated...')\n dipolar_potentials = True\n\n for atom in atoms_to_replace:\n # Find vector from nearest carbon.\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n vector_from_nearest_carbon = self.vectorise_atom(atom['#']) \\\n - self.vectorise_atom(distanced_carbon_list[0]['#'])\n vector_to_nearest_carbon = self.vectorise_atom(distanced_carbon_list[0]['#']) \\\n - self.vectorise_atom(atom['#'])\n\n # Lengtherise vector from carbon to give relative pp coordinates.\n vector_c_to_new_pp = self.lengtherise_vector(vector_from_nearest_carbon, self.atom_potential_set_distance)\n vector_c_to_new_dipole_pp = self.lengtherise_vector(vector_to_nearest_carbon, self.atom_potential_set_distance)\n\n # Add to carbon coords to get new pp coords.\n potential_coords_list.append(\n {'#': 0, 'el': self.sp3_pseudo_element,\n 'x': vector_c_to_new_pp[0] + distanced_carbon_list[0]['x'],\n 'y': vector_c_to_new_pp[1] + distanced_carbon_list[0]['y'],\n 'z': vector_c_to_new_pp[2] + distanced_carbon_list[0]['z']},\n )\n if dipolar_potentials is True:\n # Add to carbon coords to get new pp coords.\n potential_coords_list.append(\n {'#': 0, 'el': self.sp3_pseudo_element,\n 'x': vector_c_to_new_dipole_pp[0] + distanced_carbon_list[0]['x'],\n 'y': vector_c_to_new_dipole_pp[1] + distanced_carbon_list[0]['y'],\n 'z': vector_c_to_new_dipole_pp[2] + distanced_carbon_list[0]['z']},\n )\n\n # Now add potentials to coord list, after removing the 'real' atoms.\n if execute_deletion is True:\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)",
"def destroy(self, cause:str, *, warp_core_breach:bool=False, self_destruct:bool=False):\n gd = self.game_data\n #gd.grid[self.sector_coords.y][self.sector_coords.x].removeShipFromSec(self)\n is_controllable = self.is_controllable\n #wc_value = self.sys_warp_core.get_effective_value\n\n if self.is_controllable:\n self.game_data.cause_of_damage = cause\n try:\n self.life_support.able_crew = 0\n self.life_support.injured_crew = 0\n except AttributeError:\n pass\n try:\n for k in self.torpedo_launcher.torps.keys():\n self.torpedo_launcher.torps[k] = 0\n self.torpedo_launcher.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.shield_generator.shields = 0\n self.shield_generator.shields_up = False\n self.shield_generator.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.polarized_hull.polarization_amount = 0\n self.polarized_hull.is_polarized = False\n self.polarized_hull.integrety = 0.0\n except AttributeError:\n pass\n self.power_generator.energy = 0\n self.power_generator.integrety = 0\n try:\n self.warp_drive.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.beam_array.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.cannons.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.impulse_engine.integrety = 0.0\n except AttributeError:\n pass\n self.sensors.integrety = 0.0\n try:\n self.cloak.cloak_status = CloakStatus.INACTIVE\n self.cloak.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.transporter.integrety = 0.0\n except AttributeError:\n pass\n\n if is_controllable:\n gd.engine.message_log.print_messages = False\n\n if warp_core_breach or self_destruct:\n \n self.warp_core_breach(self_destruct)\n self.hull = -self.ship_class.max_hull\n \n if self is self.game_data.selected_ship_planet_or_star:\n self.game_data.selected_ship_planet_or_star = None\n \n self.get_sub_sector.destroy_ship(self)"
] | [
"0.6699158",
"0.599879",
"0.5703354",
"0.54769295",
"0.5319069",
"0.51858544",
"0.49389228",
"0.49315634",
"0.4910987",
"0.48884475",
"0.4821768",
"0.4819886",
"0.4803131",
"0.48022223",
"0.48019797",
"0.47844344",
"0.47762632",
"0.47756767",
"0.47696617",
"0.47598013",
"0.46851963",
"0.4684362",
"0.4670574",
"0.4658945",
"0.4649536",
"0.462998",
"0.4601323",
"0.45977697",
"0.45911625",
"0.4579816"
] | 0.6745221 | 0 |
If multiple copies of an atom in StereoGroup show up in the product, they should all be part of the same product StereoGroup. | def test_reaction_copies_stereogroup(self):
# Stereogroup atoms are in the reaction with multiple copies in the product
products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',
'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',
'CC(=O)C')
# stereogroup manually checked, product SMILES assumed correct.
self.assertEqual(
products,
'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'
)
# Stereogroup atoms are not in the reaction, but have multiple copies in the
# product.
products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',
'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',
'CC(=O)C')
# stereogroup manually checked, product SMILES assumed correct.
self.assertEqual(
products,
'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n\n assert 1 == len(n.subject.deployment)\n assert n.subject.deployment[0].deployedArtifact[0] is a.subject",
"def test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)",
"def test_allow_multiples(self):\r\n o1 = self.b1.get(self.key)\r\n o2 = self.b2.get(self.key)\r\n\r\n o1.set_data(\"object-1\")\r\n o1.store()\r\n o2.set_data(\"object-2\")\r\n o2.store()\r\n\r\n conflicted = self.b1.get(self.key)\r\n siblings = filter(bool, (s.get_data() for s in conflicted.get_siblings()))\r\n self.assertEqual(len(siblings), 2)",
"def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')",
"def test_stereogroup_is_spectator_to_reaction(self):\n # 5a. Reaction preserves unrelated stereo\n products = _reactAndSummarize('[C@:1]F>>[C@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5b. Reaction ignores unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5c. Reaction inverts unrelated stereo'\n products = _reactAndSummarize('[C@:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')\n # 5d. Reaction destroys unrelated stereo' 1:3|\n products = _reactAndSummarize('[C@:1]F>>[C:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |o1:3|')\n # 5e. Reaction assigns unrelated stereo'\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')",
"def test_consistent_ids(self) -> None:\n bnode = BNode()\n g0_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Golan Trevize\")),\n (bnode, RDF.type, FOAF.Person),\n }\n bnode = BNode()\n g1_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Janov Pelorat\")),\n (bnode, RDF.type, FOAF.Person),\n }\n\n g0 = Graph()\n g0 += g0_ts\n cg0 = to_canonical_graph(g0)\n cg0_ts = GraphHelper.triple_set(cg0)\n\n g1 = Graph()\n g1 += g1_ts\n cg1 = to_canonical_graph(g1)\n cg1_ts = GraphHelper.triple_set(cg1)\n\n assert cg0_ts.issubset(\n cg1_ts\n ), \"canonical triple set cg0_ts should be a subset of canonical triple set cg1_ts\"",
"def test_duplicate_as(bf: Session) -> None:\n peer_props = bf.q.bgpPeerConfiguration(nodes=SNAPSHOT_NODES_SPEC).answer().frame()\n as_groups = peer_props.groupby(\"Local_AS\")\n for local_as, as_group in as_groups:\n assert as_group[\"Node\"].nunique() == 1, \"ASN {} is duplicated on {}\".format(\n local_as, \", \".join(as_group[\"Node\"].unique()))",
"def test_check_for_existing_reaction_keeps_identical_reactions_with_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=True)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=True)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertFalse(found, 'check_for_existing_reaction failed to identify duplicate template reactions')",
"def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())",
"def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())",
"def test_check_for_existing_reaction_eliminates_identical_reactions_without_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=False)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=False)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to eliminate reactions without duplicate tag')",
"def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n c = self.create(ComponentItem, UML.Component)\n\n self.group(n, c)\n\n assert 1 == len(n.subject.ownedAttribute)\n assert 1 == len(n.subject.ownedConnector)\n assert 1 == len(c.subject.ownedAttribute)\n assert 2 == len(self.kindof(UML.ConnectorEnd))\n\n a1 = n.subject.ownedAttribute[0]\n a2 = c.subject.ownedAttribute[0]\n\n assert a1.isComposite\n assert a1 in n.subject.part\n\n connector = n.subject.ownedConnector[0]\n assert connector.end[0].role is a1\n assert connector.end[1].role is a2",
"def test_install_set_multi(self):\n expected = copy.deepcopy(test_xdata)\n for thing in expected.xpath(\"Children[@identical='true']/Thing\"):\n thing.text = \"same\"\n self._install(\n [lxml.etree.Element(\n \"SetMulti\", value=\"same\",\n base='Test/Children[#attribute/identical = \"true\"]',\n sub=\"Thing/#text\")],\n expected)",
"def check_sane(group):\n attrs = None\n\n for info in group:\n dup_info = dict(info)\n\n # Remove lat and lon\n for prohib in ('lat', 'lon'):\n if prohib in dup_info:\n del dup_info[prohib]\n\n if attrs is None:\n # Use the first file as a reference\n attrs = dup_info\n else:\n # Do the sanity check\n if dup_info.items() != attrs.items():\n msg = \"File '{}' doesn't match '{}' in same group\".format(\n attrs, dup_info\n )\n raise ValueError(msg)",
"def test_grouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n\n assert n2.subject in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode",
"def _propose_atoms_in_order(self, atom_group):\n atom_torsions= []\n logp = []\n assert len(atom_group) == len(set(atom_group)), \"There are duplicate atom indices in the list of atom proposal indices\"\n while len(atom_group) > 0:\n #initialise an eligible_torsions_list\n eligible_torsions_list = list()\n\n for atom_index in atom_group:\n\n # Find the shortest path up to length four from the atom in question:\n shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)\n\n # Loop through the destination and path of each path and append to eligible_torsions_list\n # if destination has a position and path[1:3] is a subset of atoms with positions\n for destination, path in shortest_paths.items():\n\n # Check if the path is length 4 (a torsion) and that the destination has a position. Continue if not.\n if len(path) != 4 or destination not in self._atoms_with_positions_set:\n continue\n\n # If the last atom is in atoms with positions, check to see if the others are also.\n # If they are, append the torsion to the list of possible torsions to propose\n if set(path[1:3]).issubset(self._atoms_with_positions_set):\n eligible_torsions_list.append(path)\n\n assert len(eligible_torsions_list) != 0, \"There is a connectivity issue; there are no torsions from which to choose\"\n #now we have to randomly choose a single torsion\n ntorsions = len(eligible_torsions_list)\n random_torsion_index = np.random.choice(range(ntorsions))\n random_torsion = eligible_torsions_list[random_torsion_index]\n\n #append random torsion to the atom_torsions and remove source atom from the atom_group\n chosen_atom_index = random_torsion[0]\n first_old_atom_index = random_torsion[1]\n atom_torsions.append(random_torsion)\n atom_group.remove(chosen_atom_index)\n\n #add atom to atoms with positions and corresponding set\n self._atoms_with_positions_set.add(chosen_atom_index)\n\n #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph\n self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)\n\n #add the log probability of the choice to logp\n logp.append(np.log(1./ntorsions))\n\n # Ensure that logp is not ill-defined\n assert len(logp) == len(atom_torsions), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n return atom_torsions, logp",
"def test_products_ref_groups_put(self):\n pass",
"def test_unique_genome(self):\n p1 = self.player()\n p2 = self.player()\n self.assertTrue(p1.genome is p2.genome)",
"def test_grouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n assert 1 == len(uc1.subject.subject)\n self.group(s, uc2)\n assert 1 == len(uc2.subject.subject)\n\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(2, len(s.subject.useCase))",
"def check_if_group_member(self, organism):\n for key, item in self.phen_dict.items():\n if organism in item:\n self.declare(Organism(name=key))",
"def update_from(self, grp_names):\n import GEOM, SMESH\n mesh_types = {\n GEOM.VERTEX : SMESH.NODE,\n GEOM.EDGE : SMESH.EDGE,\n GEOM.WIRE : SMESH.EDGE,\n GEOM.FACE : SMESH.FACE,\n GEOM.SHELL : SMESH.FACE,\n GEOM.SOLID : SMESH.VOLUME,\n GEOM.COMPSOLID : SMESH.VOLUME,\n }\n smesh = self.get_smesh()\n\n\n smesh_grps_MA = []\n smesh_grps_NO = []\n for grp in smesh.GetGroups() :\n if str(grp.GetType()) == 'NODE' :\n smesh_grps_NO.append(grp.GetName())\n else :\n smesh_grps_MA.append(grp.GetName())\n\n print smesh_grps_MA,smesh_grps_NO\n done = False\n for geom in self.give_geom().get_children():\n grp_name = geom.read_name()\n #if grp_name in smesh_grps:\n # continue\n #Modif Fournier\n print grp_name\n if grp_name in grp_names[0]:\n if grp_name in smesh_grps_MA:\n pass\n else :\n mesh_type = mesh_types.get(geom.get_shape_type())\n if mesh_type:\n #smesh.CreateGroup(mesh_type, grp_name)\n smesh.CreateGroupFromGEOM(mesh_type,grp_name,geom.get_sgeom())\n done = True\n if grp_name in grp_names[1]:\n if grp_name in smesh_grps_NO:\n continue\n #smesh.CreateGroup(SMESH.NODE,grp_name)\n smesh.CreateGroupFromGEOM(SMESH.NODE,grp_name,geom.get_sgeom())\n done = True\n return done",
"def _enforce_coupling(self):\n for body in self.bodies:\n if body.group_master:\n for body2 in self.bodies:\n if body.group == body2.group and not body2.group_master:\n body2.couple_variables(body)\n\n for scenario in self.scenarios:\n if scenario.group_master:\n for scenario2 in self.scenarios:\n if scenario.group == scenario2.group and not scenario2.group_master:\n scenario2.couple_variables(scenario)",
"def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')",
"def test_ungrouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n self.ungroup(n, a)\n\n assert 0 == len(n.subject.deployment)\n assert 0 == len(self.kindof(UML.Deployment))",
"def _fix_genotypes_object(self, genotypes, variant_info):\n # Checking the name (if there were duplications)\n if self.has_index and variant_info.name != genotypes.variant.name:\n if not variant_info.name.startswith(genotypes.variant.name):\n raise ValueError(\"Index file not synced with IMPUTE2 file\")\n genotypes.variant.name = variant_info.name\n\n # Trying to set multi-allelic information\n if self.has_index and self._index_has_location:\n # Location was in the index, so we can automatically set the\n # multi-allelic state of the genotypes\n genotypes.multiallelic = variant_info.multiallelic\n\n else:\n # Location was not in the index, so we check one marker before and\n # after the one we found\n logging.warning(\"Multiallelic variants are not detected on \"\n \"unindexed files.\")",
"def exclusive_arch(pathogen_groups_set, collapse_pathogen_groups):\n if len(pathogen_groups_set) == 1:\n return True\n\n # Only check pathogen grouping when the flag is on\n if collapse_pathogen_groups:\n if len(pathogen_groups_set) > 2:\n return False\n if 0 in pathogen_groups_set and 1 in pathogen_groups_set:\n return True\n if 3 in pathogen_groups_set and 4 in pathogen_groups_set:\n return True\n return False",
"def addObjectsToGroup(self):\n\t\tmc.delete( self.objects, ch = True )\n\t\tmc.parent( self.objects, self.grp.name )\n\t\tmc.makeIdentity( self.objects, apply=True,t=1,r=1,s=1,n=2)\n\t\t#self.lockObjects()",
"def mergable(self, frame):\n\t\tfor pos in self.srcList: \n\t\t\tif pos in frame.srcList:\n\t\t\t\treturn True\n\n\t\tfor pos in self.tgtList: \n\t\t\tif pos in frame.tgtList:\n\t\t\t\treturn True\n\n\t\treturn False",
"def test_enlarge_2_add_reactive_species(self):\n m1 = Molecule(smiles='CC')\n spc1 = self.rmg.reaction_model.make_new_species(m1, label='C2H4')[0]\n self.rmg.reaction_model.enlarge(spc1)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 2)\n self.assertTrue(self.rmg.reaction_model.core.species[1].reactive)\n\n m2 = Molecule(smiles='[CH3]')\n spc2 = self.rmg.reaction_model.make_new_species(m2, label='CH3')[0]\n self.rmg.reaction_model.enlarge(spc2)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 3)\n self.assertTrue(self.rmg.reaction_model.core.species[2].reactive)",
"def test_does_not_return_duplicate_groups(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n GroupResolution.objects.create(\n group=self.group,\n release=self.release,\n type=GroupResolution.Type.in_release,\n )\n\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)"
] | [
"0.57665616",
"0.5714965",
"0.56087047",
"0.5555653",
"0.53248274",
"0.5304287",
"0.5222042",
"0.520931",
"0.5198424",
"0.5198424",
"0.5195729",
"0.5162648",
"0.5140849",
"0.51147324",
"0.5112582",
"0.5106029",
"0.50989425",
"0.50513166",
"0.5048671",
"0.50138116",
"0.50047845",
"0.50035423",
"0.5003025",
"0.49802876",
"0.49784875",
"0.49448675",
"0.49367747",
"0.4920625",
"0.4905566",
"0.49040255"
] | 0.693719 | 0 |
Get the versions from GitHub tags | def get_versions(self):
# They randomly use and don't use 'r' prefix so we have to sort
# versions manually
versions = list(self._get_github_tags())
versions.sort(
key=operator.attrgetter('base_version'),
reverse=True,
)
return versions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_github_chandra_models_version_info():\n with urlopen('https://api.github.com/repos/sot/chandra_models/tags') as url:\n response = url.read()\n tags = json.loads(response.decode('utf-8'))\n\n with urlopen('https://api.github.com/repos/sot/chandra_models/branches') as url:\n response = url.read()\n branches = json.loads(response.decode('utf-8'))\n\n all_versions_info = {t[\"name\"]: t for t in tags}\n all_versions_info.update({b[\"name\"]: b for b in branches})\n return all_versions_info",
"def _select_version_tags(tags):\n return [t for t in tags if VERSION_REGEX.match(t)]",
"def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)",
"def versions(self):\n versions = (t.lstrip('v') for t in self.tags)\n return filter(version_is_valid, versions)",
"def show_git_versions(ctx):\n\n ws = get_workspace(config)\n\n exp = Experiment(ws, config[\"experiment_name\"])\n\n versions = [\n (run.id, run.get_properties()[\"azureml.git.commit\"]) for run in exp.get_runs()\n ]\n\n print(tabulate(versions, headers=[\"Run ID\", \"Git Version\"]))",
"def versions():\n result = timeline.versions()\n if result:\n click.echo('\\n'.join(result))",
"def get_package_versions(name: str) -> List[str]:\n with request.urlopen(PYPI_SIMPLE_API_URL + name) as response:\n html = response.read()\n\n return re.findall(f'>{name}-(.+).tar', html.decode())",
"def get_repository_tags(repository_name):\n tags_query = \"SELECT * FROM release_tag where repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)",
"def get_stack_versions(stack_root):\n stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)\n code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))\n versions = []\n if 0 == code:\n for line in out.splitlines():\n versions.append(line.rstrip('\\n'))\n if not versions:\n versions = get_versions_from_stack_root(stack_root)\n return versions",
"def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]",
"def get_versions(self):\n raise NotImplementedError",
"async def manage_version():\n\n try:\n repo = git.Repo(search_parent_directories=True)\n version = repo.git.describe('--tags')\n except Exception:\n version = \"v0.0.0\"\n\n base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n creation_time = time.ctime(os.path.getmtime(base_dir))\n\n response = {'version': version, 'deployedOn': creation_time}\n return OK(response)",
"def getVersions(self):\n logger.debug(\"Func: getVersions\")\n\n try:\n return self._currentSceneInfo[\"Versions\"]\n except:\n return []",
"def get_version_from_git(opts):\n\tstdout = opts.tag or Popen(gitargs, stdout=PIPE).communicate()[0].rstrip('\\n')\n\n\tversion, gitmeta = process_git_tag(opts.regex, stdout)\n\n\treturn version, gitmeta",
"def get_latest_tags(self):\n\n start = len(self.tags) - self.num_comparisons\n tags = self.tags\n latest = []\n for i in xrange(len(tags)):\n if i >= start:\n parts = tags[i]['ref'].split('/')\n release_num = parts[2]\n sha = tags[i]['object']['sha']\n tag = [release_num, sha]\n latest.append(tag)\n return latest",
"def get_linked_versions(version='current'):\n version = check_version_str(version)\n chapters = [10, 9, 8]\n version_page = 'https://research.cs.wisc.edu/htcondor/manual/{ver}/{chapter}_Version_History.html'\n r = requests.get(version_page.format(ver=version, chapter=chapters[0]))\n if r.status_code == 404:\n # Try different chapter numbers, as it changes for different versions\n i = 1\n while r.status_code == 404 and i < len(chapters):\n r = requests.get(version_page.format(ver=version, chapter=chapters[i]))\n i += 1\n if r.status_code == 404:\n return []\n soup_vers = bs4.BeautifulSoup(r.text, 'lxml')\n versions = [x.text.replace('Version ', '')\n for x in soup_vers.find_all('a')\n if x.text.startswith('Version')]\n return versions",
"def versions(self) -> List['RadsProjectVersion']:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\")\n return [RadsProjectVersion(self, RadsVersion(l)) for l in listing.splitlines()]",
"def find_branches(versions):\n\n versions = map(LooseVersion, versions)\n\n # group versions by (major, minor) parts\n major_minor = lambda item: item.version[:2]\n versions.sort()\n tip = last(versions)\n grouped = groupby(versions, key=major_minor)\n\n chunks = (tuple(value) for key, value in grouped)\n\n # we only take versions which has patches\n chunks = (versions for versions in chunks if len(versions) > 1)\n\n # and we only need latest patch releases\n result = map(last, chunks)\n\n # we also add the last version bacause it is a tip\n if last(result) is not tip:\n result.append(tip)\n\n return [item.vstring for item in result]",
"def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))",
"def unsafe_get_stack_versions():\n stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)\n code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))\n versions = []\n if 0 == code:\n for line in out.splitlines():\n versions.append(line.rstrip('\\n'))\n return (code, out, versions)",
"def get_tags(self):\n return self.get_url_data(self.api_url + 'refs/tags')",
"def get_versions():\n ret_obj = {'versions': picard_versions(current_app)}\n return make_response(jsonify(ret_obj), 200)",
"def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")",
"def ListVersions(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def get_version():\n parent_dir = os.path.dirname(os.path.realpath(__file__))\n while True:\n if '.git' in os.listdir(parent_dir):\n break\n parent_dir = os.path.dirname(parent_dir)\n git_log = os.path.join(parent_dir,'.git','logs','HEAD')\n handle = open(git_log,'r')\n log_lines = [l.split('\\t') for l in handle.readlines()]\n #now get latest github commit\n url = 'https://api.github.com/repos/thomasvangurp/epiGBS/commits'\n context = ssl._create_unverified_context()\n result = json.load(urllib.urlopen(url,context=context))\n print('')",
"def _get_semver_versions(self, versions):\n semver = []\n for ver in versions:\n semver.append(api.to_semver(ver))\n return semver",
"def get_tags_list(url, auth_token, repo_name):\n response, _ = get_response(url + '/v2/' + repo_name + '/tags/list',\n auth_token)\n result = response.get('tags', [])\n return result",
"def get_versions(start='current'):\n start = check_version_str(start)\n versions = get_linked_versions(start)\n\n results = versions[:]\n while results:\n results = get_linked_versions(results[-1])\n print results\n if results:\n versions.extend(results)\n\n versions = [x for x in set(versions) if check_manual_exists(x)]\n return sort_versions(versions, reverse=True)",
"def versions(self) -> Dict[str, str]:\n self.__logger.debug('Eva.versions called')\n return self.__http_client.api_versions()",
"def identifyVersions(self, logger):\n results = []\n # extract the version from the copyright string\n for work_str in self._version_strings:\n results.append(self.extractVersion(work_str, start_index=work_str.find(self.VERSION_STRING) + len(self.VERSION_STRING)))\n if len(results) == 0 and self._sanity_exists:\n return [self.VERSION_UNKNOWN]\n # return the result\n return results"
] | [
"0.6975342",
"0.67174757",
"0.66325366",
"0.6590542",
"0.6541449",
"0.64341235",
"0.64160585",
"0.6401173",
"0.6249824",
"0.61892974",
"0.61882895",
"0.618634",
"0.61683893",
"0.61543375",
"0.6109142",
"0.60987633",
"0.608993",
"0.6088227",
"0.6088083",
"0.6067755",
"0.6048255",
"0.6031801",
"0.59784067",
"0.5967782",
"0.59651834",
"0.59623706",
"0.5916952",
"0.59006125",
"0.5888183",
"0.5876248"
] | 0.7527406 | 0 |
Fill the packet's data properties. | def fill_data(self, data):
self._data = data
self._data_length = data[1:3]
self._frame_id = data[4]
self._address = XbeeAddress(data[5:9], data[9:13], data[13:15])
self._at_command = data[15:17]
self._command_status = data[17]
try:
self._command_data = data[18:21]
self._checksum = data[22]
except IndexError:
self._command_data = None
self._checksum = data[18] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initAttributes(self):\n CCSDS.DU.DataUnit.initAttributes(self)\n self.dataFieldHeaderFlag = 0\n self.setPacketLength()",
"def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0",
"def set_properties(self):\n\n # assign feed entries from the root of the parsed data\n if hasattr(self.parsed_data, \"entries\"):\n self.items = self.parsed_data.entries\n\n # check if it is a feed root or feed element\n if hasattr(self.parsed_data, \"feed\"):\n source_data = self.parsed_data.feed\n else:\n source_data = self.parsed_data\n\n # assign available properties not listed in keymap\n self.title = source_data.title\n self.link = source_data.link\n\n for key in self.parsed_data.keymap.keys():\n if hasattr(self, key) and not getattr(self, key):\n attr_value = source_data.get(key)\n if isinstance(attr_value, struct_time):\n attr_value = self.serialize_datetime(attr_value)\n\n setattr(self, key, attr_value)",
"def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()",
"def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()",
"def prepare_data(self):",
"def get_data(self):\n self.data = dict()\n # list to save all the attributes we are going to create\n self.attr = []\n # list to save all the groups available in the incomming input\n self.groups.extend(self.values.keys())\n # Grouping\n self.parse_data()",
"def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure",
"def populate_data_from_message(self, msg):\n for field in self:\n try:\n setattr(field, 'data', getattr(msg, field.name))\n except:\n continue",
"def initData(self):\n self.checksum = 0\n return self._writeMessage(0, [], 'initData')",
"def populate_data(self):\r\n # Importing StationData with the standard imports causes a redundancy\r\n # problem, so it is imported here only when it is needed.\r\n from stationData import StationData\r\n # Find data requirements from all plumes.\r\n requirements = describe.PLUMES\r\n # Loop over plumes and define parameters to be used for pulling data.\r\n grib_file = pygrib.open(self.grib_file_path)\r\n for req in requirements:\r\n (plume,data_types,grid_level_type,grid_level,unused) = req\r\n selected = grib_file.select(shortName=data_types,\r\n typeOfLevel=grid_level_type,\r\n level=grid_level)\r\n for i, message in enumerate(selected):\r\n if i % 20 == 0:\r\n print '%s %s/%s Grib messages processed for %s' %\\\r\n (PRETEXT, i + 1, len(selected), req[0])\r\n for sdo in StationData.instances:\r\n if sdo.grib_i is None:\r\n StationData.populate_grid_information(message,\r\n self.config)\r\n sdo.add_data(plume,self.member_name,message)\r\n grib_file.close()\r\n return",
"def __init__(self, data: dict):\n super().__init__(data)\n self._supports_validation = False\n self._ping_data_raw = data['pingData']",
"def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True",
"def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()",
"def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []",
"def __init__(self, data):\n\t\tself.protocol_version, self.le_state, self.playback_state, \\\n\t\t self.source, self.le_flags, self.playback_flags, \\\n\t\t self.source_flags, self.fullness, self.point_rate, \\\n\t\t self.point_count = \\\n\t\t\tstruct.unpack(\"<BBBBHHHHII\", data)",
"def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()",
"def __init__(self):\n \n self.packetType = DATA\n self.types = [BYTE, # Packet type\n FLOAT, # Battery voltage\n FLOAT, FLOAT, FLOAT, FLOAT, # Temperature readings\n FLOAT, FLOAT, # Pressure and humidity readings\n BYTE, BYTE, BYTE, # GPS Year, month, date (sensor computer)\n BYTE, BYTE, BYTE, # GPS Hour, minute, second (sensor computer)\n LONG, LONG, LONG, # GPS latitude, longitude, altitude (sensor computer)\n ULONG, UINT, BYTE, # GPS speed, heading, num satellites (sensor computer)\n FLOAT, FLOAT, FLOAT, # IMU data (accelerometer)\n FLOAT, FLOAT, FLOAT, # IMU data (gyroscope)\n FLOAT, FLOAT, FLOAT, # IMU data (magnetometer)\n FLOAT, FLOAT, FLOAT, # Attitude data\n ULONG, # Time since reset\n BOOL, UINT, # Data logging\n ULONG, # Time since last data arrival\n ULONG, # Relay states\n BYTE, BYTE, BYTE, # GPS Year, month, date (comm computer)\n BYTE, BYTE, BYTE, # GPS Hour, minute, second (comm computer)\n LONG, LONG, LONG # GPS latitude, longitude, altitude (comm computer)\n ] \n\n self.values = [0]*len(self.types)\n self.values[0] = DATA",
"def prepare_data(self, config: TreeConfigParser) -> None:\n self.data = Data(config)\n self.data.prepare_input()\n self.data.prepare_output()",
"def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')",
"def reinit_data(self):\n self.if_name_map, \\\n self.if_alias_map, \\\n self.if_id_map, \\\n self.oid_name_map = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_interface_tables, self.db_conn)\n\n self.update_data()",
"def initAttributes(self):\n Packet.initAttributes(self)\n self.packetType = TM_PACKET_TYPE",
"def _loadData(self, data):\n self._data = data\n self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))\n self.email = data.attrib.get('email')\n self.friend = utils.cast(bool, data.attrib.get('friend'))\n self.friendlyName = data.attrib.get('friendlyName')\n self.home = utils.cast(bool, data.attrib.get('home'))\n self.id = utils.cast(int, data.attrib.get('id'))\n self.server = utils.cast(bool, data.attrib.get('server'))\n self.servers = self.findItems(data, MyPlexServerShare)\n self.thumb = data.attrib.get('thumb')\n self.username = data.attrib.get('username', '')\n for server in self.servers:\n server.accountID = self.id",
"def _init_net_delay_data(self):\n if self._net_delay_raw_data is None:\n return\n\n json_data = json_util.load_content(self._net_delay_raw_data)\n for row in json_data:\n app_id = int(row['app'])\n src_node_id = int(row['src_node'])\n dst_node_id = int(row['dst_node'])\n net_delay = float(row['net_delay'])\n self._net_delay_data[app_id][src_node_id][dst_node_id].append(net_delay)",
"def __fill_data_variables(self):\n data_vars = []\n for data_var in self.ts.data.data_vars:\n data_vars.append(data_var)\n\n self.data_vars = Dropdown(\n options=data_vars,\n value=data_vars[0],\n description='Data variables:',\n disabled=False,\n style = {'description_width': 'initial'},\n layout={'width': '400px'},\n )\n\n self.data_vars.observe(self.on_data_vars_change)",
"def initAttributes(self):\n Packet.initAttributes(self)\n self.packetType = TC_PACKET_TYPE",
"def fillData(self):\n self.graphColors = c.getGraphColors()\n self._tupleListToStrings()\n self.colorlist.SetSelection(0)\n self.delayvalue.SetValue(str(c.getGraphDelay()))\n self._updateButtons(None)",
"def _make_data(cls, data: 'Data_ARP') -> 'dict[str, Any]': # type: ignore[override]\n return {\n 'htype': data.htype,\n 'ptype': data.ptype,\n 'hlen': data.hlen,\n 'plen': data.plen,\n 'oper': data.oper,\n 'sha': data.sha,\n 'spa': data.spa,\n 'tha': data.tha,\n 'tpa': data.tpa,\n 'payload': cls._make_payload(data),\n }",
"def __udp_initialize_packet(self, seq):\n packet_payload, packet_size = self.__get_file_chunk()\n self.packets_status.update(\n {seq: {\"status\": 1, \"payload\": packet_payload, \"size\": packet_size}})",
"def fillData(self):\n self.textexpt.SetValue(c.getExperimentFolder(self._user))\n self.textfold.SetValue(c.getDataFolder(self._user))\n self.textfile.SetValue(c.getDataFile(self._user))\n self.prependscan.SetValue(c.getPrependScan(self._user))"
] | [
"0.6564761",
"0.6411371",
"0.62188",
"0.6214954",
"0.61160105",
"0.60499305",
"0.592144",
"0.5917901",
"0.58758026",
"0.5839473",
"0.58239967",
"0.58188593",
"0.5807109",
"0.57820135",
"0.57779455",
"0.5756668",
"0.57558346",
"0.57392687",
"0.57246864",
"0.57196575",
"0.57140046",
"0.5703263",
"0.56832993",
"0.5676967",
"0.5621933",
"0.5619268",
"0.5612158",
"0.5608776",
"0.56012183",
"0.5590031"
] | 0.6753045 | 0 |
test if the stations are sorted correctly by distance | def test_stations_by_distance():
station_list = build_station_list()
#test for stations closest to cambridge city coordinates
station_list_sort = stations_by_distance(station_list, (52.2053, 0.1218))
output = [(station.name, distance) for (station, distance) in station_list_sort]
for n in range(1, len(station_list)):
        #make sure that the distance of the previous station to the point is no greater than that of the next one in the list
assert output[n-1][1] <= output[n][1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_nearest_filter(self):\n for airport, reports, count in (\n (True, True, 6),\n (True, False, 16),\n (False, True, 6),\n (False, False, 30),\n ):\n stations = station.nearest(30, -80, 30, airport, reports, 1.5)\n self.assertEqual(len(stations), count)",
"def test_nearest(self):\n dist = station.nearest(28.43, -81.31)\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"KMCO\")\n for val in dist.values():\n self.assertIsInstance(val, float)\n for *params, count in (\n (30, -82, 10, True, True, 0.2, 1),\n (30, -82, 10, True, False, 0.2, 5),\n (30, -82, 10, False, False, 0.2, 6),\n (30, -82, 1000, True, True, 0.5, 6),\n (30, -82, 1000, False, False, 0.5, 37),\n ):\n stations = station.nearest(*params)\n self.assertEqual(len(stations), count)\n for dist in stations:\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n for val in dist.values():\n self.assertIsInstance(val, float)",
"def test_get_closest_stations(self):\n\t\tpoint = \"POINT(40.71911552 -74.00666661)\"\n\t\tstations = set(server.get_closest_stations(point))\n\t\t# find the closest stations, make them a set of objects see if sets intersect completely",
"def test_default_ordering(self):\n request = self.factory.get('/api/v1/cars', {'latitude': self.latitude,\n 'longitude': self.longitude,\n 'location': self.location,\n 'distance': 10000}) # inf distance to show all ads\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n # find two nearest cars and distance from our location to them\n cars = response.data['results'][0:2]\n d = [get_distance_between_coords(self.latitude,\n self.longitude,\n cars[i]['latitude'],\n cars[i]['longitude']\n ) for i in range(2)]\n # the first car must be closer than the second one\n self.assertLessEqual(d[0], d[1])\n # and they must not be similar\n self.assertNotEqual(cars[0], cars[1])",
"def run():\n\n # Build list of tuples of station names and distance \n stations = build_station_list()\n p = (52.2053, 0.1218)\n by_distance = stations_by_distance(stations, p)\n for n in range(10):\n print(by_distance[n])\n for n in range(10):\n i = len(by_distance) - 10 + n\n print(by_distance[i])",
"def comparable_dist(zamg_id):\n station_lat, station_lon = stations[zamg_id]\n return (lat - station_lat) ** 2 + (lon - station_lon) ** 2",
"def test_different_routes_from_c_to_c_and_distance_less_than_30(self):\n railroad = trains.Railroad()\n routes = railroad.find_routes('C', 'C', 9)\n routes = railroad.filter_routes_by_distance(routes, 0, 30)\n self.assertEqual(len(routes), 7)",
"def chopnod_sort(self, table):\n if not isinstance(table, Table):\n return\n elif None in [self.chopdist, self.noddist]:\n return\n elif 'xcentroid' not in table.columns or \\\n 'ycentroid' not in table.columns:\n return\n dist = np.sqrt((self.chopdist ** 2) + (self.noddist ** 2))\n x0, y0 = table['xcentroid'], table['ycentroid']\n valid = [False] * len(table)\n for idx, row in enumerate(table):\n dx = x0 - row['xcentroid']\n dy = y0 - row['ycentroid']\n dr = np.sqrt((dx ** 2) + (dy ** 2))\n dchop = abs(dr - self.chopdist)\n dnod = abs(dr - self.noddist)\n dchopnod = abs(dr - dist)\n ok = (np.array([dchop, dnod, dchopnod]) < self.epsilon)\n if ok.astype(int).sum() >= 2:\n valid[idx] = True\n table = table[valid]",
"def test_nearest(self):\n for lat, lon, icao in ((28.43, -81.31, \"KMCO\"), (28.43, -81, \"KTIX\")):\n stn, dist = station.Station.nearest(lat, lon, is_airport=True)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, icao)\n for val in dist.values():\n self.assertIsInstance(val, float)\n # Test with IATA req disabled\n stn, dist = station.Station.nearest(28.43, -81, False, False)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"FA18\")\n for val in dist.values():\n self.assertIsInstance(val, float)",
"def _rank_stations_by_distance_and_quality(lat, lon):\n\n station_ranking = rank_stations(lat, lon)\n station_ranking['enumerated_quality'] = station_ranking['rough_quality'].map(QUALITY_SORT)\n station_ranking = station_ranking.sort_values(by=['distance_meters', 'enumerated_quality'])\n return station_ranking",
"def test_get_distance_to_same_place() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n )\n\n assert meters == 0",
"def closest_stations(latlong, df):\n names = df['name'].values\n station_dists = {}\n for (lat, lon, name) in list(df[['Lat', 'Lon', 'name']].value_counts().index):\n if not(np.isnan(lat) or np.isnan(lon)):\n station_dists[name] = haversine(latlong, (lat, lon)) \n \n return sorted(station_dists.items(), key=lambda x: x[1])",
"def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01",
"def test_distance_function(self):\n if connection.ops.oracle:\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n elif connection.ops.spatialite:\n if connection.ops.spatial_version < (5,):\n # SpatiaLite < 5 returns non-zero distance for polygons and points\n # covered by that polygon.\n ref_dists = [326.61, 4899.68, 8081.30, 9115.15]\n else:\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n else:\n ref_dists = [0, 4891.20, 8071.64, 9123.95]\n htown = City.objects.get(name=\"Houston\")\n qs = Zipcode.objects.annotate(\n distance=Distance(\"poly\", htown.point),\n distance2=Distance(htown.point, \"poly\"),\n )\n for z, ref in zip(qs, ref_dists):\n self.assertAlmostEqual(z.distance.m, ref, 2)\n\n if connection.ops.postgis:\n # PostGIS casts geography to geometry when distance2 is calculated.\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n for z, ref in zip(qs, ref_dists):\n self.assertAlmostEqual(z.distance2.m, ref, 2)\n\n if not connection.ops.spatialite:\n # Distance function combined with a lookup.\n hzip = Zipcode.objects.get(code=\"77002\")\n self.assertEqual(qs.get(distance__lte=0), hzip)",
"def check_sort(self):\n if self.list == []:\n return True\n seg_iter = iter(self.list)\n last = next(seg_iter)\n for segment in seg_iter:\n if last > segment:\n raise Exception('non trié')\n last = segment\n return True",
"def CheckIfStationsAreVisitedInGivenOrder(ConnectionInfo, PathInfo, RouteConditions, OrderedStationList):\r\n\t# shortcuts\r\n\tif not PathInfo or len(PathInfo) < 2:\r\n\t\treturn True \r\n\tif not OrderedStationList or len(OrderedStationList) < 2:\r\n\t\treturn True \r\n\r\n\t# return true if next station is not in OrderedStationList\r\n\tNextStation = ConnectionInfo[ConnInfoInd['station_to']]\r\n\tif not NextStation in OrderedStationList:\r\n\t\treturn True \r\n\telse:\r\n\t\t# get last (highest-order) already visited station in OrderedStationList\r\n\t\tLastListedStation = None\r\n\t\tMaxInd = -1\r\n\t\tfor i in range(1, len(PathInfo)+1):\r\n\t\t\tstation = PathInfo[-i][ConnInfoInd['station_to']]\r\n\t\t\t\r\n\t\t\tif station in OrderedStationList:\r\n\t\t\t\tind = OrderedStationList.index(station)\r\n\t\t\t\tif ind > MaxInd:\r\n\t\t\t\t\tLastListedStation = station \r\n\t\t\t\t\tMaxInd = ind\r\n\r\n\t\t# check station orders (an equal or lower order station can be visited again)\r\n\t\tNextStationIND = OrderedStationList.index(NextStation) + 1\r\n\r\n\t\tLastStationIND = 0\r\n\t\tif LastListedStation:\r\n\t\t\tLastStationIND = OrderedStationList.index(LastListedStation) + 1\r\n\r\n\t\tif NextStationIND <= LastStationIND + 1:\r\n\t\t\treturn True \r\n\t\telse:\r\n\t\t\treturn False",
"def test_EstimateDistances(self):\n d = EstimateDistances(self.al, JC69())\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n ('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)\n \n # excercise writing to file\n d.writeToFile('junk.txt')\n try:\n os.remove('junk.txt')\n except OSError:\n pass # probably parallel",
"def create_station_list(self):\n sorted_station_list = sorted(self.station_dict, key=self.station_dict.get)\n\n return sorted_station_list",
"def compare_distance(self, a, b):\n a_dist = int(a['distance'])\n b_dist = int(b['distance'])\n if a_dist < b_dist:\n return -1\n elif a_dist > b_dist:\n return 1\n else:\n return 0",
"def _check_normalization(self):\n lastDistance = None\n distance = None\n for idx in xrange(len(self) - 1):\n distance = self[idx+1][0] - self[idx][0]\n\n # first run\n if lastDistance is None:\n lastDistance = distance\n continue\n\n if lastDistance != distance:\n return False\n\n lastDistance = distance\n\n return True",
"def test_distances(self):\n for p1, p2, distance in DISTANCES:\n calculated = p1.approximate_distance_meters(p2)\n self.assertAlmostEqual(distance, calculated, delta=5)",
"def update_table(vec1, vec2, dist):\n flag = False\n\n for router_to in range(len(vec1)):\n if vec1[router_to] > vec2[router_to] + dist:\n vec1[router_to] = vec2[router_to] + dist\n flag = True\n\n return vec1, flag",
"def test_distance_aed(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AED'), 'NO SUCH ROUTE')",
"def is_sorted(self):\n previous = 0 # Setting to 0 shouldn't be an issue aslong as MIN_VALUE is at least 0\n for value in self.data:\n if value < previous:\n return False\n previous = value\n return True",
"def miss_station(all_stations,stations):\n\tdiff = len(all_stations)-len(stations)\n k=0\n i=0\n miss_stations = ['']*diff\n a = all_stations[:]\n a.sort()\n s = stations[:]\n s.sort()\n while i < len(stations):\n while a[i] != s[i]:\n miss_stations[k]=a[i]\n del a[i]\n k+=1\n i+=1\n\treturn miss_stations",
"def maybe_distal(self):\n return bool(set(self.locations) & set(StandardTerminology.DISTAL_LOCATIONS))",
"def check_sorted(self):\n last_count = np.inf\n for count in self.Nx:\n if count > last_count:\n self.sorted = False\n return self.sorted\n last_count = count\n self.sorted = True\n return self.sorted",
"def sort_bike_stations(bike_stations, location):\n\n stations = bike_stations.copy()\n\n for index, station in enumerate(stations):\n station_location = (station[\"lat\"], station[\"lon\"])\n dist = distance.distance(station_location, location).m\n stations[index][\"distance\"] = dist\n\n stations = sorted(stations, key=lambda station: station[\"distance\"])\n stations = list(filter(lambda station: station[\"bikesAvailable\"] > 0, stations))\n\n return stations",
"def test_distance_query(self):\n locations = [\n Location.objects.create(name=\"The Piton Foundation\", lat=39.7438167, lng=-104.9884953),\n Location.objects.create(name=\"Hull House\", lat=41.8716782, lng=-87.6474517)\n ]\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n story.locations.add(locations[0])\n story.save()\n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='published')\n story2.locations.add(locations[1])\n story2.save()\n # If south migrations are enabled, we need to explicitly rebuild\n # the indexes because the RealTimeIndex signal handlers don't get\n # wired up. \n # See https://github.com/toastdriven/django-haystack/issues/599\n # In general, I think we can work around this by just setting\n # SOUTH_TESTS_MIGRATE = False in the settings\n #self._rebuild_index()\n req = RequestFactory().get('/explore/[email protected],1')\n resp = self.resource.explore_get_list(req)\n dehydrated = simplejson.loads(resp.content)\n self.assertEqual(len(dehydrated['objects']), 1)\n self.assertEqual(dehydrated['objects'][0]['story_id'], story.story_id)",
"def test_exact_matches(self):\n idw = self.dset.spec.sel(\n lons=self.lons_exact, lats=self.lats_exact, method=\"idw\"\n )\n nearest = self.dset.spec.sel(\n lons=self.lons_exact, lats=self.lats_exact, method=\"nearest\"\n )\n assert abs(idw.efth - nearest.efth).max() == 0"
] | [
"0.66526514",
"0.64656",
"0.6275362",
"0.60979766",
"0.6097272",
"0.6059003",
"0.6043562",
"0.59792054",
"0.5845956",
"0.5807009",
"0.5800586",
"0.57856506",
"0.5729412",
"0.5716074",
"0.5695116",
"0.5680469",
"0.5653806",
"0.5639628",
"0.5623877",
"0.5608037",
"0.55967736",
"0.5570804",
"0.5568012",
"0.5551753",
"0.55459106",
"0.5513904",
"0.5503926",
"0.54964423",
"0.54808754",
"0.54724413"
] | 0.8008606 | 0 |
Function to reset instrument commands. | def reset_instrument(self):
return self.inst.write('*RST') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _doReset(self):\n self._cmdReset()",
"def reset():\n pass",
"def reset():\n pass",
"def reset():",
"def reset():",
"def reset():",
"def reset(*args):",
"def reset(*args):",
"def reset(*args):",
"def reset(self):\r\n _debug('simq03b_api.reset')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r",
"def ObsReset(self):\n handler = self.get_command_object(\"ObsReset\")\n handler()",
"def reset(self, *args, **kwargs):",
"def reset(self):\r\r\n self.read(\"*cls\")\r\r\n self.waitForCompletion()\r\r\n self.read(\"*RST\") # Reset and query\r\r\n self.dev.write(\"*cls\")\r\r\n while self.read(\"*OPC?\") != \"1\": time.sleep(1) # Wait until completion\r\r",
"def reset():\r\n pass",
"def reset(self, *args, **kwargs):\n ...",
"def resetDeviceStates(self):",
"def reset(self):\n self.desc.put(self.desc.pvname.split(\".\")[0])\n self.scan.put(\"Passive\")\n self.calc.put(\"0\")\n self.prec.put(\"5\")\n self.dold.put(0)\n self.doln.put(\"\")\n self.dopt.put(\"Use VAL\")\n self.flnk.put(\"0\")\n self.odly.put(0)\n self.oopt.put(\"Every Time\")\n self.outn.put(\"\")\n for letter in self.channels.read_attrs:\n channel = self.channels.__getattr__(letter)\n channel.reset()",
"def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r",
"def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r",
"def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r",
"def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r",
"def reset(self):\n return self.set_command(\"Z\")",
"async def reset(self):\n await self.set_param(\"ContinuousExposures\", 0)\n await self.set_param(\"Exposures\", 0)\n cmd = await self.send_command(\"RESETTIMING\", timeout=1)\n if not cmd.succeeded():\n self.status = ControllerStatus.ERROR\n raise ArchonError(f\"Failed sending RESETTIMING ({cmd.status.name})\")\n\n # TODO: here we should do some more checks before we say it's IDLE.\n self.status = ControllerStatus.IDLE",
"def soft_reset():",
"def reset(self):\n self._cmd_line = 0\n self._file_line = 0",
"def reset() -> None:\n ...",
"def actionReset(self):\n sys.stderr.write(\"Reset device ...\\n\")\n sys.stderr.flush()\n self.bslReset(0) #only reset",
"def reset(self):\n self.at_cmd('Z')",
"def reset(self):\n \n pass",
"def reset(self):\n ..."
] | [
"0.6838344",
"0.6738413",
"0.6738413",
"0.671417",
"0.671417",
"0.671417",
"0.66459775",
"0.66459775",
"0.66459775",
"0.6641422",
"0.6612196",
"0.6542922",
"0.6525732",
"0.65187824",
"0.6515516",
"0.6514655",
"0.649084",
"0.646241",
"0.646241",
"0.646241",
"0.646241",
"0.64545614",
"0.6440721",
"0.64352864",
"0.64302737",
"0.64103454",
"0.6408707",
"0.6407169",
"0.64011025",
"0.6399784"
] | 0.728747 | 0 |
queries the database for a specific character; takes a name and returns a JSON with the lines | def lines_from_char(character):
query = f"""
SELECT script_l FROM script
JOIN characters
ON characters.char_id = script.characters_char_id
WHERE name = '{character}'
"""
data = pd.read_sql_query(query,engine)
return data.to_json(orient="records") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lines_from_char_ep(character,ep):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\nWHERE name = '{character}' and episode = '{ep}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")",
"def get_character_info(self, name):\n url = \"%s?%s\" % (self._base_url, urlencode({'name': name}))\n q = Request(url)\n q.add_header('User-Agent', 'curl/7.51.0')\n q.add_header('Accept', 'application/json')\n\n result = urlopen(q).read().decode('utf-8')\n data = json.loads(result)\n\n return data",
"def get_character(arg):\n character = requests.get(BASE_URL+'characters/'+arg)\n print character.json()\n return character.status_code",
"def lines_():\n query = f\"\"\"\nSELECT script_l, `name`, episode\nFROM script\nINNER JOIN characters\nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\n\"\"\"\n data = pd.read_sql_query(query, engine)\n return data.to_json(orient=\"records\")",
"def search_from_sqlite(self, key):\n key = ('.*' +key+ '.*',)\n conn = get_sqlite()\n c = conn.cursor()\n conn.create_function(\"REGEXP\", 2, regexp)\n c.execute('SELECT * FROM vertices WHERE name REGEXP ? ', key)\n results = c.fetchall()\n\n return json.dumps([{\n 'name': r[1],\n 'size': r[3],\n 'parent': r[2],\n 'last_accessed': r[4],\n 'last_modified': r[5]} for r in results])",
"def get_characters(self, sid):\n\n\t\twith open(self.get_fpath(sid)) as f:\n\t\t\treturn json.load(f)",
"def get_character_detail(chara_name: str) -> dict:\n\n chara_misc_json = load_characters_config()\n chara_details = list(filter(lambda x: (x['name'] == chara_name), chara_misc_json))\n\n if chara_details:\n return chara_details[0]\n else:\n return None",
"def on_get(req, resp):\n connection = db.connect()\n cursor = connection.cursor()\n cursor.execute('SELECT `name` FROM `contact_mode`')\n data = [row[0] for row in cursor]\n cursor.close()\n connection.close()\n resp.body = json_dumps(data)",
"def names():\n\n df = pd.read_sql_query(f\"SELECT * FROM olympics_raw\", con = engine)\n print(df.head())\n \n\n # return jsonify(all_olympians)\n return jsonify(df.to_dict(orient='records'))",
"def put(cls, char, name=None):\n if name is None:\n name = \"\".join(char[\"name\"].split())\n with open(os.path.join(app.config[\"CHARACTER_DIR\"], name + \".json\"), \"w\") as fp:\n json.dump(char, fp, indent=2)\n return name",
"def get(cls, character):\n return WorldData.get_table_data(cls.table_name, character=character)",
"def cursor_data(c):\r\n\r\n # pull column description\r\n d = []\r\n for i in range(len(c.description)):\r\n d.append(c.description[i][0])\r\n\r\n # fetch column entries\r\n c = c.fetchall()\r\n\r\n # compile list\r\n info = []\r\n for i in range(len(c)):\r\n # compile dictionary entry\r\n entry = {}\r\n for j in range(len(d)):\r\n entry[d[j]] = c[i][j]\r\n info.append(entry)\t\r\n\r\n # success\r\n return info",
"def get_by_character(self, character_id):\n sql = \"SELECT {0} FROM people_{0} WHERE people=?\".format(self.conveyance_type)\n try:\n query_result = self.cursor.execute(sql, (str(character_id),))\n except Exception as e:\n raise Exception(\n \"An error occurred while getting a character %s in the database: query: %s - message: %s\"\n % (self.conveyance_type, sql, e)\n )\n\n rows = query_result.fetchall()\n starships = [s_id for _, s_id in rows]\n\n return starships",
"def get_record_by_name(name):\n with RECORD_LOCK:\n # return list of matches or []\n return jsonify([r for r in RECORDS if r.get('name') == name])",
"def fetch():\n req_data= request.get_json()\n \n ## ddb uses text files, using this as to eat my own dogfoor and improve\n ## no service sql client. No daemon, low cpu.\n\n\n e=load_db()\n try:\n res=e.query(req_data['query'])\n \n serialized = jsonpickle.encode( res,\n unpicklable=False,\n make_refs=False)\n return serialized\n except Exception as ex:\n return \"{0} -> '{1}'\".format(ex,req_data['query'])",
"def cNames():\n a = pd.DataFrame(df['Country Name'].unique(), columns=['cname']).to_json()\n r = Response(response=a,\n status=200,\n mimetype=\"application/json\")\n r.headers[\"Content-Type\"] = \"text/json; charset=utf-8\"\n return r",
"def getAllWhereNameIs(table, name):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table + \" WHERE name like'\" + name + \"%'\")\n\t\tob = cur.fetchall()\n\t\tif not ob:\n\t\t\treturn \"\"\n\t\telse:\n\t\t\tobje = ob[0]\n\t\t\treturn obje\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function getAllWhereNameIs from DbController')",
"def handle_characters(curs, collection):\n character_list = curs.execute(\"\"\"SELECT * FROM charactercreator_character;\"\"\")\n for character in character_list:\n _, sl_curs = connect_to_sldb() # need to create a different cursor because the main one still \n # running and it will close the whole thing before it loop\n # item_list = sl_curs.execute(\n # f\"\"\"SELECT ai.name FROM charactercreator_character_inventory as cii\n # LEFT JOIN armory_item as ai\n # ON cii.item_id = ai.item_id\n # WHERE character_id={character[0]};\n # \"\"\")\n inventory = sl_curs.execute(\n f\"\"\"SELECT name, item_ptr_id\n FROM\n (SELECT * FROM charactercreator_character_inventory as cii\n LEFT JOIN armory_item as ai\n ON cii.item_id = ai.item_id) as a\n LEFT JOIN armory_weapon as aw\n ON a.item_id=aw.item_ptr_id\n WHERE character_id={character[0]};\n \"\"\").fetchall()\n\n character_doc = {\n \"name\": character[1],\n \"level\": character[2],\n \"exp\": character[3],\n \"hp\": character[4],\n \"strength\": character[5],\n \"intelligence\": character[6],\n \"dexterity\": character[7],\n \"wisdom\": character[8],\n \"items\": [item[0] for item in inventory],\n \"weapons\": [item[0] for item in inventory if item[1] != None]\n }\n sl_curs.close() # close that new cursor\n collection.insert_one(character_doc)\n\n\n # # A codier way to do it\n # schema = curs.execute(\n # \"PRAGMA table_info(charactercreator_character)\").fetchall()[1:]\n # for character in characters_list:\n # character_doc = {}\n # for index, item_tuple in enumerate(schema):\n # character_doc[item_tuple[1]] = character[index + 1]\n\n # collection.insert_one(character_doc)",
"def test_get(date1):\n # create mysql connection\n \n conn = pymysql.connect(host=config._DB_CONF['host'], \n port=config._DB_CONF['port'], \n user=config._DB_CONF['user'], \n passwd=config._DB_CONF['passwd'], \n db=config._DB_CONF['db'],\n charset='big5')\n cur = conn.cursor()\n \n sql=\"select * from maintain where `日期` =%s\"\n cur.execute(sql,date1)\n \n # get all column names\n columns = [desc[0] for desc in cur.description]\n # get all data\n rows=cur.fetchall()\n \n # build json \n result = rows_to_json(columns,rows)\n # print(result)\n \n cur.close()\n conn.close()\n\n return result",
"def select_all(db, tableName):\r\n try:\r\n c = db.cursor()\r\n c.execute(\"SELECT * FROM \" + tableName)\r\n print json.dumps(c.fetchall())\r\n except Error as e:\r\n print(e)",
"def namelist():\n\n\n session = Session(engine)\n\n results = session.query(lockdown.country).order_by(lockdown.country).all()\n\n #session.close()\n all_symbols = list(np.ravel(results))\n sym = all_symbols[1]\n\n return jsonify(all_symbols)",
"def load_character():\n global character\n filename = 'character.json'\n with open(filename) as file_object:\n character = json.load(file_object)",
"def fetch_all_characters(cls) -> Dict[str, Any]:\n res = cls._send_request(\"character\")\n return res",
"def view_character_list(request):\n\n characters_data = Character.objects.values('id', 'display_name')\n\n return render_chaffers(\n request,\n 'character_list.html',\n {'character_data': [json.dumps(character_data) for character_data in characters_data]}\n )",
"def beer(name):\n return jsonify(Beer.query.filter_by(name=name).first().serialize())",
"def autocomplete():\n value = str(request.args.get('q'))\n result = s.query(Genes).filter(Genes.name.like(\"%\" + value + \"%\")).all()\n data = [i.name for i in result]\n return jsonify(matching_results=data)",
"def get_item(self, name: str) -> list:\n self.sql_lock.acquire()\n items = []\n query: str = \"SELECT * FROM menu Where item_name LIKE \\\"{0}\\\"\" \n querys = [] \n query = query.split(\"--\")[0]\n\n if '\\\"' in name:\n potential_querys = name.split(\"\\\"\") \n querys.append(query.format(potential_querys[0]))\n potential_querys = potential_querys[1].split(\";\")\n for query_to_run in potential_querys:\n if \"SELECT\" in query_to_run: \n for item in self.cursor.execute(query_to_run):\n items.append(item)\n else:\n self.cursor.execute(query_to_run)\n else: \n \n for item in self.cursor.execute(query.format(name)):\n item_name, cost, path, id = item\n items.append({\"item_name\": item_name, \"cost\": cost, \"path\": path, \"Id\": id})\n self.sql_lock.release()\n \n return items",
"def loadChars(file=os.path.join(os.path.dirname(__file__), \"character_set.txt\")):\r\n\r\n with open(file,\"r\") as f:\r\n return json.load(f)",
"def read_from_db(cursor):\n\tcursor.execute('''SELECT * FROM (\n\t\tSELECT * FROM Kombucha_data ORDER BY Time DESC LIMIT 20)\n\t\tORDER BY Time ASC;''')\n\n\trows = cursor.fetchall()\n\treturn rows",
"def read_from_db(cursor):\n\tcursor.execute('''SELECT * FROM (\n\t\tSELECT * FROM Kombucha_data ORDER BY Time DESC LIMIT 20)\n\t\tORDER BY Time ASC;''')\n\n\trows = cursor.fetchall()\n\treturn rows"
] | [
"0.6725487",
"0.60146636",
"0.5816583",
"0.5747994",
"0.5689336",
"0.5673698",
"0.56607604",
"0.5536652",
"0.5442061",
"0.5321022",
"0.5247645",
"0.52152646",
"0.5201513",
"0.5195355",
"0.5165863",
"0.51640224",
"0.5157612",
"0.5139213",
"0.5121536",
"0.5115688",
"0.5093197",
"0.50641656",
"0.50552994",
"0.5046147",
"0.5024798",
"0.50079495",
"0.49876416",
"0.49817476",
"0.49767473",
"0.49767473"
] | 0.7432127 | 0 |
queries the database for a specific character and episode; takes a name and an episode and returns a JSON with the filtered lines | def lines_from_char_ep(character,ep):
query = f"""
SELECT script_l FROM script
JOIN characters
ON characters.char_id = script.characters_char_id
INNER JOIN episodes
ON episodes.ep_id = script.episodes_ep_id
WHERE name = '{character}' and episode = '{ep}'
"""
data = pd.read_sql_query(query,engine)
return data.to_json(orient="records") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lines_():\n query = f\"\"\"\nSELECT script_l, `name`, episode\nFROM script\nINNER JOIN characters\nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\n\"\"\"\n data = pd.read_sql_query(query, engine)\n return data.to_json(orient=\"records\")",
"def lines_from_char(character):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nWHERE name = '{character}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")",
"def get_and_write_char_episode_result(\n file_path: str, url: str, characters: list\n) -> None:\n result = {}\n for character in characters:\n url_f = f\"{url}?name={character}\"\n print(f\"Fetching {url_f}\")\n page_result = requests.get(url_f).json()[\"results\"]\n page_result = page_result[0][\"episode\"]\n result[character] = []\n for url_linked_episode in page_result:\n result[character].append(requests.get(url_linked_episode).json()[\"name\"])\n\n with open(f\"{file_path}/episode_character_appearance.csv\", \"w\") as rick_file:\n rick_file.write(\"episode,character\\n\")\n for character in result:\n for episode in result[character]:\n rick_file.write(f\"{episode};{character}\\n\")",
"def get_episode_details(token, url, season):\n u = url + str(season)\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(u, headers=headers)\n json_data = json.loads(r.text).get('data')\n season_details = {}\n season_details['current_season'] = season\n if len(json_data) > 1:\n for episode in json_data:\n d = episode.get('firstAired')\n date = datetime.datetime.strptime(d, \"%Y-%m-%d\")\n today = datetime.datetime.today()\n if date.date() >= today.date():\n season_details['next_ep_no'] = episode.get('airedEpisodeNumber')\n season_details['next_air_date'] = episode.get('firstAired')\n season_details['ep_title'] = episode.get('episodeName')\n season_details['ep_overview'] = episode.get('overview')\n break\n else:\n season_details['next_ep_no'] = (json_data[len(json_data) - 1].get('airedEpisodeNumber'))\n season_details['next_air_date'] = (json_data[len(json_data) - 1].get('firstAired'))\n season_details['ep_title'] = (json_data[len(json_data) - 1].get('episodeName'))\n season_details['ep_overview'] = (json_data[len(json_data) - 1].get('overview'))\n else:\n season_details['next_ep_no'] = 1\n season_details['next_air_date'] = (json_data[0].get('firstAired'))\n season_details['ep_title'] = (json_data[0].get('episodeName'))\n season_details['ep_overview'] = (json_data[0].get('overview'))\n if season_details['next_air_date'] == \"\":\n season_details['next_air_date'] = 'TBD'\n if season_details['ep_title'] == \"\" or season_details['ep_title'] is None:\n season_details['ep_title'] = 'TBD'\n if season_details['ep_overview'] == \"\" or season_details['ep_overview'] is None:\n season_details['ep_overview'] = 'TBD'\n return season_details",
"def parse(self: object, data_row: list[str]):\n if len(data_row) == 0:\n return\n logging.debug(\"data row {}\".format(data_row))\n # Episode number is first element of row\n episode_id_raw: Match[str] = re.search(r\"([0-9]+)\", data_row[0])\n self.episode_id = int(episode_id_raw.group(1))\n # Year of episode\n episode_year_raw: Match[str] = re.search(r\"([0-9]{4})\", data_row[3])\n self.episode_year = int(episode_year_raw.group(1))\n # Episode name is second element of row, strip unwanted information like '(Folge 332 trägt den gleichen Titel)' using regexp\n self.episode_name = re.sub(r\"\\(Folge [0-9]+(.)+\\)\", \"\", data_row[1].strip()).strip()\n # Inspectors of episode, 5th element of row, strip unwanted information like '(Gastauftritt XXX)' using regexp but keep all anmes of comissioners\n episode_inspectors_raw: Match[str] = re.search(r\"([a-zA-zäöüÄÖÜß, ]+)(\\s+)?(\\(Gastauftritt\\s([a-zA-zäöüÄÖÜß, ]+){1}\\))?\", data_row[4])\n self.episode_inspectors = episode_inspectors_raw.group(1)\n if episode_inspectors_raw.group(4):\n self.episode_inspectors = \"{}, {}\".format(episode_inspectors_raw.group(1), episode_inspectors_raw.group(4))\n # Get name of broadcast station, 3rd element of row\n self.episode_broadcast = data_row[2].strip()\n # Get sequence number of detective team, strip alternative numbering\n self.episode_sequence = re.sub(r\"(\\(\\s*[0-9]*\\)*)\", \"\", data_row[5].strip()).strip()\n # Strip invalid characters\n self._strip_invalid_characters()\n # Mark as not empty\n self.empty = False",
"def episode(request, ep_id):\n new_episode = get_object_or_404(Episode, id=ep_id)\n crisis_updates = new_episode.get_viewable_crisis_updates_for_player(request.user)\n emits = new_episode.get_viewable_emits_for_player(request.user)\n return render(\n request,\n \"character/episode.html\",\n {\n \"episode\": new_episode,\n \"updates\": crisis_updates,\n \"emits\": emits,\n \"page_title\": str(new_episode),\n },\n )",
"def get_episodes_data(session: Session, show_id: str, conn_id: str, season_id: str) -> dict:\n response = session.get(f\"https://www.vvvvid.it/vvvvid/ondemand/{show_id}/season/{season_id}?conn_id={conn_id}\", headers=HEADERS)\n response.raise_for_status()\n episodes = response.json()['data']\n #check if none of the episodes have url or are playable\n are_not_downloadable = all(not episode['embed_info'] or not episode ['playable'] for episode in episodes)\n if are_not_downloadable:\n raise Exception(\"Non e' possibile scaricare questo show.\")\n \n return episodes",
"def getEpCast(imdbLink, dicChars):\n\n dicEpCast = dicChars.copy()\n\n urlIDMB = requests.get(imdbLink + \"fullcredits\").text\n soup = BeautifulSoup(urlIDMB, 'lxml')\n seriesTable = soup.find('table', {'class': 'cast_list'}).find_all('tr')\n\n for char in seriesTable:\n charInfo = char.find_all('td')\n if len(charInfo) == 4:\n actorName = charInfo[1].text.strip()\n\n key = normalizeName(actorName)\n\n if key in dicEpCast:\n dicEpCast[key] = '1'\n\n return \",\".join(x for x in dicEpCast.values())",
"def get_episodes(token, show_id):\n page = 1\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes?page=' + str(page)\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text).get('links')\n first = json_data.get('first')\n last = json_data.get('last')\n no_of_seasons = 1\n if last > first:\n for p in range(1, last + 1):\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes?page=' + str(p)\n s = get_season_no(token, url)\n if s > no_of_seasons:\n no_of_seasons = s\n else:\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes?page=' + str(1)\n s = get_season_no(token, url)\n if s > no_of_seasons:\n no_of_seasons = s\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes/query?airedSeason='\n update_details = get_episode_details(token, url, no_of_seasons)\n return update_details",
"def create_episode(e, debug=False):\n #{\"title\": , \"summary\": , \"image\": , \"link\": , \"season\": , \"number\": , \"rating\"}\n\n if debug:\n print(\"beginning create_episode()\")\n\n episode = {}\n\n # get BeautifulSoup data for extracting details\n episode_url = \"https://www.imdb.com/\" + e[\"link\"]\n episode_soup = bs4.BeautifulSoup(requests.get(episode_url).text, features=\"html.parser\")\n\n #get title\n title_wrapper = episode_soup.select(\".title_wrapper\")[0]\n episode[\"title\"] = title_wrapper.select(\"h1\")[0].contents[0].replace(u'\\xa0', ' ')\n\n #get summary\n episode[\"summary\"] = episode_soup.select(\".summary_text\")[0].contents[0].replace(u'\\n', ' ')\n\n #get image\n episode[\"image\"] = get_image(e[\"link\"], debug)\n\n #link\n episode[\"link\"] = e[\"link\"]\n\n #season\n episode[\"season\"] = e[\"season\"]\n\n #number\n episode[\"number\"] = e[\"episode_number\"]\n\n #rating\n episode[\"rating\"] = e[\"rating\"]\n\n return episode",
"def episode_list(request):\n if request.method == 'GET':\n user = request.GET.get('user')\n episodes = Episodes()\n episodes_list = episodes.get_user_episodes(user)\n return JSONResponse(episodes_list)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = DBSerializer(data=data)\n if serializer.is_valid():\n logging.debug('Creating an episode' + data)\n # serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)",
"def import_data(filename):\r\n regex = re.compile(\"\"\"\"(?P<show_name>.*?)\"\\s+\\((?P<year>\\d+)(?:|/.*?)\\)\\s+\\{(?P<episode_name>.*?)\\s?\\(\\#(?P<season_no>\\d+)\\.(?P<episode_no>\\d+)\\)\\}\"\"\")\r\n\r\n with codecs.open(filename, \"r\", \"latin-1\") as ratings:\r\n # Generate all the lines that matched.\r\n matches = (match for match in (regex.search(line.strip()) for line in ratings) if match)\r\n counter = 0\r\n for match in matches:\r\n counter += 1\r\n if not counter % 100:\r\n print counter\r\n episode = {}\r\n for field in [\"show_name\", \"year\", \"episode_name\", \"episode_no\", \"season_no\"]:\r\n episode[field] = match.group(field)\r\n\r\n # If the episode has no name it is given the same name as on imdb.com for consistency.\r\n if not episode[\"episode_name\"]:\r\n episode[\"episode_name\"] = \"Episode #%s.%s\" % (episode[\"season_no\"], episode[\"episode_no\"])\r\n\r\n try:\r\n show = session.query(Show).filter_by(name=episode[\"show_name\"], year=episode[\"year\"]).one()\r\n except sqlalchemy.orm.exc.NoResultFound:\r\n show = Show(episode[\"show_name\"], episode[\"year\"])\r\n session.add(show)\r\n\r\n try:\r\n episode = session.query(Episode).filter_by(name=episode[\"episode_name\"], show=show).one()\r\n except sqlalchemy.orm.exc.NoResultFound:\r\n episode = Episode(show, episode[\"episode_name\"], episode[\"season_no\"], episode[\"episode_no\"])\r\n session.add(episode)\r\n\r\n #session.commit()\r",
"def get_character_detail(chara_name: str) -> dict:\n\n chara_misc_json = load_characters_config()\n chara_details = list(filter(lambda x: (x['name'] == chara_name), chara_misc_json))\n\n if chara_details:\n return chara_details[0]\n else:\n return None",
"def check(what,string):\n if what == \"characters\":\n query = list(engine.execute(f\"SELECT name FROM characters WHERE name = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n \n elif what == \"script\":\n query = list(engine.execute(f\"SELECT script_l FROM script WHERE script_l = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n \n elif what == \"episodes\":\n query = list(engine.execute(f\"SELECT episode FROM episodes WHERE episode = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n #extra meme..",
"def getEpisodeArt(episode):\n\tseriesId = None\n\tfor sk in Dict['series'].keys():\n\t\tif Dict['series'][str(sk)]['title']==episode['seriesTitle']:\n\t\t\tseriesId = int(sk)\n\tif seriesId is not None:\n\t\tartUrl = \"\"\n\t\tif Dict['series'][str(seriesId)]['tvdbId'] is not None:\n\t\t\tartUrl = fanartScrapper.getSeasonThumb(Dict['series'][str(seriesId)]['tvdbId'], episode['season'], rand=False)\n\t\t\t#Log.Debug(\"arturl: %s\"%artUrl)\n\t\t\tif artUrl is not None:\n\t\t\t\tart = Function(getArt,url=artUrl)\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = Dict['series'][str(seriesId)]['art']\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = R(CRUNCHYROLL_ART)\n\telse:\n\t\tartUrl = R(CRUNCHYROLL_ART)\n\tLog.Debug(\"artUrl: %s\"%artUrl)\n\treturn artUrl",
"def getFilms(character):\n\n ret = []\n for film in character.get('films'):\n number = int(film.rstrip('/').rpartition('/')[2])\n if number not in cache:\n response = requests.get(film)\n response = response.json()\n title = response.get('title')\n cache[number] = title\n ret.append(cache.get(number))\n return ret",
"def querykodi(jsonquery):\n\n try:\n jsonresponse = requests.get(jsonquery, headers=HTTPHEADERS)\n except requests.exceptions.RequestException as reqexception:\n print 'Error!', reqexception\n sys.exit(RETURNCODE)\n\n if jsonresponse.status_code != 200:\n print 'Error!', URLPARAMETERS, 'returned HTTP:', \\\n jsonresponse.status_code\n sys.exit(RETURNCODE)\n\n #jsonresponse.text will look like this if something is playing\n #{\"id\":1,\"jsonrpc\":\"2.0\",\"result\":[{\"playerid\":1,\"type\":\"video\"}]}\n #and if nothing is playing:\n #{\"id\":1,\"jsonrpc\":\"2.0\",\"result\":[]}\n\n jsondata = json.loads(jsonresponse.text)\n debugprint(jsondata, \"jsondata\")\n return jsondata",
"def video_info_query():\n mongodb = get_db() \n start_time = time.time()\n\n collection = mongodb['videos']\n # entries = list(collection.find().sort(\"video_name\"))\n # entries = list(collection.find({ \"$or\": [{\"course_name\":\"PH207x-Fall-2012\"},{\"course_name\":\"CS188x-Fall-2012\"},{\"course_name\":\"3.091x-Fall-2012\"},{\"course_name\":\"6.00x-Fall-2012\"}]}).sort(\"video_name\"))\n # only MIT courses\n entries = list(collection.find({ \"$or\": [{\"course_name\":\"3.091x-Fall-2012\"},{\"course_name\":\"6.00x-Fall-2012\"}]}).sort(\"video_name\"))\n # entries = list(collection.find({\"course_name\":\"PH207x-Fall-2012\"}).sort(\"video_name\"))\n # entries = list(collection.find({\"course_name\":\"6.00x-Fall-2012\"}).sort(\"video_name\"))\n # entries = list(collection.find({\"course_name\":\"3.091x-Fall-2012\"}).sort(\"video_name\"))\n # entries = list(collection.find({\"course_name\":\"CS188x-Fall-2012\"}).sort(\"video_name\"))\n # entries = list(collection.find({\"course_name\":\"VDA101\"}).sort(\"video_name\"))\n if len(entries):\n result = json.dumps(entries, default=json_util.default)\n else:\n result = \"\"\n print sys._getframe().f_code.co_name, \"COMPLETED\", (time.time() - start_time), \"seconds\"\n return result",
"def search_db_via_query(query):\n connection = sqlite3.connect(\"Pflanzendaten.db\")\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM plants WHERE \" + query)\n content = cursor.fetchall()\n print(tabulate((content), headers=['species', 'name', 'nativ', 'endangered', 'habitat', 'waterdepthmin', 'waterdepthmax', 'rootdepth', 'groundwatertablechange', 'floodheightmax', 'floodloss', 'floodduration']))\n print('Status 1 equals nativ')\n\n connection.close()",
"def lookup_employee():\n unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)",
"def search_season_episode(self,strz):\t\n\t\tpattern = compile(\"(S(\\d\\d)E(\\d\\d))\") #S01E03\n\t\tsep = pattern.search(strz)\t\t\n\t\tif sep is not None:\n\t\t\tse= sep.group(1)\n\t\t\tseason = sep.group(2)\n\t\t\tepisode = sep.group(3)\n\t\t\treturn strz.replace(se,\"\")\n\t\t\t\n\t\tpattern = compile(\"((\\d\\d)x(\\d\\d))\") #01x03\n\t\tsep = pattern.search(strz)\t\t\n\t\tif sep is not None:\n\t\t\tse= sep.group(1)\n\t\t\tseason = sep.group(2)\n\t\t\tepisode = sep.group(3)\n\t\t\treturn strz.replace(se,\"\")\n\t\t\t\n\t\tpattern = compile(\"(Ep(\\d\\d))\") #Ep03\n\t\tsep = pattern.search(strz)\t\t\n\t\tif sep is not None:\n\t\t\tse= sep.group(1)\n\t\t\tepisode = sep.group(2)\n\t\t\treturn strz.replace(se,\"\")",
"def parse_episode_page_html(season, episode, html):\n\n data = []\n\n lines = html.split('\\n')\n\n start_parse_dialog = False\n\n for line in lines:\n\n if 'class=\"postbody\"' in line:\n start_parse_dialog = True\n\n if start_parse_dialog and '<p>' in line and ':' in line:\n datum = {}\n datum['season'] = season\n datum['episode'] = episode\n\n dialog_str = line.split(':')[1].split('</p>')[0]\n dialog_str = re.sub(r'\\([a-zA-Z ]*\\)', '', dialog_str)\n dialog_str = dialog_str.strip()\n datum['dialog'] = dialog_str\n datum['num_words'] = len(dialog_str.split())\n\n speakers_str = line.split('<p>')[1].split(':')[0]\n if ',' in speakers_str and 'and' in speakers_str:\n for speaker in speakers_str.split(','):\n if 'and' in speaker:\n for sub_speaker in speaker.split('and'):\n datum['speaker'] = clean_speaker_string(sub_speaker.strip())\n else:\n datum['speaker'] = clean_speaker_string(speaker.strip())\n elif 'and' in speakers_str:\n for sub_speaker in speakers_str.split('and'):\n datum['speaker'] = clean_speaker_string(sub_speaker.strip())\n else:\n datum['speaker'] = clean_speaker_string(speakers_str.strip())\n\n data.append(datum)\n\n return data",
"def get(self, show_id, ep_id, session):\n try:\n db.show_by_id(show_id, session=session)\n except NoResultFound:\n raise NotFoundError('show with ID %s not found' % show_id)\n try:\n episode = db.episode_by_id(ep_id, session)\n except NoResultFound:\n raise NotFoundError('episode with ID %s not found' % ep_id)\n if not db.episode_in_show(show_id, ep_id):\n raise BadRequest(f'episode with id {ep_id} does not belong to show {show_id}')\n\n rsp = jsonify(episode.to_dict())\n\n # Add Series-ID header\n rsp.headers.extend({'Series-ID': show_id})\n return rsp",
"def get_episodes(link, seasons, factor, debug=False):\n\n if debug:\n print(\"begin get_episodes()\")\n print(seasons, factor)\n\n episodes = {\"episodes\": [], \"weights\": []}\n\n #this is the url that will be modified to access individual seasons\n base_url = f\"https://www.imdb.com/{link}episodes?season=\"\n\n if debug:\n print(f\"Base URL: {base_url}\")\n\n # iterate through seasons\n for season in seasons:\n season_url = base_url + season\n season_soup = bs4.BeautifulSoup(requests.get(season_url).text, features=\"html.parser\")\n episode_divs = season_soup.select(\".list_item\")\n\n #iterate through episodes\n for i in range(len(episode_divs)):\n div = episode_divs[i]\n ep_link = div.select('strong > a')[0].get('href')\n rating_elem = div.select('.ipl-rating-star__rating')\n\n # excludes unrated episodes ensuring they have been airred\n if len(rating_elem) != 0:\n rating = float(rating_elem[0].contents[0])\n\n #add episode\n episodes[\"episodes\"].append({\"link\": ep_link,\n \"season\": int(season),\n \"episode_number\": i + 1,\n \"rating\": rating})\n\n # add weight if there is a factor selected\n if factor != 0:\n weight = rating ** factor\n episodes[\"weights\"].append(weight)\n if debug:\n print(f\"weight: {weight}\")\n return episodes",
"def giveId(what,string):\n if what == \"characters\":\n return list(engine.execute(f\"SELECT char_id FROM characters WHERE name ='{string}';\"))[0][0]\n elif what == \"episodes\":\n return list(engine.execute(f\"SELECT ep_id FROM episodes WHERE episode ='{string}';\"))[0][0]",
"def getEpisodeSegmentsJson(request, flightName=None, sourceShortName=None):\n try:\n episode = None\n if flightName:\n episode = getClassByName(settings.XGDS_VIDEO_GET_EPISODE_FROM_NAME)(flightName)\n else:\n episode = getClassByName(settings.XGDS_VIDEO_GET_ACTIVE_EPISODE)()\n if not episode:\n raise Exception('no episode')\n except:\n return HttpResponse(json.dumps({'error': 'No episode found'}), content_type='application/json', status=406)\n \n active = episode.endTime is None\n if not flightName:\n flightName = episode.shortName\n\n # get the segments\n segments = {}\n if sourceShortName:\n segments[sourceShortName] = [s.getDict() for s in episode.videosegment_set.filter(source__shortName=sourceShortName)]\n else:\n distinctSources = episode.videosegment_set.values('source__shortName').distinct()\n for theSource in distinctSources:\n sn = str(theSource['source__shortName'])\n segments[sn] = [ s.getDict() for s in episode.videosegment_set.filter(source__shortName=sn)]\n \n if not segments:\n return HttpResponse(json.dumps({'error': 'No segments found for ' + flightName}), content_type='application/json', status=406)\n\n result = []\n result.append({'active': active})\n result.append({'episode': episode.getDict()})\n result.append({'segments': segments})\n \n return HttpResponse(json.dumps(result, sort_keys=True, indent=4, cls=DatetimeJsonEncoder), content_type='application/json')",
"def character_list(request):\n\n def get_relations(char):\n \"\"\"helper function for getting dict of character's relationships\"\"\"\n\n def parse_name(relation):\n \"\"\"Helper function for outputting string display of character name\"\"\"\n if relation.player:\n char_ob = relation.player.char_ob\n return \"%s %s\" % (char_ob.key, char_ob.item_data.family)\n else:\n return str(relation)\n\n try:\n dom = char.player_ob.Dominion\n parents = []\n uncles_aunts = []\n for parent in dom.all_parents:\n parents.append(parent)\n for sibling in parent.siblings:\n uncles_aunts.append(sibling)\n for spouse in sibling.spouses.all():\n uncles_aunts.append(spouse)\n\n unc_or_aunts = set(uncles_aunts)\n relations = {\n \"parents\": [parse_name(ob) for ob in parents],\n \"siblings\": list(parse_name(ob) for ob in dom.siblings),\n \"uncles_aunts\": list(parse_name(ob) for ob in unc_or_aunts),\n \"cousins\": list(parse_name(ob) for ob in dom.cousins),\n }\n return relations\n except AttributeError:\n return {}\n\n def get_dict(char):\n \"\"\"Helper function for getting dict of all relevant character information\"\"\"\n character = {}\n if char.player_ob.is_staff or char.db.npc:\n return character\n character = {\n \"name\": char.key,\n \"social_rank\": char.item_data.social_rank,\n \"fealty\": str(char.item_data.fealty),\n \"house\": char.item_data.family,\n \"relations\": get_relations(char),\n \"gender\": char.item_data.gender,\n \"age\": char.item_data.age,\n \"religion\": char.db.religion,\n \"vocation\": char.item_data.vocation,\n \"height\": char.item_data.height,\n \"hair_color\": char.item_data.hair_color,\n \"eye_color\": char.item_data.eye_color,\n \"skintone\": char.item_data.skin_tone,\n \"description\": char.perm_desc,\n \"personality\": char.item_data.personality,\n \"background\": char.item_data.background,\n \"status\": char.roster.roster.name,\n \"longname\": char.item_data.longname,\n }\n try:\n if char.portrait:\n character[\"image\"] = char.portrait.image.url\n except (Photo.DoesNotExist, AttributeError):\n pass\n return character\n\n global API_CACHE\n if not API_CACHE:\n ret = map(\n get_dict,\n Character.objects.filter(\n Q(roster__roster__name=\"Active\") | Q(roster__roster__name=\"Available\")\n ),\n )\n API_CACHE = json.dumps(list(ret))\n return HttpResponse(API_CACHE, content_type=\"application/json\")",
"def query_by_person(self, name: str) -> dict:\n if not self.client:\n self.connect()\n return self.client.moviebuff.castcrew.find_one({'Name': name})",
"def search(token, query):\n format_query = query.replace(\" \", \"%20\")\n url = 'https://api.thetvdb.com/search/series?name=' + format_query\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text)\n show_list = json_data.get('data')\n for show in show_list:\n if show.get('status') == 'Continuing':\n show_id = show.get('id')\n s = create_show(token, show_id)\n return s",
"def get_character(arg):\n character = requests.get(BASE_URL+'characters/'+arg)\n print character.json()\n return character.status_code"
] | [
"0.6161546",
"0.61566335",
"0.58697116",
"0.531941",
"0.51938283",
"0.5191531",
"0.51225615",
"0.50665915",
"0.49034274",
"0.48820502",
"0.48670354",
"0.48493454",
"0.48400128",
"0.48004526",
"0.47973293",
"0.47908667",
"0.47580832",
"0.47565988",
"0.47521466",
"0.47492182",
"0.4741195",
"0.4740418",
"0.4724786",
"0.47156763",
"0.46878895",
"0.4683128",
"0.46818048",
"0.46757242",
"0.46561244",
"0.4648763"
] | 0.7129047 | 0 |
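Note on the lines_from_char_ep document above: it interpolates the character and episode straight into the SQL string. A minimal sketch of the same query with bound parameters instead of f-string interpolation — the function name lines_from_char_ep_safe and the sqlite connection string are made up for illustration; the join and column names come from the row above:

import pandas as pd
from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///scripts.db")  # hypothetical connection string; the original keeps engine at module level

def lines_from_char_ep_safe(character, ep):
    # Same join as the original row, but user input is passed as bound parameters
    query = text("""
        SELECT script_l FROM script
        JOIN characters ON characters.char_id = script.characters_char_id
        INNER JOIN episodes ON episodes.ep_id = script.episodes_ep_id
        WHERE name = :character AND episode = :ep
    """)
    data = pd.read_sql_query(query, engine, params={"character": character, "ep": ep})
    return data.to_json(orient="records")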
queries the database for all lines; takes no arguments and returns a JSON with all the lines | def lines_():
query = f"""
SELECT script_l, `name`, episode
FROM script
INNER JOIN characters
ON characters.char_id = script.characters_char_id
INNER JOIN episodes
ON episodes.ep_id = script.episodes_ep_id
"""
data = pd.read_sql_query(query, engine)
return data.to_json(orient="records") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def get_all_record():\n # X_new = item.to_df()\n # item_str = item.to_string()\n # project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_all_records()\n return return_json",
"def select_all_lines(conn):\n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM ayasdi_table\")\n\n rows = cur.fetchall()\n\n for row in rows:\n print row",
"def query():\n rows = []\n data = db.get()\n\n for calc in data:\n rows.append({\"ip\" : calc.ip, \"text\":calc.text})\n\n return jsonify(rows)",
"def select_all(db, tableName):\r\n try:\r\n c = db.cursor()\r\n c.execute(\"SELECT * FROM \" + tableName)\r\n print json.dumps(c.fetchall())\r\n except Error as e:\r\n print(e)",
"def _fetch_records(query):\n con = connect()\n cursor = con.cursor()\n cursor.execute(query)\n row_headers = [x[0] for x in cursor.description] # this will extract row headers\n results = cursor.fetchall()\n json_data = []\n for result in results:\n json_data.append(dict(zip(row_headers, result)))\n cursor.close()\n return json.dumps(json_data)",
"def get_all_records(self, data: dict, execution_context: dict):",
"def read_all(self):\n def is_data(i):\n \"\"\"\n It checks if given key is different than added by system\n \"\"\"\n keys = ['_id', '_time']\n return all(i != k for k in keys)\n\n self.logger.log_reading()\n return simplejson.dumps([{i: x[i] for i in x if is_data(i)} for x in self.json_collection.find()])",
"def get_all_todos():\n with sql.connect(\"todos.db\") as con:\n cursor = con.cursor()\n cursor.execute(\"SELECT * from todos\")\n todos = cursor.fetchall()\n cursor.close()\n\n return jsonify(todos), 200",
"def lines_from_char(character):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nWHERE name = '{character}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")",
"def get_all_objects():\n \n # Database connection \n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n\n # Select all object query\n c.execute(\"SELECT * FROM objects\")\n rows = c.fetchall()\n\n # Closing connection\n conn.close()\n\n # Found objects to dict {id : {obj}}\n objects = {k[0]:{} for k in rows}\n for row in rows:\n obj = {col:\"\" for col in COLUMNS[1:]}\n for i in range(1, len(row)):\n obj[COLUMNS[i]] = row[i]\n objects[row[0]] = obj\n return json.dumps(objects)",
"def lines_from_char_ep(character,ep):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\nWHERE name = '{character}' and episode = '{ep}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")",
"def get_all(self):\n url = self._dbname + '/_all'\n return self._connection.get(url).json()",
"def rpc_database_get_rows(self, *args):\n\t\targs = list(args)\n\t\toffset = 0\n\t\tfields = self.path.split('/')[1:-2]\n\t\tif len(args) == (len(fields) + 1):\n\t\t\toffset = (args.pop() * VIEW_ROW_COUNT)\n\t\tassert len(fields) == len(args)\n\t\ttable_name = self.path.split('/')[-2]\n\t\ttable = DATABASE_TABLE_OBJECTS.get(table_name)\n\t\tassert table\n\n\t\t# it's critical that the columns are in the order that the client is expecting\n\t\tcolumns = DATABASE_TABLES[table_name]\n\t\trows = []\n\t\tsession = db_manager.Session()\n\t\tquery = session.query(table)\n\t\tquery = query.filter_by(**dict(zip((f + '_id' for f in fields), args)))\n\t\tfor row in query[offset:offset + VIEW_ROW_COUNT]:\n\t\t\trows.append([getattr(row, c) for c in columns])\n\t\tsession.close()\n\t\tif not len(rows):\n\t\t\treturn None\n\t\treturn {'columns': columns, 'rows': rows}",
"def get_all_rows(self):\n cur = self.cursor()\n sql = (\"SELECT * FROM snapshot_log;\")\n cur.execute(sql)\n r = cur.fetchall()\n #cur.close()\n self.close()\n return r",
"def get_all(self):\n # read log\n d = {}\n log = self.get_logfile()\n if not os.path.isfile(log):\n return d\n f = open(log, \"r\")\n if f.mode == 'r':\n lines = f.readlines()\n for line in lines:\n dline = json.loads(line)\n d.update(dline)\n f.close()\n return d",
"def api_all():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = dict_factory\r\n\tcur = conn.cursor()\r\n\tall_products = cur.execute('SELECT * FROM products WHERE inventory_count>0;').fetchall()\r\n\treturn jsonify(all_products)",
"def all_rows(self, table, prt=False):\n conn = psycopg2.connect(self.name, sslmode='require')\n # conn.row_factory = sqlite3.Row\n c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n c.execute(\"SELECT * FROM {}\".format(table))\n all_rows=c.fetchall()\n if(prt):\n for row in all_rows:\n print(row) \n conn.close()\n return all_rows",
"def get_all(self):\n return {\"parcels\": self.db}, 200",
"def read_all(table_id = None, \n language = 'en',\n base_url = 'http://data.ssb.no/api/v0', \n full_url = None):\n \n \n if full_url is None: \n full_url = '{base_url}/{language}/table/{table_id}'.format(\n base_url = base_url,\n language = language, \n table_id = table_id)\n \n query = full_json(full_url = full_url)\n data = requests.post(full_url, json = query)\n results = pyjstat.from_json_stat(data.json(object_pairs_hook=OrderedDict))\n \n # maybe this need not be its own function, \n # but an option in read_json? json = 'all'\n \n # other functions(options include: read_recent to get only the \n # most recent values (defined as x), json = 'recent')\n \n return results[0]",
"def get_records():\n with RECORD_LOCK: # since flask 1.0 multi-threaded is enabled by default\n return jsonify(RECORDS)",
"def see_all():\n database = get_connection()\n patients_in_db = []\n patient: dict = database.patients.find()\n for p in patient:\n pat = p[\"patient_data\"]\n patients_in_db.append(pat)\n print(patients_in_db)\n return patients_in_db",
"async def db_query(self, *args, **kwargs):\n rows = []\n async with self.db_pool.acquire() as conn:\n async with conn.cursor(cursor_factory=DictCursor) as cur:\n await cur.execute(*args, **kwargs)\n try:\n async for row in cur:\n rows.append(row)\n except psycopg2.ProgrammingError:\n # No results\n pass\n return rows",
"def get_all_data():\n \n # open the data stored in a file called \"data.json\"\n try:\n fp = open(\"data/data.json\")\n response = simplejson.load(fp)\n # but if that file does not exist, download the data from fusiontables\n except IOError:\n logging.info(\"failed to load file\")\n service = build('fusiontables', 'v1', developerKey=API_KEY)\n query = \"SELECT * FROM \" + TABLE_ID + \" WHERE Animal_Type = 'DOG'\"\n response = service.query().sql(sql=query).execute()\n \n return response",
"def all():\n session = session_maker(\n app.config['MYSQL_USER'], app.config['MYSQL_PASS'], app.config['MYSQL_SERVER_PORT_3306_TCP_ADDR'],\n app.config['MYSQL_SERVER_PORT_3306_TCP_PORT'], app.config['DB'])\n\n print(\n tabulate(\n selection_list_all(session),\n headers=['number', 'sqlid', 'name', 'city', 'state']))",
"def get_all_data():\n return jsonify(service.get_all_data())",
"def select_all():\n sql = 'SELECT * FROM dostawy.przesylki'\n rows = DBconnector.fetch_query(sql)\n return _wrap_in_parcel_list(rows)",
"def fetch_all(q, *params):\n db = Database()\n db.cur.execute(q, params)\n ret = db.cur.fetchall()\n db.con.close()\n return ret",
"def lines(self, request, pk=None):\n shp = self.get_object()\n lines = shp.multilinestringfeatures_set.all()\n '''\n pagination of the geojson to reduce loading time\n '''\n paginator = GeoJsonPagination()\n paginator.page_size = 100\n page = paginator.paginate_queryset(lines, request)\n if page is not None:\n serializer = lineSerializer(page, many=True)\n return paginator.get_paginated_response(serializer.data)\n serializer = lineSerializer(data=lines, many=True)\n serializer.is_valid()\n return Response(serializer.data)",
"def load_stock(self):\n lines = []\n with Transaction().start(DBNAME, 1):\n stock_lines = self.Inventory.search([('state', '=', 'done'), ('location', '=', self.location.id)])\n if stock_lines:\n for i in stock_lines:\n batch = i.batch_number\n for j in i.lines:\n if j.quantity <= 0:\n continue\n dictionary = {}\n dictionary['code'] = j.product.code\n dictionary['item'] = j.product.template.name\n dictionary[\n 'category'] = j.product.template.category.name if j.product.template.category else None\n dictionary['quantity'] = Decimal(j.quantity).quantize(Decimal('0.11')).to_eng()\n dictionary['batch_number'] = batch\n dictionary['supplier'] = j.supplier.name if j.supplier else None\n dictionary['expiry_date'] = j.expiry_date.strftime('%d-%m-%Y') if j.expiry_date else None\n lines.append(dictionary)\n return lines",
"def test_home_by_all_lines(self):\r\n result = self.app.get('/All_lines')\r\n self.assertTrue(b'' in result.data)"
] | [
"0.70531267",
"0.6787454",
"0.6718839",
"0.66217417",
"0.62935805",
"0.62533355",
"0.6238542",
"0.6206922",
"0.611652",
"0.608391",
"0.6056544",
"0.59862447",
"0.5970311",
"0.59272057",
"0.5872251",
"0.58481425",
"0.58452594",
"0.5820792",
"0.58157414",
"0.5807491",
"0.57849103",
"0.57791793",
"0.5766813",
"0.57588965",
"0.5752929",
"0.57316625",
"0.57191473",
"0.57053214",
"0.56994545",
"0.5690506"
] | 0.6937443 | 1 |
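A small usage sketch for the lines_ document above, assuming it is exposed through Flask as several of the row's negatives are — the app object, route path and view name are assumptions, and lines_ is taken to be importable in the same module:

from flask import Flask, Response

app = Flask(__name__)

@app.route("/lines")
def all_lines():
    # lines_() already returns a JSON string, so it only needs the right mimetype
    return Response(lines_(), mimetype="application/json")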
queries the database to insert a line from a character; takes a name, character and episode and returns a confirmation message | def new_line(script_l, character, episode):
if up.check("characters", character):
char_id = up.giveId("characters", character)
else:
up.insertCharacter(character)
char_id = up.giveId("characters", character)
if up.check("episodes", episode):
ep_id = up.giveId("episodes", episode)
else:
up.insertEpisode(episode)
ep_id = up.giveId("episodes", episode)
if up.check("script", script_l) and up.check("characters", character) and up.check("episodes", episode):
return "line exists"
else:
engine.execute(f"""
INSERT INTO script (script_l, characters_char_id, episodes_ep_id) VALUES
("{script_l}", "{char_id}", "{ep_id}");
""")
return f"successfully loaded: {character},{script_l},{episode}" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insertCharacter(string):\n if check(\"character\", string):\n return \"character exists\"\n else:\n engine.execute(f\"INSERT INTO characters (name) VALUES ('{string}');\")",
"def insertLine(row):\n if check(\"script\", row[\"dialogue\"]) and check(\"characters\", row[\"character\"]) and check(\"episodes\", row[\"episode\"]):\n return \"line exists\"\n else:\n if check(\"characters\", row[\"character\"]):\n char_id = giveId(\"characters\", row[\"character\"])\n else:\n insertCharacter(row[\"character\"])\n char_id = giveId(\"characters\", row[\"character\"])\n \n if check(\"episodes\", row[\"episode\"]):\n ep_id = giveId(\"episodes\", row[\"episode\"])\n else:\n insertEpisode(row[\"episode\"])\n ep_id = giveId(\"episodes\", row[\"episode\"])\n #meme optional insert somehow\n #meme_id = 0\n engine.execute(f\"\"\"\n INSERT INTO script (line_n, script_l, characters_char_id, episodes_ep_id) VALUES\n (\"{row['line']}\", \"{row['dialogue']}\", \"{char_id}\", \"{ep_id}\");\n \"\"\")",
"def insertEpisode(ep):\n if check(\"episodes\", ep):\n return \"episode exists\"\n else:\n engine.execute(f\"INSERT INTO episodes (episode) VALUES ('{ep}');\")",
"def insert_row(conn, episode_info):\n\tp_key = get_p_key(episode_info)\n\t\n\tinsert_statement = f'INSERT INTO shows (p_key, show_stub, show_name, season, episode_number, episode_title watched_status, hidden_status) VALUES (\\\"{p_key}\\\", \\\"{episode_info[\"show_stub\"]}\\\", \\\"{episode_info[\"show_name\"]}\\\", {episode_info[\"season\"]}, {episode_info[\"episode_number\"]}, {episode_info[\"episode_title\"]}, {episode_info[\"watched_status\"]}, {episode_info[\"hidden_status\"]});'\n\t\n\texecute_sql(conn, insert_statement)",
"def do_insert(self, pokemon):\n while input(f\"Insert a new record for \\\"{pokemon}\\\" (y?): \") == \"y\":\n self._table.put_item(Item={\n \"Pokemon\": pokemon, # TODO Validate against a list\n \"Index\": _index(),\n \"nickname\": input(\"nickname: \"),\n \"ability\": input(\"ability: \"), # TODO Validate against a list\n \"nature\": input(\"nature: \"), # TODO Validate against a list\n \"ivs\": _statline(\"ivs\", 31),\n \"evs\": _statline(\"evs\", 255),\n # TODO does order of moves matter?\n \"moves\": _moves(),\n \"egg_moves\": input(\"egg moves (,-delim): \").split(\",\"),\n })",
"def create_episode(conn, episode):\n sql = '''INSERT INTO episode(date, id_show, id_corpus, partition, path)\n VALUES(?,?,?,?,?)'''\n cur = conn.cursor()\n cur.execute(sql, episode)\n return cur.lastrowid",
"def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):\n try:\n cursor = dbConnection.cursor()\n title = title.replace(\"'\", \"''\")\n cursor.execute(\"INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('\" + audiourl + \"', NULL, '\" + podcastName + \"', NULL, '\" + description + \"', '\" + parsedDate + \"', '\" + title + \"', FALSE, NULL);\")\n dbConnection.commit()\n cursor.close()\n return True\n except:\n return False\n return False",
"def registerPlayer(name):\n # cn=name\n # title='playerName'\n # data=[title,cn]\n DB = connect()\n c = DB.cursor()\n #cur.execute(\"INSERT INTO test (num, data) VALUES (%s, %s)\",*/\n #c.execute(\"INSERT INTO tournament (playerName) values ('al pachino2') \")\n #c.execute(\"INSERT INTO tournament name values (%s)\", name)\n #cur.execute('INSERT INTO %s (day, elapsed_time, net_time, length, average_speed, geometry) VALUES (%s, %s, %s, %s, %s, %s)', (escaped_name, day, ))\n c.execute(\"INSERT INTO tournament VALUES (%s)\", (name,))\n DB.commit()\n DB.close()",
"def registerPlayer(name):\n DB = connect()\n c = DB.cursor()\n #inserts a new player into the players table, bleach cleans the input to avoid attack \n c.execute(\"INSERT INTO players (player) VALUES (%s)\", (bleach.clean(name), ))\n DB.commit()\n DB.close()",
"def insert_statement() -> str:\n pass",
"def registerPlayer(name):\n if \"'\" in name:\n ap_index = name.index(\"'\")\n name = name[0:ap_index] + \"''\" + name[ap_index+1:]\n \n cursor.execute(\"\"\"insert into players (name) values ('%s')\"\"\" % name)\n gc.commit()",
"def insert(self, name, email, phone, address, state, zip, country, amount, message):\n params = {'name':name, 'email':email, 'phone':phone,'address':address,'state':state,\\\n 'zip':zip,'country':country,'amount':amount,'message':message}\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"insert into foodbank (name, email, phone, address, state, zip, country, amount, message)\\\n VALUES (:name, :email, :phone, :address, :state, :zip, :country, :amount, :message)\", params)\n\n connection.commit()\n cursor.close()\n return True",
"def insert(sql, clue):\n\t# clue is [game, airdate, round, category, value, clue, answer]\n\t# note that at this point, clue[4] is False if round is 3\n\tif \"\\\\\\'\" in clue[6]:\n\t\tclue[6] = clue[6].replace(\"\\\\\\'\", \"'\")\n\tif \"\\\\\\\"\" in clue[6]:\n\t\tclue[6] = clue[6].replace(\"\\\\\\\"\", \"\\\"\")\n\tif not sql:\n\t\tprint clue\n\t\treturn\n\tsql.execute(\"INSERT OR IGNORE INTO airdates VALUES(?, ?);\", (clue[0], clue[1], ))\n\tsql.execute(\"INSERT OR IGNORE INTO categories(category) VALUES(?);\", (clue[3], ))\n\tcategory_id = sql.execute(\"SELECT id FROM categories WHERE category = ?;\", (clue[3], )).fetchone()[0]\n\tclue_id = sql.execute(\"INSERT INTO documents(clue, answer) VALUES(?, ?);\", (clue[5], clue[6], )).lastrowid\n\tsql.execute(\"INSERT INTO clues(game, round, value) VALUES(?, ?, ?);\", (clue[0], clue[2], clue[4], ))\n\tsql.execute(\"INSERT INTO classifications VALUES(?, ?)\", (clue_id, category_id, ))",
"def insert_to_database(self, db):\n \n self.remove_bad_characters()\n print(\"Inserting \"+self.categorie_name+\" to database.\")\n db.query(\"INSERT INTO categorie (categorie_name) VALUES (:categorie_name)\", \\\n categorie_name=self.categorie_name)",
"def register_player(name):\n\n \"\"\" use bleach to clean the name of the registered user \"\"\"\n clean_name = bleach.clean(name, strip=True)\n DB = connect()\n c = DB.cursor()\n c.execute(\"INSERT INTO players (player_name) VALUES (%s)\", (clean_name,))\n DB.commit()\n DB.close()",
"def accept(self):\n if not self.shortCheck.isChecked():\n self.parent.shortadj = False\n self.parent.contpage = False\n self.close()\n return\n basecommand = self.parent.sqlcommand\n self.parent.sqldict[\"name\"] = \"\\'\" + self.enstr + \"\\'\"\n self.parent.sqldict[\"runame\"] = \"\\'\" + self.rustr + \"\\'\"\n self.parent.sqldict[\"gender\"] = \"\\'masculine\\'\"\n self.parent.sqldict[\"declension\"] = \"\\'\" + self.rumascEdit.text() + \"\\'\"\n self.parent.sqldict[\"wordcase\"] = \"\\'nominative\\'\"\n self.parent.sqldict[\"animate\"] = \"\\'inanimate\\'\"\n cols = \"(\"\n data = \"(\"\n for y in self.parent.sqldict:\n cols += y + \", \"\n data += self.parent.sqldict[y] + \", \"\n cols = cols[:-2] + \") VALUES\"\n data = data[:-2] + \");\\n\"\n self.sqlcommand += basecommand + cols + data\n self.parent.sqldict[\"gender\"] = \"\\'feminine\\'\"\n self.parent.sqldict[\"declension\"] = \"\\'\" + self.rufemEdit.text() + \"\\'\"\n cols = \"(\"\n data = \"(\"\n for y in self.parent.sqldict:\n cols += y + \", \"\n data += self.parent.sqldict[y] + \", \"\n cols = cols[:-2] + \") VALUES\"\n data = data[:-2] + \");\\n\"\n self.sqlcommand += basecommand + cols + data\n self.parent.sqldict[\"gender\"] = \"\\'nueter\\'\"\n self.parent.sqldict[\"declension\"] = \"\\'\" + self.runuetEdit.text() + \"\\'\"\n cols = \"(\"\n data = \"(\"\n for y in self.parent.sqldict:\n cols += y + \", \"\n data += self.parent.sqldict[y] + \", \"\n cols = cols[:-2] + \") VALUES\"\n data = data[:-2] + \");\\n\"\n self.sqlcommand += basecommand + cols + data\n self.parent.sqldict[\"gender\"] = \"\\'plural\\'\"\n self.parent.sqldict[\"declension\"] = \"\\'\" + self.ruplurEdit.text() + \"\\'\"\n cols = \"(\"\n data = \"(\"\n for y in self.parent.sqldict:\n cols += y + \", \"\n data += self.parent.sqldict[y] + \", \"\n cols = cols[:-2] + \") VALUES\"\n data = data[:-2] + \");\\n\"\n self.sqlcommand += basecommand + cols + data\n self.parent.shortadjcommand = self.sqlcommand\n self.parent.contpage = False\n self.close()",
"def insert_customer(db, values):\r\n command = \"INSERT INTO waiting (name, username, ru_id, os_platform, description)\"\r\n command = command + \" VALUES (\"\r\n for i in range(len(values)):\r\n command = command + \"?\"\r\n if i == (len(values) - 1):\r\n command = command + \");\"\r\n else:\r\n command = command + \", \"\r\n try:\r\n c = db.cursor()\r\n c.execute(command, values)\r\n c.execute(\"SELECT * FROM waiting ORDER BY cus_num DESC LIMIT 1\")\r\n db.commit()\r\n customer = c.fetchall()\r\n print \"Your number is \" + str(customer[0][0])\r\n except Error as e:\r\n print(e)",
"async def character(self, ctx, character=None):\n\n if character.lower() in [c.lower() for c in self.characters]:\n return await ctx.send(f\"`ERROR: Duplicate Character` {character} is already added.\")\n\n created_char = eqdkp.create_character(character.capitalize())\n if created_char:\n self.characters.append(created_char)\n await ctx.send(f\"{created_char.name} was created!\")\n else:\n await ctx.send(f\"Failed to create {character}. Please try again later, or create them manually.\")",
"def registerPlayer(name):\n regP = c.execute(\"INSERT INTO players (name) VALUES(?)\", (name,)); # remember to make it a tuple\n print \"Successfully added player %s\" % name\n return regP",
"def insert(self, name, email, message):\n params = {'name':name, 'email':email, 'date':date.today(), 'message':message}\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"insert into guestbook (name, email, signed_on, message) VALUES (:name, :email, :date, :message)\", params)\n\n connection.commit()\n cursor.close()\n return True",
"def insertByHand(self):\n\n fieldValues = []\n for field in self.fieldNames:\n fieldValues.append(raw_input(\"Give \" + field + \": \"))\n\n print(self.tableName + \".insert(\" + str(fieldValues) + \")\")\n\n self.insert(fieldValues)",
"def newEquipment(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n equipmentInsertQuery = \"\"\"INSERT into equipment (equipment_id, equipment_name) \r\n VALUES (%s, %s) ON Duplicate KEY UPDATE equipment_id = equipment_id;\"\"\"\r\n try:\r\n for instr in recipe.instructions:\r\n for equip in instr.equipment:\r\n cursor.execute(equipmentInsertQuery, (equip.equipment_id, equip.equipment_name))\r\n db.commit()\r\n except Exception:\r\n print(\"Error: OOPs something went wrong while adding new equipment to the database\")\r\n finally:\r\n cursor.close()\r\n db.close()",
"def registerPlayer(name):\n db, cursor = connect()\n cursor.execute(\"INSERT INTO players (name, wins, matches) VALUES (%s, 0, 0)\" , (name, ) ) \n db.commit() \n db.close()",
"def process(self, row):\n #print (row)\n \n key = conf[\"equipment_id\"]\n \n result = \"test start\" \n result = \"test end\" \n \n data = 1 ## 1\n \n self.db.save(key, result, data)",
"def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()",
"def db_insert(name, task, time, note):\n Entry.create(name=name,\n task=task,\n time=time,\n note=note)\n return main()",
"def create_speaker(conn, speaker):\n\n sql = ''' INSERT INTO speaker(name,gender,native)\n VALUES(?,?,?) '''\n cur = conn.cursor()\n cur.execute(sql, speaker)\n return cur.lastrowid",
"def test_adds_seeming_notes(campaign):\n\n npc.commands.create_character.changeling('changeling mann', 'Beast', 'Hunterheart')\n character = campaign.get_character('changeling mann.nwod')\n assert ' Seeming Beast (8-again animal ken and free specialty; glamour adds to presence and composure; -4 untrained mental; no 10-again on Int)' in character.read()",
"def insert_item(self, text_path, word_first, word_second, word_third, word_fourth, word_fifth):\n conn, cursor = SQLDatabase.connect()\n self.change_database(cursor)\n success = False\n try:\n cursor.execute(\"INSERT INTO {table} (Text_Path, Word_First, Word_Second, Word_Third,\\\n Word_Fourth, Word_Fifth) VALUES ('{path}','{first}', '{second}',\\\n '{third}', '{fourth}', '{fifth}')\\\n \".format(table=self.table_name, path=text_path, first=word_first,\n second=word_second,\n third=word_third, fourth=word_fourth, fifth=word_fifth))\n conn.commit()\n success = True\n except mysql.connector.errors.ProgrammingError as err:\n print(\"{} can't insert item\".format(err))\n finally:\n SQLDatabase.close(cursor, conn)\n\n return success",
"def insert_question(self, id):\n cursor = self.conn.cursor()\n cursor.execute(f\"insert into {self.site} values (?)\", (id, ))\n self.conn.commit()\n cursor.close()"
] | [
"0.68971574",
"0.6643312",
"0.6609034",
"0.63286656",
"0.62008613",
"0.596312",
"0.5952589",
"0.5870909",
"0.58442897",
"0.5829052",
"0.58106846",
"0.58059555",
"0.57868826",
"0.57820517",
"0.5769277",
"0.56395286",
"0.5630925",
"0.56198794",
"0.5592354",
"0.55757797",
"0.5517927",
"0.550607",
"0.55005515",
"0.5493217",
"0.54873043",
"0.5483246",
"0.5479291",
"0.54688966",
"0.5468399",
"0.545576"
] | 0.6655293 | 1 |
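The new_line document above also builds its INSERT with an f-string. A minimal parameterized sketch of just the insert step, assuming char_id and ep_id have already been resolved via the up.giveId helpers as in the original — insert_line and the connection string are hypothetical names:

from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///scripts.db")  # hypothetical connection string

def insert_line(script_l, char_id, ep_id):
    # Bound parameters avoid both SQL injection and quoting problems in the dialogue text
    stmt = text("""
        INSERT INTO script (script_l, characters_char_id, episodes_ep_id)
        VALUES (:script_l, :char_id, :ep_id)
    """)
    with engine.begin() as conn:
        conn.execute(stmt, {"script_l": script_l, "char_id": char_id, "ep_id": ep_id})
    return f"successfully loaded: {script_l}"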
Prints a string representation of a SnakemakeRule instance | def __repr__(self):
template = """
SnakemakeRule ({})
- parent_id : {}
- input : {}
- output : {}
- local : {}
- template : {}
- params : {}
"""
return template.format(
self.rule_id,
self.parent_id,
self.input,
self.output,
self.local,
self.template,
self.params,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n return \"[ %s ]\" % str(self.__rule)",
"def __str__(self):\n return \"{ %s }\" % str(self.__rule)",
"def __str__(self):\n return \"{ %s }1\" % str(self.__rule)",
"def print_rules(self):\n for idx, r in enumerate(self.rules):\n print(idx, \"=>\", r.__repr__())",
"def __str__ (self) :\n\t\ttext_rule = \"\"\n\t\t\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\ttext_rule += \"\\nRULE \" + key + \" = [\\n\\t\"\n\t\t\trule_in_a_line = []\n\t\t\tfor rule in rules :\n\t\t\t\t#rule_in_a_line.append(\" + \".join([r.val+\"(\"+r.type+\")\" for r in rule]))\n\t\t\t\trule_in_a_line.append(\" + \".join([r.__str__() for r in rule]))\n\t\t\ttext_rule += \"\\n\\t\".join(rule_in_a_line) + \"\\n]\"\n\t\ttext_rule += \"\\n\\n\"\n\t\t\n\t\ttext_rule += \"LABELS = \" + json.dumps (self.labels, indent=2) + '\\n\\n'\n\n\t\ttext_rule += \"STRUCT = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join([\n\t\t\t\t\"\\t{} : {{\\n\\t\\t{}\\n\\t}}\\n\".format (\n\t\t\t\t\tkey, \", \\n\\t\\t\".join(val)\n\t\t\t\t) for key, val in self.keeper.items()\n\t\t\t])\n\t\t)\n\t\ttext_rule += \"STRNODE = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join(self.strnodes)\n\t\t)\n\t\tfor regex, label in self.tokens :\n\t\t\ttext_rule += \"TOKEN \" + label + \" = regex('\" + regex + \"')\\n\"\n\n\t\treturn text_rule",
"def view_rule(self, rule_name):\n\n assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'\n print(self.rule_source[rule_name])",
"def __str__(self):\n\n ret = ''\n for rule in self.rules:\n ret += str(rule) + '\\n'\n ret += 'IF TRUE THEN {0}'.format(self.default)\n\n return ret",
"def get_formatted_rule(rule=None):\r\n rule = rule or {}\r\n return ('action: %s\\n'\r\n 'protocol: %s\\n'\r\n 'source_ip_address: %s\\n'\r\n 'source_ip_subnet_mask: %s\\n'\r\n 'destination_ip_address: %s\\n'\r\n 'destination_ip_subnet_mask: %s\\n'\r\n 'destination_port_range_start: %s\\n'\r\n 'destination_port_range_end: %s\\n'\r\n 'version: %s\\n'\r\n % (rule.get('action', 'permit'),\r\n rule.get('protocol', 'tcp'),\r\n rule.get('sourceIpAddress', 'any'),\r\n rule.get('sourceIpSubnetMask', '255.255.255.255'),\r\n rule.get('destinationIpAddress', 'any'),\r\n rule.get('destinationIpSubnetMask', '255.255.255.255'),\r\n rule.get('destinationPortRangeStart', 1),\r\n rule.get('destinationPortRangeEnd', 1),\r\n rule.get('version', 4)))",
"def __str__(self):\n return \"(%s)\" % ' '.join(map(str, self.__subrules))",
"def fmt_rule(rule: Callable, *, gets: Optional[List[Tuple[str, str]]] = None) -> str:\n type_hints = get_type_hints(rule)\n product = type_hints.pop(\"return\").__name__\n params = \", \".join(t.__name__ for t in type_hints.values())\n gets_str = \"\"\n if gets:\n get_members = \", \".join(\n f\"Get[{product_subject_pair[0]}]({product_subject_pair[1]})\"\n for product_subject_pair in gets\n )\n gets_str = f\", gets=[{get_members}]\"\n return f\"@rule({fmt_rust_function(rule)}({params}) -> {product}{gets_str})\"",
"def rule_to_str(self, t):\r\n\r\n if(t[0] == TERMINAL):\r\n return self.terminal_to_str(t[1])\r\n else:\r\n return toRuleString[t[1]]",
"def pretty_str(rule,print_option=PrintOption()):\n if rule.is_terminal() or rule.is_empty():\n content = str(rule)\n if print_option.bikeshed:\n return \"`{}`\".format(content)\n return content\n if rule.is_symbol_name():\n name = rule.content\n def with_meta(phrase,metachar,print_option):\n content = \" \".join([x.pretty_str(print_option) for x in phrase])\n if len(phrase) > 1:\n return \"( {} ){}\".format(content, metachar)\n return \"{} {}\".format(content, metachar)\n if name in print_option.replace_with_starred:\n phrase = print_option.replace_with_starred[name]\n return with_meta(phrase,'*',print_option)\n if name in print_option.replace_with_optional:\n phrase = print_option.replace_with_optional[name]\n return with_meta(phrase,'?',print_option)\n if name in print_option.replace_with_nested:\n po = print_option.clone()\n po.multi_line_choice = False\n content = po.replace_with_nested[name].pretty_str(po)\n return \"( {} )\".format(content)\n if print_option.inline_synthetic and name.find(\"/\") >=0:\n po = print_option.clone()\n po.multi_line_choice = False\n content = po.grammar.rules[name].pretty_str(po)\n return \"( {} )\".format(content)\n\n # Print ourselves\n if print_option.bikeshed:\n context = 'recursive descent syntax'\n g = print_option.grammar\n if g.rules[name].is_token():\n context = 'syntax'\n if name in g.extra_externals:\n context = 'syntax_sym'\n if name == '_disambiguate_template':\n # This is an implementation detail, so make it invisible.\n return ''\n else:\n without_underscore = ['_less_than',\n '_less_than_equal',\n '_greater_than',\n '_greater_than_equal',\n '_shift_left',\n '_shift_left_assign',\n '_shift_right',\n '_shift_right_assign']\n if name in without_underscore:\n name = name[1:]\n return \"[={}/{}=]\".format(context,name)\n return name\n if isinstance(rule,Choice):\n parts = [i.pretty_str(print_option) for i in rule]\n if print_option.multi_line_choice:\n parts.sort()\n\n if print_option.multi_line_choice:\n if print_option.bikeshed:\n nl = \"\\n\\n\"\n prefixer = \"\\n | \"\n else:\n nl = \"\\n\"\n prefixer = \"\\n \"\n else:\n nl = \"\"\n prefixer = \"\"\n joiner = nl + \" | \"\n inside = prefixer + joiner.join([p for p in parts])\n if print_option.is_canonical:\n return inside\n else:\n # If it's not canonical, then it can have nesting.\n return \"(\" + inside + nl + \")\"\n if isinstance(rule,Seq):\n return \" \".join(filter(lambda i: len(i)>0, [i.pretty_str(print_option) for i in rule]))\n if isinstance(rule,Repeat1):\n return \"( \" + \"\".join([i.pretty_str(print_option) for i in rule]) + \" )+\"\n raise RuntimeError(\"unexpected node: {}\".format(str(rule)))",
"def __str__(self):\n return \"(%s)\" % ' | '.join(map(str, self.__subrules))",
"def rule_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"rule_name\")",
"def test_rule_representation():\n rule = MethodRule(method=\"POST\")\n assert repr(rule) == \"MethodRule(method='POST')\", \"Wrong representation\"",
"def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)",
"def __str__(self):\n s = \"\"\n s += self.synset.name + \"\\t\"\n s += \"PosScore: %s\\t\" % self.pos_score\n s += \"NegScore: %s\" % self.neg_score\n return s",
"def __str__(self):\n s = 'Processor ' + __name__\n # if self._rule_files:\n # s += ' running with rules ' + ' '.join(self._rule_files.values())\n\n return s",
"def get_text(self):\n return self.rule_id + '\\t' + self.rule_text",
"def print_rules(self, input_file='rules.txt'):\n\n with open(input_file, 'r') as f_in:\n rules = f_in.readlines()\n for r in rules:\n print(r)",
"def __print_rules(self, left=0):\n\n for line in self.__rules:\n print((\" \" * left) + line, end=\"\")",
"def __str__(self):\n return \"MatchWhite(%s)\" % str(self.__rule)",
"def rule(self) -> str:\n if self._rule:\n return self._rule\n return self._make_rule(member_param=self._member_param,\n unique_member_param=self._unique_member_param)",
"def dumpSMRule(ruleInfos, outputFile, inputFile):\n if 'py' in ruleInfos:\n code = ruleInfos['py']\n if type(code) is str:\n outputFile.write(insertPlaceholders(code, inputFile))\n elif type(code) is list:\n [outputFile.write(insertPlaceholders(line, inputFile) + '\\n') for line in code]\n\n outputFile.write('rule ' + ruleInfos['rule'] + ':\\n')\n for field in SNAKEMAKE_FIELDS:\n if field in ruleInfos:\n outputFile.write(' ' + field + ': ' + str(ruleInfos[field]) + '\\n')",
"def summary_string(self) -> str:\n return f\"dixonoid: {self.plain_rules}\"",
"def __str__(self):\n\n return \"[\" + str(self.quick) + \"] \" + \\\n self.regexp.pattern + \" --> \" + \\\n str(self.handler)",
"def __repr__(self):\n template = \"\"\"\n DataIntegrationRule ({})\n \n - inputs : {}\n - output : {}\n - local : {}\n - template : {}\n - params : {}\n \"\"\"\n\n return template.format(\n self.rule_id,\n self.inputs,\n self.output,\n self.local,\n self.template,\n self.params\n )",
"def __str__( self ):\n assert isinstance( self.level, int )\n assert isinstance( self.prop, WFF )\n assert isinstance( self.justification, Inference )\n\n return \"Step( %d, %s, %s )\" % ( self.num, repr( self.prop ), repr( self.justification ) )",
"def __str__(self):\n return \"Combine(%s)\" % str(self.__rule)",
"def __str__(self):\n name_str = \"node name is %s\\n\" % self.__name\n label_str = \"labels are %s\\n\" % str(self.__labels)\n propety_str = \"properties are %s\\n\" % str(self.__props)\n return name_str + label_str + propety_str"
] | [
"0.7497269",
"0.74336165",
"0.73365",
"0.7002294",
"0.6986714",
"0.6474687",
"0.6371976",
"0.63269794",
"0.62451273",
"0.620827",
"0.6196005",
"0.6190379",
"0.6182506",
"0.6179493",
"0.6122271",
"0.60830194",
"0.60090804",
"0.5991629",
"0.59799564",
"0.5888777",
"0.5873028",
"0.58716637",
"0.58665186",
"0.5865816",
"0.58422565",
"0.5788415",
"0.5774052",
"0.5752707",
"0.5749632",
"0.57353145"
] | 0.7577809 | 0 |
Prints a string representation of DataIntegrationRule instance | def __repr__(self):
template = """
DataIntegrationRule ({})
- inputs : {}
- output : {}
- local : {}
- template : {}
- params : {}
"""
return template.format(
self.rule_id,
self.inputs,
self.output,
self.local,
self.template,
self.params
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n return \"[ %s ]\" % str(self.__rule)",
"def __str__(self):\n return \"{ %s }\" % str(self.__rule)",
"def __str__(self):\n return \"{ %s }1\" % str(self.__rule)",
"def __str__ (self) :\n\t\ttext_rule = \"\"\n\t\t\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\ttext_rule += \"\\nRULE \" + key + \" = [\\n\\t\"\n\t\t\trule_in_a_line = []\n\t\t\tfor rule in rules :\n\t\t\t\t#rule_in_a_line.append(\" + \".join([r.val+\"(\"+r.type+\")\" for r in rule]))\n\t\t\t\trule_in_a_line.append(\" + \".join([r.__str__() for r in rule]))\n\t\t\ttext_rule += \"\\n\\t\".join(rule_in_a_line) + \"\\n]\"\n\t\ttext_rule += \"\\n\\n\"\n\t\t\n\t\ttext_rule += \"LABELS = \" + json.dumps (self.labels, indent=2) + '\\n\\n'\n\n\t\ttext_rule += \"STRUCT = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join([\n\t\t\t\t\"\\t{} : {{\\n\\t\\t{}\\n\\t}}\\n\".format (\n\t\t\t\t\tkey, \", \\n\\t\\t\".join(val)\n\t\t\t\t) for key, val in self.keeper.items()\n\t\t\t])\n\t\t)\n\t\ttext_rule += \"STRNODE = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join(self.strnodes)\n\t\t)\n\t\tfor regex, label in self.tokens :\n\t\t\ttext_rule += \"TOKEN \" + label + \" = regex('\" + regex + \"')\\n\"\n\n\t\treturn text_rule",
"def __str__( self ):\n assert isinstance( self.level, int )\n assert isinstance( self.prop, WFF )\n assert isinstance( self.justification, Inference )\n\n return \"Step( %d, %s, %s )\" % ( self.num, repr( self.prop ), repr( self.justification ) )",
"def __repr__(self):\n template = \"\"\"\n SnakemakeRule ({})\n \n - parent_id : {}\n - input : {}\n - output : {}\n - local : {}\n - template : {}\n - params : {}\n \"\"\"\n return template.format(\n self.rule_id,\n self.parent_id,\n self.input,\n self.output,\n self.local,\n self.template,\n self.params,\n )",
"def print_rules(self):\n for idx, r in enumerate(self.rules):\n print(idx, \"=>\", r.__repr__())",
"def __str__(self):\n outs = str(self.cluster_subspace).split(\"\\n\")[:6]\n\n if self.regression_data is not None:\n # This might need to be redefined to take \"expectation\" using measure\n feature_avg = np.average(self.feature_matrix, axis=0)\n feature_std = np.std(self.feature_matrix, axis=0)\n outs += [\n f\"Regression Data : estimator={self.regression_data.estimator_name}\",\n f\" module={self.regression_data.module}\",\n f\" parameters={self.regression_data.parameters}\",\n f\"Target Property : \"\n f\"mean={np.mean(self.regression_data.property_vector):0.4f} \"\n f\"std={np.std(self.regression_data.property_vector):0.4f}\",\n ]\n fit_var = sum(\n self._subspace.function_total_multiplicities[1:] * self.eci[1:] ** 2\n )\n outs += [\n f\"ECI-based Property : mean={self.eci[0]:0.4f}\"\n f\" std={np.sqrt(fit_var):0.4f}\",\n \"Fit Summary\",\n ]\n\n for i, term in enumerate(self._subspace.external_terms):\n outs.append(f\"{repr(term)}={self.coefs[len(self.eci) + i]:0.3f}\")\n\n if self.regression_data is not None:\n outs += [\n \" ---------------------------------------------------------------------\"\n \"-------------------------------\",\n \" | ID Orbit ID Degree Cluster Diameter ECI Feature AVG\"\n \" Feature STD ECI * STD |\",\n f\" | 0 0 0 NA \"\n f\"{self.eci[0]:^7.3f}{feature_avg[0]:^15.3f}\"\n f\"{feature_std[0]:^15.3f}{feature_std[0] * self.eci[0]:^13.3f}|\",\n ]\n else:\n outs += [\n \" ---------------------------------------------------------\",\n \" | ID Orbit ID Degree Cluster Diameter ECI |\",\n f\" | 0 0 0 NA \"\n f\"{self.eci[0]:^7.3f} |\",\n ]\n\n for degree, orbits in self.cluster_subspace.orbits_by_size.items():\n for orbit in orbits:\n for i, bits in enumerate(orbit.bit_combos):\n line = (\n f\" |{orbit.bit_id + i:^6}{orbit.id:^12}{degree:^10}\"\n f\"{orbit.base_cluster.diameter:^20.4f}\"\n f\"{self.eci[orbit.bit_id + i]:^7.3f}\"\n )\n if self.regression_data is not None:\n line += (\n f\"{feature_avg[orbit.bit_id + i]:^15.3f}\"\n f\"{feature_std[orbit.bit_id + i]:^15.3f}\"\n f\"{feature_std[orbit.bit_id + i] * self.eci[orbit.bit_id + i]:^13.3f}\" # noqa\n )\n line += \"|\"\n outs.append(line)\n outs.append(\" \" + (len(outs[-1]) - 1) * \"-\")\n return \"\\n\".join(outs)",
"def __str__(self):\n r = []\n for item in sorted(self._data.keys()):\n correct, incorrect = self._data[item][True], self._data[item][False]\n acc = correct / (correct + incorrect)\n s = f\"{item:4} | Accuracy: {acc:.2f}% (diff {'+' if acc-item >=0 else ''}{acc-item:.2f}%) | correct: {correct:2}, incorrect: {incorrect:2}\" \n r.append(s)\n\n return \"\\n\".join(r)",
"def summary_string(self) -> str:\n return f\"dixonoid: {self.plain_rules}\"",
"def __str__(self):\n\n ret = ''\n for rule in self.rules:\n ret += str(rule) + '\\n'\n ret += 'IF TRUE THEN {0}'.format(self.default)\n\n return ret",
"def __repr__( self ):\n assert isinstance( self.level, int )\n assert isinstance( self.prop, WFF )\n assert isinstance( self.justification, Inference )\n\n return str( self )",
"def __str__(self):\n output = \"Solution for \" + self.vrpdata.InstanceName + \":\\n\"\n output += \"Total distance: \" + str(round(self.objective, 2)) + \"\\n\"\n output += \"Solution valid: \" + str(self.solutionValid) + \"\\n\\n\"\n count = 1 # count routes\n for r in self.routes:\n output += \"Route #\" + str(count) + \"\\n\" + str(r) + \"\\n\" + str(round(r.distance, 2)) + \"\\n\" + str(r.quantity) + \"\\n\"\n count += 1\n return output",
"def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'",
"def __str__(self):\n return 'GradientAnisotropicDiffusion:\\n' \\\n ' time_step: {self.time_step}\\n' \\\n ' conductance: {self.conductance}\\n' \\\n ' conductance_scaling_update_interval: {self.conductance_scaling_update_interval}\\n' \\\n ' no_iterations: {self.no_iterations}\\n' \\\n .format(self=self)",
"def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)",
"def __repr__(self):\n return f\"{self.__class__.__name__}(validate={self._validate}, axis={self._axis})\"",
"def __str__(self):\n outstr = [\"\\n<%s: %s>\" % (self.__class__, self.name)]\n outstr.append(\"%d graphs\" % len(self._graphs))\n outstr = \"\\n\".join(outstr)\n return outstr",
"def __repr__(self):\n options_str = \", \".join(\n [\n f\"validate={self._validate}\",\n f\"outcome={self._outcome}\",\n f\"alpha_prior={self._alpha_prior}\",\n ]\n )\n return f\"{self.__class__.__name__}({options_str})\"",
"def _to_string(self):\n self.results.print_results()\n self.results.print_comparison()",
"def __repr__(self):\n\n rep = \"\"\n rep += str(self.literal)+\"\\n\"\n rep += str(self.bindings)+\"\\n\"\n rep += str(self.facts)+\"\\n\"\n return (rep)",
"def __str__(self):\n runner = self.__head\n if runner is None:\n return \"\"\n while runner.next_node:\n if runner is not None:\n print(\"{}\".format(runner.data))\n runner = runner.next_node\n return \"{}\".format(runner.data)",
"def __str__(self, printODData = False):\n networkStr = \"Link\\tFlow\\tCost\\n\"\n for ij in sorted(self.link, key=lambda ij : self.link[ij].sortKey):\n networkStr += \"%s\\t%f\\t%f\\n\" % (ij, self.link[ij].flow, self.link[ij].cost)\n if printODData == True:\n networkStr += \"\\n\"\n networkStr += \"OD pair\\tDemand\\tLeastCost\\n\"\n for ODpair in self.ODpair:\n networkStr += \"%s\\t%f\\t%f\\n\" % (ODpair, self.ODpair[ODpair].demand, self.ODpair[ODpair].leastCost)\n return networkStr",
"def __repr__(self) -> str:\n\n thresh = np.get_printoptions()[\"threshold\"]\n np.set_printoptions(threshold=20)\n extra_chars = len(self.__class__.__name__)\n arr_str = \"data=\" + str(self.data).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 6))\n shape_str = (\n \" \" * extra_chars\n + \" shape=\"\n + str(self.shape).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 7))\n )\n dtype_str = \" \" * extra_chars + \" dtype=\" + str(self.dtype)\n np.set_printoptions(threshold=thresh)\n return \"{klass}({data},\\n{shape},\\n{dtype})\".format(\n klass=self.__class__.__name__,\n data=arr_str,\n shape=shape_str,\n dtype=dtype_str,\n )",
"def __str__(self):\n _str = \"Variables:\\n\"\n for variable in self.variables:\n _str += \" {}\\n\".format(str(variable))\n _str += \"\\nConstraints:\\n\"\n for constraint in self.constraints:\n _str += \" {}\\n\".format(str(constraint))\n return _str",
"def __str__(self):\n\n rep = 'Generalized Syllogism:\\n'\n rep += '\\ttask: {}\\n'.format(self.task)\n rep += '\\tencoded_task: {}\\n'.format(self.encoded_task)\n rep += '\\tp1: {}\\n'.format(self.p1)\n rep += '\\tp2: {}\\n'.format(self.p2)\n rep += '\\tquantifier_p1: {}\\n'.format(self.quantifier_p1)\n rep += '\\tquantifier_p2: {}\\n'.format(self.quantifier_p2)\n rep += '\\tfigure: {}\\n'.format(self.figure)\n rep += '\\tTerms:\\n'\n rep += '\\t\\tA: {}\\n'.format(self.A)\n rep += '\\t\\tB: {}\\n'.format(self.B)\n rep += '\\t\\tC: {}\\n'.format(self.C)\n return rep",
"def __str__(self, output=[]):\n\n class_str = 'Analytical Phonon simulation properties:\\n\\n'\n class_str += super().__str__()\n\n return class_str",
"def __str__(self, output=[]):\n\n class_str = 'Numerical Phonon simulation properties:\\n\\n'\n class_str += super().__str__()\n\n return class_str",
"def __repr__(self):\n return 'PCFGRule(%s, %s, %s)' % (self.variable, self.derivation, self.probability)",
"def __str__(self):\n return \"(%s)\" % ' '.join(map(str, self.__subrules))"
] | [
"0.68674856",
"0.68624336",
"0.6645672",
"0.6532725",
"0.6322785",
"0.628434",
"0.62689245",
"0.6190423",
"0.604574",
"0.6039333",
"0.59951794",
"0.5989062",
"0.5976397",
"0.59762686",
"0.5963851",
"0.5931985",
"0.59197044",
"0.59166557",
"0.590007",
"0.5897591",
"0.5883885",
"0.5875002",
"0.5871039",
"0.5841265",
"0.58263445",
"0.57791466",
"0.5776673",
"0.57741016",
"0.5771794",
"0.576991"
] | 0.7635887 | 0 |
Compute the jacobian of a finger at configuration q0. | def compute_jacobian(self, finger_id, q0):
frame_id = self.tip_link_ids[finger_id]
return pinocchio.computeFrameJacobian(
self.robot_model,
self.data,
q0,
frame_id,
pinocchio.ReferenceFrame.LOCAL_WORLD_ALIGNED,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def jacobian(self, dt):\n raise NotImplementedError",
"def jacobian(self, x):\n pass",
"def jacobian_ur5(q, delta=0.0001):\n # Alocacion de memoria\n J = np.zeros((3,6))\n # Transformacion homogenea inicial (usando q)\n T = fkine_ur5(q)\n # Iteracion para la derivada de cada columna\n for i in xrange(6):\n # Copiar la configuracion articular inicial\n dq = copy(q);\n # Incrementar la articulacion i-esima usando un delta\n dq[i] = dq[i] + delta \n dT = fkine_ur5(dq)\n \n J[:,i] = (dT[0:3, 3] - T[0:3, 3])/delta\n\n return J",
"def jacobian(self, dt):\n return self._F_cache",
"def jacobian(x, u):\n yaw = x[2, 0]\n v = u[0, 0]\n jac = np.array([\n [1.0, 0.0, -dt * v * math.sin(yaw), dt * math.cos(yaw)],\n [0.0, 1.0, dt * v * math.cos(yaw), dt * math.sin(yaw)],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n\n return jac",
"def jacobian(self, v):\n from scipy.special import erf, erfcx\n def integrand(u_arr):\n \"\"\"Integrand of self-consistency equation\"\"\"\n integrand_all = erfcx(-u_arr)\n #integrand_all = np.zeros(u_arr.shape)\n #u_mask = u_arr < -4.0\n #u = u_arr[u_mask]\n #integrand_all[u_mask] = -1. / np.sqrt(np.pi) * (1.0 / u - 1.0 / (2.0 * u**3) + \n #3.0 / (4.0 * u**5) - \n #15.0 / (8.0 * u**7))\n #integrand_all[~u_mask] = np.exp(u_arr[~u_mask]**2) * (1. + erf(u_arr[~u_mask]))\n return integrand_all\n\n\n mu_v = self.mu(v)\n sd_v = self.sd(v)\n low = (self.V_r - mu_v) / sd_v # reduced resting potential\n up = (self.theta - mu_v) / sd_v # reduced threshold\n f_low = integrand(low)\n f_up = integrand(up)\n jac_mat_1 = self.tau_m * 1e-3 * np.sqrt(np.pi) * self.mat_mu\n jac_mat_2 = self.tau_m * 1e-3 * np.sqrt(np.pi) * self.mat_var / (2. * sd_v**2)\n\n jac_T = np.diag(1. / v**2) - \\\n jac_mat_1.T * (f_up - f_low) + \\\n jac_mat_2.T * (f_up * up - f_low * low)\n return jac_T.T",
"def _compute_dq(self, finger_id, xdes, q0):\n Ji = self.compute_jacobian(finger_id, q0)[:3, :]\n frame_id = self.tip_link_ids[finger_id]\n xcurrent = self.data.oMf[frame_id].translation\n Jinv = np.linalg.pinv(Ji)\n return Jinv.dot(xdes - xcurrent)",
"def jacobian(self, x1, x2, out=None):\n raise NotImplementedError",
"def jacobian(self, c):\n\n raise NotImplementedError",
"def jacobian_pose_ur5(q, delta=0.0001):\n J = np.zeros((7,6))\n # Transformacion homogenea inicial (usando q)\n T = fkine_ur5(q)\n Q = rot2quat(T[0:3,0:3])\n\n for i in xrange(6):\n dq = copy(q)\n dq[i] = dq[i] + delta\n dT = fkine_ur5(dq)\n dQ = rot2quat(dT[0:3,0:3])\n Jpos = (dT[0:3,3] - T[0:3,3])/delta\n Jrot = (dQ - Q)/delta\n #Jrot \t= np.squeeze(np.asarray(Jrot))\n J[:,i] = np.concatenate((Jpos, Jrot), axis=0)\n \n return J",
"def jacobian_ik(robot, q_init: dict, q_goal: dict, params=None, use_limits=True):\n if params is None:\n tol = 1e-6\n maxiter = 5000\n dt = 1e-3\n method = \"dls_inverse\"\n else:\n tol = params[\"tol\"]\n maxiter = params[\"maxiter\"]\n dt = params[\"dt\"]\n method = params[\"method\"]\n\n n = robot.n\n ub = np.array(variable_dict_to_list(robot.ub))\n lb = np.array(variable_dict_to_list(robot.lb))\n q_bar = (ub + lb) / 2.0\n q = np.array(variable_dict_to_list(q_init))\n\n N_ee = len(robot.end_effectors)\n\n k = 0.01 # DLS jacobian inverse damping factor\n k0 = 20 # joint limit gain\n\n # gains\n K_p = np.eye(3) * 1000 # position gain\n K_o = np.eye(3) * 1000 # orientation gain\n\n K = np.eye(6)\n K[:3, :3] = K_p\n K[3:, 3:] = K_o\n K = np.kron(np.eye(N_ee), K)\n\n count = 0\n\n # Initialize system\n e = error(robot, q, q_goal)\n J, J_star = stacked_jacobians(robot, q)\n ll, llinv = stacked_L(robot, q, q_goal)\n q_dot = np.dot(J_star, np.dot(K, np.dot(llinv, e)))\n # loop unitl error is converged AND all joint angles are within bounds.\n while (\n np.linalg.norm(e) > tol or (any((q > ub) | (q < lb)) and use_limits)\n ) and count < maxiter:\n\n J, J_star = stacked_jacobians(robot, q) # get jacobians\n\n e = error(robot, q, q_goal) # Error to goal\n\n ll, llinv = stacked_L(\n robot, q, q_goal\n ) # Accounting for Euler Error (see eqn. 387 on p. 139)\n\n if use_limits:\n q_dot = (\n -k0 / n * (q - q_bar) / (ub - lb) * q_dot\n ) # Joint angle avoidance using eqn. 3.57 on p. 126\n q_dot = np.dot(J_star, np.dot(K, np.dot(llinv, e))) + np.dot(\n (np.eye(n) - np.dot(J_star, J)), q_dot\n )\n\n q = q + q_dot * dt # update joint angles\n q = (q + np.pi) % (2 * np.pi) - np.pi # wrap angles to -pi to pi\n\n if count % 100 == 0:\n print(\"count: %s\" % count)\n print(\"error: %s\" % e)\n print(\"q_dot: %s\", q_dot)\n U, S, V = np.linalg.svd(J)\n cond = np.min(S) / np.max(S)\n print(\"Jacobian condition: %s\" % cond)\n\n print(\"q: %s\" % q)\n count += 1\n\n if count >= maxiter:\n print(\"Did not find config!\")\n print(\"iterations: %s\" % count)\n print(\"error: %s\" % e)\n ja_violations = (q > ub) | (q < lb)\n print(\"Violations: %s\" % ja_violations)\n return q, count\n else:\n\n print(\"Finished\")\n print(\"iterations: %s\" % count)\n print(\"error: %s\" % e)\n print(\"Joint Angles: %s\" % q)\n ja_violations = (q > ub) | (q < lb)\n print(\"Violations: %s\" % ja_violations)\n return q, count",
"def jacobian_func(f):\n jacobian = jacfwd(f)\n return jacobian",
"def jacobian(self, dt):\n if dt not in self._F_cache:\n d = self._dimension\n with torch.no_grad():\n F = eye_like(self.sa2, d)\n F[: d // 2, d // 2 :] = dt * eye_like(self.sa2, d // 2)\n self._F_cache[dt] = F\n\n return self._F_cache[dt]",
"def jacobian(self,x,p,fun):\n n = self.n\n y = fun(x,p)\n h = 1e-4\n nout = np.size(y)\n dfdx = np.zeros((nout,n))\n for j in range(n):\n dx1 = np.zeros(n)\n dx2 = np.zeros(n)\n dx1[j] = -h\n dx2[j] = h\n dfdx[:,j] = (fun(x+dx2,p)-fun(x+dx1,p))/(2*h)\n return dfdx",
"def jacobian(f, x, dx):\n x = np.atleast_1d(x)\n dx = np.atleast_1d(dx)\n nx = len(x)\n ny = 0\n jacobi = None\n e = np.zeros(nx)\n for ix in xrange(nx):\n e *= 0\n e[ix] = 1\n deriv = np.atleast_1d((f(x + e * dx) - f(x - e * dx)) / (2 * dx[ix]))\n if ix == 0:\n ny = len(deriv)\n jacobi = np.empty((ny, nx))\n jacobi[:, ix] = deriv\n return jacobi",
"def jacobian(f, x, epsilon = 1e-10):\n f_ = f(x)\n value = np.zeros((len(f_), len(x)))\n \n for i in range(len(x)):\n f_ = partial_derivative(f, x, i, epsilon)\n value[:,i] = f_\n\n return value",
"def jacobian(self, t, x, u, w):\n a= u[0]\n theta = x[2]\n v = x[3]\n fx = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [-v*np.sin(theta), v*np.cos(theta), 0, 0],\n [np.cos(theta), np.sin(theta), 0, 0]])\n fu = np.array([[0, 0, 0, 1],\n [0, 0, 1, 0]])\n w = w * self.w_scale\n fw = np.array([[np.cos(theta), - np.sin(theta), 0, 0],\n [np.sin(theta), np.cos(theta), 0, 0],\n [0, 0, v, 0],\n [0, 0, 0, v]])\n return [fx, fu, fw]",
"def jacobian(f, x):\n\n B, N = x.shape\n x.requires_grad = True\n in_ = torch.zeros(B, 1)\n \n y = f(in_, x)\n jacobian = list()\n \n for i in range(N):\n v = torch.zeros_like(y)\n v[:, i] = 1.\n dy_i_dx = torch.autograd.grad(y,\n x,\n grad_outputs=v,\n retain_graph=True,\n create_graph=True,\n allow_unused=True)[0] # shape [B, N]\n jacobian.append(dy_i_dx)\n\n jacobian = torch.stack(jacobian, dim=2).requires_grad_()\n\n return jacobian",
"def jacobian(Lfrac, Lstar_10, qlf):\n D = np.tile(qlf.c_B*Lstar_10**qlf.k_B, [len(Lfrac),1])\n Lfrac_2D = np.tile(Lfrac, [len(qlf.c_B),1]).T\n return np.sum(-D*Lfrac_2D**qlf.k_B,axis=1) / np.sum(D*(qlf.k_B -1)*Lfrac_2D**qlf.k_B,axis=1)\n #return np.sum(D*(1.+qlf.k_B)*Lfrac_2D**qlf.k_B, axis=1)/np.sum(D*Lfrac_2D**qlf.k_B, axis=1)",
"def jacobian_i(self, x):\n return np.matrix([-x**3, -x**2, -x, -1])",
"def _compute_jacobian(self):\n q_sum = np.cumsum(self._q)\n self._sines = np.sin(q_sum)\n self._cosines = np.cos(q_sum)\n (s_1, s_12, s_123) = self._sines\n (c_1, c_12, c_123) = self._cosines\n self._jacobian = np.array([\n np.cumsum([\n self._jnt_lengths[2] * c_123,\n self._jnt_lengths[1] * c_12,\n self._jnt_lengths[0] * c_1\n ])[::-1], # compute jacobian 1st row\n np.cumsum([\n -self._jnt_lengths[2] * s_123,\n -self._jnt_lengths[1] * s_12,\n -self._jnt_lengths[0] * s_1\n ])[::-1] # jacobian 2nd row\n ])\n self._jacobian_psinv = np.matmul(\n self._jacobian.T,\n np.linalg.inv(np.matmul(self._jacobian, self._jacobian.T))\n )",
"def jacobian_c(self, x, out=None, **kwargs):\n return empty_matrix(0, self.nx)",
"def get_jacobian_spatial(self, qs=None) -> np.ndarray:\n if qs is None:\n qs = self.get_current_joint_position()\n return self.robot.jacob0(qs)",
"def jacobian_linear(self, joint_angles: dict, query_frame: str = \"\") -> np.ndarray:\n\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = []\n for ee in self.end_effectors: # get p nodes in end-effectors\n if ee[0][0] == \"p\":\n end_effector_nodes += [ee[0]]\n if ee[1][0] == \"p\":\n end_effector_nodes += [ee[1]]\n\n node_names = [\n name for name in self.structure if name[0] == \"p\"\n ] # list of p node ids\n\n # Ts = self.get_full_pose_fast_lambdify(joint_angles) # all frame poses\n Ts = self.get_all_poses(joint_angles) # all frame poses\n Ts[\"p0\"] = np.eye(4)\n\n J = np.zeros([0, len(node_names) - 1])\n for ee in end_effector_nodes: # iterate through end-effector nodes\n ee_path = kinematic_map[ee][:-1] # no last node, only phys. joint locations\n\n T_0_ee = Ts[ee] # ee frame\n p_ee = T_0_ee[0:3, -1] # ee position\n\n Jp_t = np.zeros([3, len(node_names) - 1]) # translation jac for theta\n Jp_al = np.zeros([3, len(node_names) - 1]) # translation jac alpha\n for joint in ee_path: # algorithm fills Jac per column\n T_0_i = Ts[joint]\n z_hat_i = T_0_i[:3, 2]\n x_hat_i = T_0_i[:3, 0]\n p_i = T_0_i[:3, -1]\n j_idx = node_names.index(joint)\n Jp_t[:, j_idx] = np.cross(z_hat_i, p_ee - p_i)\n Jp_al[:, j_idx] = np.cross(x_hat_i, p_ee - p_i)\n\n J_ee = np.vstack([Jp_t, Jp_al])\n J = np.vstack([J, J_ee]) # stack big jac for multiple ee\n\n return J",
"def __calc_jacobian_matrix(self):\n\n tf_matrix_first_to_last = self.tf_matrices_list[-1]\n self.jacobian_matrix = [diff(tf_matrix_first_to_last[:3, -1], self.q[i]).reshape(1, 3) for i in range(len(self.q))]\n self.jacobian_matrix = Matrix(self.jacobian_matrix).T # .T returns the transpose of matrix.",
"def JacobianFunction(p,x,y,z):\n \n n = len(x)\n \n J = np.array([ np.ones((n)),x,x**2,y,y**2,x*y ])\n \n return J",
"def jacobian(self, x):\n x_ = np.atleast_2d(x)\n if self.normalize:\n x_ = (x_ - self.sample_mean) / self.sample_std\n s_ = (self.samples - self.sample_mean) / self.sample_std\n else:\n s_ = self.samples\n\n fx, jf = self.reg_model(x_)\n rx, drdx = self.corr_model(x=x_, s=s_, params=self.corr_model_params, dx=True)\n y_grad = np.einsum('ikj,jm->ik', jf, self.beta) + np.einsum('ijk,jm->ki', drdx.T, self.gamma)\n if self.normalize:\n y_grad = y_grad * self.value_std / self.sample_std\n if x_.shape[1] == 1:\n y_grad = y_grad.flatten()\n return y_grad",
"def _calc_J(self, name, x, lambdify=True):\n\n J = None\n J_func = None\n filename = name + '[0,0,0]' if np.allclose(x, 0) else name\n filename += '_J'\n\n # check to see if should try to load functions from file\n J, J_func = self._load_from_file(filename, lambdify)\n\n if J is None and J_func is None:\n # if no saved file was loaded, generate function\n print('Generating Jacobian function for %s' % filename)\n\n Tx = self._calc_Tx(name, x=x, lambdify=False)\n # NOTE: calculating the Jacobian this way doesn't incur any\n # real computational cost (maybe 30ms) and it simplifies adding\n # the orientation information below (as opposed to using\n # sympy's Tx.jacobian method)\n # TODO: rework to use the Jacobian function and automate\n # derivation of the orientation Jacobian component\n J = []\n # calculate derivative of (x,y,z) wrt to each joint\n for ii in range(self.N_JOINTS):\n J.append([])\n J[ii].append(Tx[0].diff(self.q[ii])) # dx/dq[ii]\n J[ii].append(Tx[1].diff(self.q[ii])) # dy/dq[ii]\n J[ii].append(Tx[2].diff(self.q[ii])) # dz/dq[ii]\n\n if 'EE' in name:\n end_point = self.N_JOINTS\n elif 'link' in name:\n end_point = int(name.strip('link'))\n elif 'joint' in name:\n end_point = int(name.strip('joint'))\n # can't have more joint derivatives than there are joints\n end_point = min(end_point, self.N_JOINTS)\n\n # add on the orientation information up to the last joint\n for ii in range(end_point):\n J[ii] = J[ii] + list(self.J_orientation[ii])\n # fill in the rest of the joints orientation info with 0\n for ii in range(end_point, self.N_JOINTS):\n J[ii] = J[ii] + [0, 0, 0]\n J = sp.Matrix(J).T # correct the orientation of J\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/%s' % (self.config_folder, filename))\n cloudpickle.dump(J, open(\n '%s/%s/%s' % (self.config_folder, filename, filename), 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return J\n\n if J_func is None:\n J_func = self._generate_and_save_function(\n filename=filename, expression=J,\n parameters=self.q+self.x)\n return J_func",
"def jacobian_d(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out, **kwargs)",
"def jacobian_d(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out, **kwargs)"
] | [
"0.7174676",
"0.7125965",
"0.71125233",
"0.702315",
"0.6786565",
"0.6759826",
"0.6759339",
"0.66214025",
"0.6568994",
"0.653416",
"0.6530263",
"0.6495432",
"0.64650875",
"0.64634913",
"0.64042807",
"0.6402122",
"0.6401185",
"0.6370891",
"0.6329893",
"0.63101774",
"0.63079286",
"0.63009334",
"0.62985003",
"0.62862515",
"0.6283873",
"0.6275171",
"0.6254552",
"0.6234334",
"0.6226862",
"0.6226862"
] | 0.7850823 | 0 |
Creates the initial search space using latin hypercube sampling. | def lhs_start(hyperbounds, n_samples, rng=None):
low_bounds = []
high_bounds = []
for bound in hyperbounds:
low_bounds.append(bound[0])
high_bounds.append(bound[1])
low_bounds = np.array(low_bounds, dtype=object)
high_bounds = np.array(high_bounds, dtype=object)
samples = sample_latin_hypercube(low_bounds, high_bounds, n_samples, rng=rng)
samples = samples.tolist()
return samples | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_latin_hypercube(samples, param_dict, class_root, seed=10):\n # Set random seed\n random.seed(seed)\n\n # Create dictionary to hold sampled parameter values\n sample_points = {}\n for key in param_dict.keys():\n sample_points[key] = np.zeros(samples)\n Ndim = len(param_dict.keys())\n pnames = [key for key in param_dict.keys()]\n\n # List of indices for each dimension\n l = [range(samples) for j in range(Ndim)]\n\n # Generate samples until there are no indices left to choose\n for i in range(samples):\n\n # Randomly choose index and then remove the number that was chosen\n # (Latin hypercubes require at most one item per row and column)\n for j, p in enumerate(pnames):\n pmin, pmax = param_dict[p]\n idx = random.choice(l[j])\n\n # Get value at this sample point (add 0.5 to idx get bin centroid)\n sample_points[p][i] = pmin + (pmax - pmin) \\\n * (idx + 0.5) / float(samples)\n l[j].remove(idx) # Remove choice from list (sampling w/o replacement)\n\n return sample_points",
"def latin_hypercube(n_pts, dim):\n X = np.zeros((n_pts, dim))\n centers = (1.0 + 2.0 * np.arange(0.0, n_pts)) / float(2 * n_pts)\n for i in range(dim): # Shuffle the center locataions for each dimension.\n X[:, i] = centers[np.random.permutation(n_pts)]\n\n # Add some perturbations within each box\n pert = np.random.uniform(-1.0, 1.0, (n_pts, dim)) / float(2 * n_pts)\n X += pert\n return X",
"def latin_hypercube(n_pts, mins, maxs):\n #return a latin_hypercube\n design = lhs(np.size(maxs), samples=n_pts)\n for i in range(2):\n design[:, i] = design[:, i] * (maxs[i]-mins[i]) + mins[i]\n return design",
"def localInitialize(self):\n SVL = self.readFromROM()\n self._generateQuadsAndPolys(SVL)\n #print out the setup for each variable.\n msg = self.printTag+' INTERPOLATION INFO:\\n'\n msg += ' Variable | Distribution | Quadrature | Polynomials\\n'\n for v in self.quadDict:\n msg += ' '+' | '.join([v,self.distDict[v].type,self.quadDict[v].type,self.polyDict[v].type])+'\\n'\n msg += ' Polynomial Set Degree: '+str(self.maxPolyOrder)+'\\n'\n msg += ' Polynomial Set Type : '+str(SVL.indexSetType)+'\\n'\n self.raiseADebug(msg)\n\n self.raiseADebug('Starting index set generation...')\n self.indexSet = IndexSets.factory.returnInstance(SVL.indexSetType)\n self.indexSet.initialize(self.features, self.importanceDict, self.maxPolyOrder)\n if self.indexSet.type=='Custom':\n self.indexSet.setPoints(SVL.indexSetVals)\n\n self.sparseGrid = Quadratures.factory.returnInstance(self.sparseGridType)\n self.raiseADebug(f'Starting {self.sparseGridType} sparse grid generation...')\n self.sparseGrid.initialize(self.features, self.indexSet, self.dists, self.quadDict, self.jobHandler)\n\n if self.writeOut is not None:\n msg = self.sparseGrid.__csv__()\n outFile = open(self.writeOut,'w')\n outFile.writelines(msg)\n outFile.close()\n\n self.limit=len(self.sparseGrid)\n self.raiseADebug(f'Size of Sparse Grid: {self.limit}')\n self.raiseADebug('Finished sampler generation.')\n\n self.raiseADebug('indexset:',self.indexSet)\n for SVL in self.ROM.supervisedContainer:\n SVL.initialize({'SG': self.sparseGrid,\n 'dists': self.dists,\n 'quads': self.quadDict,\n 'polys': self.polyDict,\n 'iSet': self.indexSet})",
"def setUp(self):\n self.grid = SudukuGrid(BaseCase)\n for i in range(81):\n self.grid[i] = SudukuAlphabet.VALUES[(i+(i//9)*3+i//27)%9]",
"def sample_latin_hypercube(low, high, n_samples, rng=None):\n if rng is None:\n rng = np.random.RandomState(np.random.randint(0, 10000))\n\n n_dims = low.shape[0]\n\n samples = []\n for i in range(n_dims):\n if isinstance(low[i], numbers.Integral):\n sample = random.sample(range(low[i], high[i]), n_samples)\n elif isinstance(low[i], numbers.Real):\n lower_bound = low[i]\n upper_bound = high[i]\n sample = lower_bound + rng.uniform(0, 1, n_samples) * (upper_bound - lower_bound)\n else:\n raise ValueError('Latin hypercube sampling can only draw from types int and real,'\n ' got {}!'.format(type(low[i])))\n\n samples.append(sample)\n\n samples = np.array(samples, dtype=object)\n\n for i in range(n_dims):\n rng.shuffle(samples[i, :])\n\n return samples.T",
"def latin_hypercube_sampler(n=1, indim=1, bounds=None, rng=None):\r\n rng = ensure_rng(rng)\r\n if bounds is None:\r\n bounds = np.zeros((indim, 2))\r\n bounds[:,1] = 1. \r\n # Divide each dimension into `n` equal intervals\r\n hypercubes = np.linspace(bounds[:,0], bounds[:,1], n+1)\r\n \r\n l = hypercubes[:-1,:].reshape(-1,)\r\n u = hypercubes[1:,:].reshape(-1,)\r\n _x = rng.uniform(l,u, (1, indim*n)).reshape(n, indim)\r\n x = _x\r\n for j in range(indim):\r\n x[:,j] = _x[rng.permutation(n), j]\r\n return x",
"def initialize(self):\n#TODO: choose user defined START position\n values_type = np.dtype(float)\n self.visual_field = np.zeros(self.number_of_locs, dtype=values_type)\n self.weighted_sums = np.zeros(self.number_of_locs, dtype=values_type)\n self.prior_prob = 1.0 / np.prod(self.number_of_locs)\n self.post_probs = np.full(\n self.number_of_locs, self.prior_prob, dtype=values_type\n )\n starting_location = np.array(START)\n self.focus = get_index_of_in(starting_location,self.senzory_map)\n self.target_location = [\n x for x in xrange(self.number_of_locs) if x != self.focus\n ][random.randint(0,self.number_of_locs-2)]",
"def __init__(self, initial, size, horizontalChunks, verticalChunks, goal = \"\"):\n\t\tself.initial = initial\n\t\tself.size = size\n\t\tself.horChunks = horizontalChunks\n\t\tself.verChunks = verticalChunks\n\n\t\t# Goal holds the solution, once we find it.\n\t\tself.goal = goal\n\n\t\t# For a puzzle of size n, initializes blank n x n 2d array\n\t\tself.graph = [[0 for x in range(self.size)] for x in range(self.size)] \n\t\tfor i in range (0,self.size):\n\t\t\tfor j in range (0,self.size):\n\t\t\t\tself.graph[i][j] = initial[i*self.size + j] \n\t\tself.initial = \"\"",
"def gen_hypercube(samples, N):\n\n np.random.seed(4654562)\n hypercube = lhs(N, samples=samples)\n\n return hypercube",
"def __init__(self):\n super().__init__()\n self.type = 'SparseGridCollocationSampler'\n self.printTag = 'SAMPLER '+self.type.upper()\n self.maxPolyOrder = None #L, the relative maximum polynomial order to use in any dimension\n self.indexSetType = None #TP, TD, or HC; the type of index set to use\n self.polyDict = {} #varName-indexed dict of polynomial types\n self.quadDict = {} #varName-indexed dict of quadrature types\n self.importanceDict = {} #varName-indexed dict of importance weights\n self.maxPolyOrder = None #integer, relative maximum polynomial order to be used in any one dimension\n self.lastOutput = None #pointer to output dataObjects object\n self.ROM = None #pointer to ROM\n self.jobHandler = None #pointer to job handler for parallel runs\n self.doInParallel = True #compute sparse grid in parallel flag, recommended True\n self.dists = {} #Contains the instance of the distribution to be used. keys are the variable names\n self.writeOut = None\n self.indexSet = None\n self.sparseGrid = None\n self.features = None\n self.sparseGridType = None\n self.addAssemblerObject('ROM', InputData.Quantity.one)",
"def initialise():\n _initialiseGlobals()\n for pop in AnadPartOfPerspectiveDb.Iterator():\n _addToKnowledge(pop)\n return",
"def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)",
"def setUp(self):\n # generate lattice\n self.lattice = lattice.Lattice()\n self.lattice.addAtom(\"He\", [0,0,0], 0)\n self.lattice.addAtom(\"He\", [2,0,0], 0)\n self.lattice.addAtom(\"He\", [0,2,0], 0)\n self.lattice.addAtom(\"He\", [0,0,2], 0)\n self.lattice.addAtom(\"He\", [9,9,9], 0)\n self.lattice.addAtom(\"He\", [2,2,0], 0)\n self.lattice.addAtom(\"He\", [2,0,2], 0)\n self.lattice.addAtom(\"He\", [0,2,2], 0)\n self.lattice.addAtom(\"He\", [2,2,2], 0)\n \n # indexes of cluster atoms\n self.bigClusterIndexes = [0,1,2,3,5,6,7,8]\n self.smallClusterIndexes = [4]\n \n # filter\n self.filter = clusterFilter.ClusterFilter(\"Cluster\")",
"def main():\n\n parser = argparse.ArgumentParser(description='Create a new Wordsearch')\n parser.add_argument('size', type=grid_size_type,\n help=\"height and width of our wordsearch grid (min: 3)\")\n parser.add_argument('wordfile', type=argparse.FileType('r'),\n help=\"file including words to search for\")\n parser_args = parser.parse_args()\n\n new_matrix = Matrix(parser_args.size)\n\n words_to_find = create_word_list_from_file(parser_args.wordfile, parser_args.size)\n\n words_found = []\n for word in words_to_find:\n if word not in words_found and word in new_matrix:\n words_found.append(word)\n\n print(\"\\n{}\\n\\n{}\\n\".format(new_matrix, \" \".join(sorted(words_found))))",
"def at_object_creation(self):\r\n with open(\"./commands/CSW15.txt\") as word_file:\r\n self.db.csw15 = set(word.strip().upper() for word in word_file)\r\n self.db.centre = \"\" \r\n self.db.tiledict = {'A' : 9,\r\n 'B' : 2,\r\n 'C' : 2,\r\n 'D' : 4,\r\n 'E' : 12,\r\n 'F' : 2,\r\n 'G' : 3,\r\n 'H' : 2,\r\n 'I' : 9,\r\n 'J' : 1,\r\n 'K' : 1,\r\n 'L' : 4,\r\n 'M' : 2,\r\n 'N' : 6,\r\n 'O' : 8,\r\n 'P' : 2,\r\n 'Q' : 1,\r\n 'R' : 6,\r\n 'S' : 4,\r\n 'T' : 6,\r\n 'U' : 4,\r\n 'V' : 2,\r\n 'W' : 2,\r\n 'X' : 1,\r\n 'Y' : 2,\r\n 'Z' : 1,\r\n '?' : 0\r\n } #removing blanks from play; blanks make it very slow. Change here, in dict\r\n self.db.tilestring = list(''.join([L*self.db.tiledict[L] for L in string.ascii_uppercase+'?']))",
"def setUp(self):\n np.random.seed(1234)\n\n _TEST_FILE_NAME = 'AHN3.las'\n _TEST_DATA_SOURCE = 'testdata'\n\n _CYLINDER = InfiniteCylinder(4)\n _PC_260807 = load(os.path.join(_TEST_DATA_SOURCE, _TEST_FILE_NAME))\n _PC_1000 = copy_point_cloud(_PC_260807, array_mask=(\n np.random.choice(range(len(_PC_260807[keys.point]['x']['data'])), size=1000, replace=False)))\n _1000_NEIGHBORHOODS_IN_260807 = list(compute_neighbors.compute_neighborhoods(_PC_260807, _PC_1000, _CYLINDER))\n\n self.point_cloud = _PC_260807\n self.neigh = _1000_NEIGHBORHOODS_IN_260807",
"def convert_searchspace(self, hyperparameter):\n LOG.debug(\"convert input parameter\\n\\n\\t{}\\n\".format(pformat(hyperparameter)))\n searchspace = [[], []]\n for name, param in hyperparameter.items():\n if param[\"domain\"] != \"categorical\" and \"frequency\" not in param.keys():\n param[\"frequency\"] = DEFAULTGRIDFREQUENCY\n warnings.warn(\"No frequency field found, used default gridsearch frequency {}\".format(DEFAULTGRIDFREQUENCY))\n\n if param[\"domain\"] == \"categorical\":\n searchspace[0].append(name)\n searchspace[1].append(param[\"data\"])\n elif param[\"domain\"] == \"uniform\":\n searchspace[0].append(name)\n searchspace[1].append(get_uniform_axis_sample(param[\"data\"][0],\n param[\"data\"][1],\n param[\"frequency\"],\n param[\"type\"]))\n elif param[\"domain\"] == \"normal\":\n searchspace[0].append(name)\n searchspace[1].append(get_gaussian_axis_sample(param[\"data\"][0],\n param[\"data\"][1],\n param[\"frequency\"],\n param[\"type\"]))\n elif param[\"domain\"] == \"loguniform\":\n searchspace[0].append(name)\n searchspace[1].append(get_logarithmic_axis_sample(param[\"data\"][0],\n param[\"data\"][1],\n param[\"frequency\"],\n param[\"type\"]))\n return searchspace",
"def initialize(self, search_space, names, outer_i=None):\n name = search_space.name\n names = copy.deepcopy(names)\n names.append(name)\n output_dim = self.cells[-1].hidden_size\n\n num_inner = self.search_space.eval_(search_space.num_inner, **locals())\n if len(num_inner) > 1:\n key = f'{\"_\".join(names[:-1])}_{len(num_inner)}_{name}s'\n add_if_doesnt_exist(self.policies, key, nn.Linear(output_dim, len(num_inner)))\n add_if_doesnt_exist(self.values, key, nn.Linear(output_dim, len(num_inner)))\n\n add_increment(self.embedding_index, f'{name}_start')\n add_increment(self.embedding_index, f'{name}_end')\n\n self.adapt(search_space.outer.items(), names, outer_i)\n\n for i in range(max(num_inner)):\n add_increment(self.embedding_index, f'{i+1}_{name}s')\n if isinstance(search_space.inner, (list, tuple)):\n for space in search_space.inner: self.initialize(space, names, i)\n elif isinstance(search_space.inner, SearchSpace):\n self.initialize(search_space.inner, names, i)\n else:\n assert isinstance(search_space.inner, dict), \\\n 'Inner search space must be either list, dict or SearchSpace.'\n self.adapt(search_space.inner.items(), names, outer_i)\n add_increment(self.embedding_index, f'{name}_inner_done')",
"def initialize(self):\n self.SIZE = self.vectors.shape[0]\n # todo can use max distance to allocation farthest apart points\n self.centroids = self.vectors[[random.randint(1, self.SIZE) for x in range(self.K)], :]",
"def grid_search(self):\n\t\t''' common settings without grid-search '''\n\t\tbinary_rele, unknown_as_zero = False, False\n\t\tcommon_data_dict = dict(data_id=self.data_id, dir_data=self.dir_data, min_docs=10, min_rele=1,\n\t\t\t\t\t\t\t\tunknown_as_zero=unknown_as_zero, binary_rele=binary_rele)\n\n\t\tdata_meta = get_data_meta(data_id=self.data_id) # add meta-information\n\t\tcommon_data_dict.update(data_meta)\n\n\t\t''' some settings for grid-search '''\n\t\tchoice_presort = [True] if self.debug else [True]\n\t\tchoice_sample_rankings_per_q = [1] if self.debug else [1] # number of sample rankings per query\n\t\tchoice_scale_data, choice_scaler_id, choice_scaler_level = get_default_scaler_setting(data_id=self.data_id, grid_search=True)\n\n\t\tfor scale_data, scaler_id, scaler_level, presort, sample_rankings_per_q in product(choice_scale_data,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_scaler_id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_scaler_level,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_presort,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_sample_rankings_per_q):\n\n\t\t\tself.data_dict = dict(presort=presort, sample_rankings_per_q=sample_rankings_per_q,\n\t\t\t\t\t\t\t\t scale_data=scale_data, scaler_id=scaler_id, scaler_level=scaler_level)\n\t\t\tself.data_dict.update(common_data_dict)\n\t\t\tyield self.data_dict",
"def initialize( self, layout, numGhostAgents=1000 ):\n self.data.initialize(layout, numGhostAgents) ##self.data is defined in the Grid() class of game.py REF112.It creates an initial game state from a layout array (see layout.py).",
"def __init__(self, center_words, context_words, neg_samples): \n self.center_words = center_words\n self.context_words = context_words\n self.neg_samples = neg_samples\n # The index of the data the batch should start from. \n self.data_index = 0",
"def test_init_experiment(self):\n optimizer = \"RandomSearch\"\n name = \"test_init_experiment\"\n param_defs = {\n \"x\": MinMaxNumericParamDef(0, 1),\n \"name\": NominalParamDef([\"A\", \"B\", \"C\"])\n }\n minimization = True\n\n LAss = PrettyLabAssistant()\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)\n\n exp_ass = LAss.exp_assistants[name]\n\n assert_equal(exp_ass.optimizer, optimizer)\n assert_is_none(exp_ass.optimizer_arguments, None)\n assert_equal(exp_ass.experiment.minimization_problem, minimization)\n with assert_raises(ValueError):\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)",
"def __init__(self, hyperparameters, total_dim, num_is):\n self._dim = total_dim # dimension of IS \\times search space\n self._num_is = num_is # Number of information sources, then including 0th IS (truth), size of hyper should be dim * (num_is+1).\n # Note: it's not (dim+1)*(num_is+1) because dimension of search space is (dim-1), plus the multiplication factor param is dim\n self.set_hyperparameters(hyperparameters)",
"def build_index(self):\n # Init the HNSWLIB index\n self.create_index()\n logger.info(f\"Building HNSWLIB index, max_elements: {len(self.corpus)}\")\n logger.debug(f\"Parameters Required: M: {self.M}\")\n logger.debug(f\"Parameters Required: ef_construction: {self.ef_construction}\")\n logger.debug(f\"Parameters Required: ef(>topn): {self.ef}\")\n\n # Then we train the index to find a suitable clustering\n self.index.add_items(self.corpus_embeddings, list(range(len(self.corpus_embeddings))))",
"def init_embedding(size=50):\n vector = np.random.normal(0.0, 0.01, size)\n return vector",
"def __init__(self):\n # better to be a prime number, less collision\n self.key_space = 2069\n self.hash_table = [Bucket() for i in range(self.key_space)]",
"def create_index(args, client):\n policy = {}\n client.index_geo2dsphere_create(args.nspace, args.set,\n LOCBIN, LOCNDX, policy)\n client.index_integer_create(args.nspace, args.set,\n HSHBIN, HSHNDX, policy)",
"def initializeDistribution(self):\n self.checkDistParams()\n\n self.lowerBound = min(self.mapping.keys())\n self.upperBound = max(self.mapping.keys())"
] | [
"0.6223337",
"0.59628797",
"0.58912903",
"0.5874907",
"0.5854502",
"0.5754657",
"0.5733271",
"0.56222767",
"0.5502466",
"0.54998386",
"0.54974604",
"0.5460404",
"0.5426823",
"0.5370437",
"0.53134114",
"0.5280649",
"0.52736545",
"0.5264173",
"0.52439547",
"0.52239805",
"0.52133995",
"0.5201464",
"0.5197781",
"0.5195287",
"0.5166887",
"0.51579237",
"0.5157122",
"0.5151636",
"0.5146957",
"0.5110705"
] | 0.6189054 | 1 |
converts time to gmt, appends to list | def gmt(time):
gmt = [0]*time.size
for i in range(time.size):
gmt[i]=datetime.utcfromtimestamp(time[i]).strftime('%Y-%m-%d %H:%M:%S')
return gmt | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_time(self):\r\n\r\n curr_time = datetime.datetime.now()\r\n time = []\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.second)])\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.minute)])\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.hour)])\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.day)])\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.month)])\r\n time.append([int(x) for x in '{0:06b}'.format(curr_time.year - 2000)])\r\n return time",
"def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]",
"def get_times():\n global times\n global times_list\n base_url = \"http://www.crawleymosque.com/\"\n r = requests.get(base_url)\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n\n times_list = []\n for salah_time in soup.find_all(class_=\"prayer-start\"):\n times_list.append(salah_time.contents[0].strip())\n\n print(times_list)\n times = []\n for i in times_list:\n datetime_object = datetime.strptime(i, \"%I:%M %p\")\n just_time = datetime.time(datetime_object)\n times.append(just_time)\n\n print(times)\n\n # spam = Label(root, text=\"checking for spam\")\n # spam.place(x=460, y=110)",
"def time_to_hour_and_minute(time):\n return [time // 60, time % 60]",
"def convert_time(time):\n\n s = time.split()[0]\n s_h = int(s.split(':')[0])\n\n am_pm = s.split(':')[1][-2:]\n if s_h == 12:\n s_h = s_h - 12\n if am_pm == 'PM':\n s_h = s_h + 12\n s_h = s_h + 1\n\n e = time.split()[2]\n e_h = int(e.split(':')[0])\n\n am_pm = e.split(':')[1][-2:]\n if e_h == 12:\n e_h = e_h - 12\n if am_pm == 'PM':\n e_h = e_h + 12\n e_h = e_h + 1\n\n hour_list = range(s_h, e_h + 1)\n return hour_list",
"def filter_lower_datetime(time, list_time):\n return [t for t in list_time if t <= time]",
"def convert_time(self, t_variable):\n date_list = []\n times = self.dataset[t_variable].values\n\n for time in times:\n try:\n time = pd.to_datetime(str(time))\n date_list.append(time.strftime('%Y-%m-%dT%H:%M:%SZ'))\n except ValueError as ve:\n print(\"Error parsing and converting '%s' variable object to CovJSON compliant string.\" % (t_variable), ve)\n\n return date_list",
"def convert_seconds_to_readable(self, time_value):\n time_readable = []\n for value in time_value:\n time_readable_mini = time.strftime('%I:%M:%S%p', time.localtime(value))\n time_readable.append(time_readable_mini)\n mylog.debug('Converting %s to %s' % (value, time_readable_mini))\n return time_readable",
"def get_time(t):\n return [time.clock()-t[0], time.time()-t[1]]",
"def list_times(self, start: int = None, end: int = None) -> List:\n return [i.time for i in self.data[start:end]]",
"def get_timestamped_strings(self):\n ret_list = []\n i = 0\n while i < len(self.__string_list):\n ret_list.append(self.__timestamp_list[i].strftime(\"%Y-%m-%d %H:%M:%S\")+\" \"+self.__string_list[i])\n i += 1\n return ret_list",
"def get_time_strs(self):\n\n log(\"Getting time strings starting at {}\".format(self._t0))\n tz = dt.timezone.utc\n mkdt = lambda n: dt.datetime.fromtimestamp(\n self._t0 - (self._delta * n),\n tz=tz\n )\n ns = range(self._frames, 0, -1)\n return [mkdt(n).strftime('%Y%m%d%H%M') for n in ns]",
"def get_times(my_vars):\n base_time = my_vars['base_time'].getValue()\n try:\n times=my_vars['time']\n except KeyError:\n times = my_vars['time_offset']\n\n ts = []\n for time in times:\n temp = datetime.utcfromtimestamp(base_time+time)\n if (temp.minute == 0) :\n ts.append(temp)\n return ts",
"def _get_timestamps(self, time_interval: RawTimeIntervalType | None, bbox: BBox) -> list[dt.datetime]:",
"async def _timein_list(self):\n\t\t\n\t\tmessage = 'Favourites\\n```Name: Timezones\\n'\n\t\t\n\t\tfor fav in self.favourites:\n\t\t\tmessage += fav + ': '\n\t\t\tmessage += self.favourites[fav].replace(',', ', ').replace('_', ' ') + '\\n'\n\t\t\n\t\tmessage += '```'\n\t\tawait self.bot.say(message)",
"def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second",
"def add_timecard(self,time,name):\n id = self.find_employee_id(name)\n if id in self.timecard:\n self.timecard[id].append(time)\n else:\n self.timecard[id] = [time]\n return self.timecard",
"def sort_time(self):\n self.entries.sort(key=lambda x: x.date_stamp_utc)",
"def get_current_time():\n cur_time = datetime.datetime.now() + offset_time\n return [cur_time.year, cur_time.month, cur_time.day, cur_time.hour, cur_time.min, cur_time.second]",
"def _splitTime(self, time): \n if (time):\n x = re.split(\"[-\\/\\s:]\", time)\n else:\n x = []\n # Pad the list to four elements (year,month,day,hour)\n while (len(x) < 4):\n x.append(None)\n return x",
"def order_by_ftime(tasks_lst):\n return sorted(tasks_lst, key=lambda task: task[1])",
"def conv_time(l, h):\n\t# Function modified from post on ActiveState by John Nielsen\n\n\t#converts 64-bit integer specifying the number of 100-nanosecond\n\t#intervals which have passed since January 1, 1601.\n\t#This 64-bit value is split into the\n\t#two 32 bits stored in the structure.\n\td = 116444736000000000L \n\n\t# Some LNK files do not have time field populated \n\tif l + h != 0:\n\t\tnewTime = (((long(h) << 32) + long(l)) - d)/10000000 \n\telse:\n\t\tnewTime = 0\n\n\treturn time.strftime(\"%Y/%m/%d %H:%M:%S %a\", time.localtime(newTime))",
"def add_times_of_travels(dfs_splited: list, time: datetime.datetime) -> list:\n results = google_api_request(dfs_splited, time)\n logger.debug(\"Ready answer for request\")\n print(results)\n travel_times = [[i.get('duration', {}).get('value')\n for i in result['rows'][0]['elements']]\n for result in results]\n logger.debug(\"Times of travel extracted\")\n\n return [df.assign(time_sec=time_t)\n for df, time_t in zip(dfs_splited, travel_times)]",
"def time_to_view(view, edit, fmt):\n for s in view.sel():\n if s.empty():\n view.insert(edit, s.a, time.strftime(fmt))\n else:\n view.replace(edit, s, time.strftime(fmt))",
"def get_timestring_from_int(time_array, format=\"%H:%M:%S\"):\n list = []\n for value in time_array:\n list.append((value, int2dt(value, 1).strftime(format)))\n return list",
"def add_time(t):\n\n times.append(t)\n\n # update number display to show real time\n number_display.time = t\n number_display.update()\n\n # generate new scramble and update scramble_image\n new_scramble = generate_scramble(int(settings['puzzle']),\n int(settings['scramble-length']))\n scrambles.append(new_scramble)\n scramble_image.clear()\n scramble_image.chars = char(new_scramble)\n\n ao5, ao12 = update_stats()\n\n with open(session_file.string, 'a') as f:\n if len(times) == 1:\n f.write(f'{add_zero(t)}\\t{ao5}\\t{ao12}\\t{new_scramble}')\n else:\n f.write(f'\\n{add_zero(t)}\\t{ao5}\\t{ao12}\\t{new_scramble}')",
"def get_time(text_time):\n # return Observer.datetime_to_astropy_time(dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M'))\n the_time = dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M')\n return Time(the_time.strftime('%Y-%m-%d %H:%M'))\n #date = [int(i) for i in date.split('/')]",
"def get_teams_and_schedule():\n start_time = timedelta(hours=19)\n time_to_add = timedelta(minutes=15)\n teams = session.query(Team).all()\n\n for team in teams:\n team.time = str(start_time)\n start_time += time_to_add\n yield team",
"def gprmc_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[2] == 'V':\r\n return\r\n raw_date = gps[9]\r\n time = ''\r\n date = raw_date[0:2]\r\n month = raw_date[2:4]\r\n year = raw_date[4:]\r\n #modify year if reaches year 2100\r\n time += date + '/' + month + '/20' + year\r\n return [time]",
"def datetime_to_list(date):\n return [date.year, date.month, date.day,\n date.hour, date.minute, date.second]"
] | [
"0.6536344",
"0.61749583",
"0.5915965",
"0.57811874",
"0.5765255",
"0.5675775",
"0.5667193",
"0.5663627",
"0.5660941",
"0.5621869",
"0.5613156",
"0.55986845",
"0.5576497",
"0.5538767",
"0.55233485",
"0.55079854",
"0.5443805",
"0.5407144",
"0.53734505",
"0.53541595",
"0.53422564",
"0.5310349",
"0.5274767",
"0.5265787",
"0.52646846",
"0.5240537",
"0.52287173",
"0.5223971",
"0.5217923",
"0.52029425"
] | 0.72068673 | 0 |
finds stations that don't have predictand data and appends them to a list | def miss_station(all_stations,stations):
diff = len(all_stations)-len(stations)
k=0
i=0
miss_stations = ['']*diff
a = all_stations[:]
a.sort()
s = stations[:]
s.sort()
while i < len(stations):
while a[i] != s[i]:
miss_stations[k]=a[i]
del a[i]
k+=1
i+=1
return miss_stations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stations():\n\n return station_list",
"def prep_stations(url):\n stations = []\n _stations = requests.get(url).json()\n\n for _station in _stations['stationBeanList']:\n if _station['statusKey'] == 1:\n stations.append([_station['stationName'], _station['id'],\n _station['availableDocks'], _station['totalDocks'],\n _station['latitude'], _station['longitude']])\n\n return stations",
"def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}",
"def get_processed_stations(out_dir):\n lista = [ f.split('_')[0] for f in os.listdir(out_dir) if '.nc' in f ]\n #print('Skipping these stations: ' , lista )\n return lista",
"def train_stations(self) -> List[str]:\n return sorted([train_info['HE'] for train_info in train_api.stations_info.values()])",
"def stations(station_let):\n\tstat = ['']*np.size(station_let,0)\n\tfor i in range(len(stat)):\n\t\tfor j in range(4):\n\t\t\tif station_let[i][j] is not np.ma.masked:\n\t\t\t\tstat[i]+=station_let[i][j]\n\treturn stat",
"def _build_stations(self, stop_list):\n # stations = [] TODO: What is this for\n dists = self._euclidian_distances(stop_list)\n stations = self._calculate_y_lines(dists)\n return stations",
"async def get_train_stations(self, latitude: float, longitude: float,\n valid_stations=None) -> list:\n params = {\n 'location': '{},{}'.format(latitude, longitude),\n 'key': self.api_key,\n 'type': \"train_station\",\n \"radius\": 1600\n }\n\n logging.info(\"Getting train stations near (%f, %f)\", latitude, longitude)\n\n async with aiohttp.ClientSession() as session:\n async with session.post('https://maps.googleapis.com/maps/api/place/nearbysearch/json',\n params=params) as response:\n if response.status == HTTPStatus.OK:\n payload = await response.json()\n if payload['status'] == 'OK':\n if valid_stations:\n return [result[\"name\"] for result in payload[\"results\"]\n if result[\"name\"] in valid_stations]\n\n return [result[\"name\"] for result in payload[\"results\"]]\n\n return []",
"def all(self, skip_cache=False):\n now = _time_ms(datetime.datetime.utcnow())\n if skip_cache or now - self._last_updated > CACHE_LIMIT:\n self._process_stations()\n return self._stations_lst",
"def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations",
"def station_list() -> List[Dict]:\n return STATIONS",
"def test_get_closest_stations(self):\n\t\tpoint = \"POINT(40.71911552 -74.00666661)\"\n\t\tstations = set(server.get_closest_stations(point))\n\t\t# find the closest stations, make them a set of objects see if sets intersect completely",
"def create_list() -> List[Optional[float]]:\n return [None] * num_stations",
"def getStations(self) :\n return self._stations",
"def get_all_stations(session: Session) -> List[Row]:\n return session.query(PlanningWeatherStation.station_code).all()",
"def stations(self):\n for stat in sorted(self.station_records):\n yield self.station_records[stat]",
"def read_noaa_stations(self):\n # wget -c http://weather.noaa.gov/data/nsd_bbsss.txt\n #72;656;KSFD;Winner, Bob Wiley Field Airport;SD;United States;4;43-23-26N;099-50-33W;;;619;;\n #93;246;NZRO;Rotorua Aerodrome;;New Zealand;5;38-07S;176-19E;38-07S;176-19E;285;294;\n #block;synop;icao;name;?;country;??;lat;lon;lat2;lon2;height;?;\n #0 1 2 3 4 5 6 7 8 9 10 11 12\n if not os.path.exists(self.noaa_filename):\n LOGGER.warning('could not find noaa file \"%s\"', self.noaa_filename)\n return self.known_stations\n count = 0\n with open(self.noaa_filename, 'r') as csvfile:\n stationreader = csv.reader(csvfile, delimiter=';')\n for row in stationreader:\n station_id = '{}{}'.format(row[0], row[1])\n station_id_icao = row[2].strip().upper()\n data = noaa_station_data_from_row(row)\n if data is not None:\n count += 1\n self.known_stations[station_id] = data\n if len(station_id_icao) == 4 and station_id_icao.isalpha():\n self.known_stations[station_id_icao] = data\n self.noaa_file_age = os.path.getmtime(self.noaa_filename)\n LOGGER.info(' Loaded %i noaa station records from \"%s\"', count, self.noaa_filename)\n return self.known_stations",
"def _add_data(self, model_stations: Iterable[model.Station],\n validate_prefix: str = \"\") -> int:\n valid_station_count = 0\n jreast_merged_codes: dict[model.StationID, str] = load_csv_as_mapping(\n DIR_CURATED / \"jreast_merged_codes.csv\",\n itemgetter(\"sta_id\"),\n itemgetter(\"code\")\n )\n\n # Add data from model stations\n for model_sta in model_stations:\n is_invalid = False\n should_validate = model_sta.id.startswith(validate_prefix)\n\n # Find a matching geo_sta\n geo_sta = self.by_id.get(model_sta.id)\n if not geo_sta:\n if should_validate:\n self.logger.critical(f\"{Color.RED}geo.osm is missing station \"\n f\"{Color.MAGENTA}{model_sta.id}{Color.RESET}\")\n self.valid = False\n continue\n\n # Find a name\n name_id = last_part(geo_sta.id)\n geo_sta.name = self.names.get(name_id)\n if geo_sta.name is None and should_validate:\n self.logger.critical(f\"{Color.RED}sta_names.csv is missing name for \"\n f\"{Color.MAGENTA}{name_id}{Color.RESET}\")\n is_invalid = True\n\n # Copy stop_code\n geo_sta.code = model_sta.code\n\n # Check if station was valid\n if is_invalid:\n self.valid = False\n elif should_validate:\n valid_station_count += 1\n\n # Generate codes and names for mother stations\n for sta in self.by_id.values():\n if not sta.children:\n continue\n\n name_id = last_part(sta.id)\n sta.name = self.names.get(name_id)\n if not sta.name:\n self.logger.critical(f\"{Color.RED}sta_names.csv is missing name for \"\n f\"{Color.MAGENTA}{name_id}{Color.RESET}\")\n is_invalid = True\n\n # Get children codes\n children_codes = []\n jreast_merged_code = jreast_merged_codes.get(sta.id)\n if jreast_merged_code:\n children_codes.append(jreast_merged_code)\n\n for child in sta.children:\n # Ignore JR-East child codes if there's a JR-East merged code\n if child.id.startswith(\"JR-East\") and jreast_merged_code:\n continue\n elif child.code:\n children_codes.append(child.code)\n\n sta.code = \"/\".join(children_codes)\n\n return valid_station_count",
"def stations(self):\n stations = []\n f = self._fetch(Citibike.STATION_URL)\n data = json.load(f)\n if 'stationBeanList' not in data or len(data['stationBeanList']) == 0:\n raise BadResponse('Station Fetch Failed', data)\n for station in data['stationBeanList']:\n stations.append(Station._from_json(station))\n logging.debug(\"Retrieved %d stations\" % len(stations))\n return stations",
"async def get_stations() -> List[WeatherStation]:\n # Check if we're really using the api, or loading from pre-generated files.\n use_wfwx = config.get('USE_WFWX') == 'True'\n if use_wfwx:\n return await _get_stations_remote()\n return _get_stations_local()",
"def get_all_stations(engine): \n # Query db\n sql = (\"SELECT DISTINCT a.station_id, \"\n \" a.station_code, \"\n \" a.station_name, \"\n \" c.station_type, \"\n \" d.latitude, \"\n \" d.longitude \"\n \"FROM nivadatabase.projects_stations a, \"\n \" nivadatabase.stations b, \"\n \" nivadatabase.station_types c, \"\n \" niva_geometry.sample_points d \"\n \"WHERE a.station_id = b.station_id \"\n \"AND b.station_type_id = c.station_type_id \"\n \"AND b.geom_ref_id = d.sample_point_id \"\n \"ORDER BY a.station_id\")\n df = pd.read_sql(sql, engine)\n\n return df",
"def get_stations(self):\n return self.__request('stations')['stations']",
"def get_stations(base_url, hts, mtype):\n stns1 = ws.site_list(base_url, hts, location='LatLong') # There's a problem with Hilltop that requires running the site list without a measurement first...\n stns1 = ws.site_list(base_url, hts, location='LatLong', measurement=mtype)\n stns2 = stns1[(stns1.lat > -47.5) & (stns1.lat < -34) & (stns1.lon > 166) & (stns1.lon < 179)].dropna().copy()\n stns2.rename(columns={'SiteName': 'ref'}, inplace=True)\n\n return stns2",
"def create_station_list(self):\n sorted_station_list = sorted(self.station_dict, key=self.station_dict.get)\n\n return sorted_station_list",
"def stations(self):\n try:\n stations_api = requests.get(self._stations_url)\n stations = {}\n for station in stations_api.json():\n station_id = station['id']\n station_name = station['name']\n stations[station_id] = station_name\n\n return stations\n except (RequestException, KeyError) as exc:\n LOG.error('could not read from api: %s', exc)\n raise SlfError('could not read from api: %s' % exc) from None",
"def list_stations(intent, session):\n stations = location.get_stations(config.bikes_api)\n street_name = intent['slots']['street_name']['value']\n possible = location.matching_station_list(stations,\n street_name,\n exact=True)\n street_name = street_name.capitalize()\n\n if len(possible) == 0:\n return reply.build(\"I didn't find any stations on %s.\" % street_name,\n is_end=True)\n elif len(possible) == 1:\n sta_name = location.text_to_speech(possible[0]['name'])\n return reply.build(\"There's only one: the %s \"\n \"station.\" % sta_name,\n card_title=(\"%s Stations on %s\" %\n (config.network_name, street_name)),\n card_text=(\"One station on %s: %s\" %\n (street_name, possible[0]['name'])),\n is_end=True)\n else:\n last_name = location.text_to_speech(possible[-1]['name'])\n speech = \"There are %d stations on %s: \" % (len(possible),\n street_name)\n speech += (', '.join([location.text_to_speech(p['name'])\n for p in possible[:-1]]) +\n ', and %s' % last_name)\n card_text = (\"The following %d stations are on %s:\\n%s\" %\n (len(possible), street_name,\n '\\n'.join(p['name'] for p in possible)))\n return reply.build(speech,\n card_title=(\"%s Stations on %s\" %\n (config.network_name, street_name)),\n card_text=card_text,\n is_end=True)",
"def get_tasks_that_fit_station(self, station: Station) -> TaskList:\n return TaskList([task for task in self._tasks if station.can_fit(task)])",
"def stations():\n # Query all stations before a given date 2017\n results = session.query(Measurement.date, Measurement.tobs).filter(func.strftime(\"%Y\", Measurement.date) >= \"2017\").all()\n all_results = list(np.ravel(results))\n \n return jsonify(all_results)",
"def read_table_stations(self):\n if not os.path.exists(self.station_table_filename):\n LOGGER.warning('could not find station.table file \"%s\"', self.station_table_filename)\n return self.known_stations\n count = 0\n with open(self.station_table_filename, 'r') as textfile:\n lines = textfile.read().split(LF)\n for line in lines:\n station_id, data = read_table_station_from_line(line)\n if station_id is not None:\n self.known_stations[station_id] = data\n count += 1\n self.station_file_age = os.path.getmtime(self.station_table_filename)\n LOGGER.info(' Loaded %i station records from \"%s\"', count, self.station_table_filename)\n return self.known_stations",
"def _get_stations_local() -> List[dict]:\n LOGGER.info('Using pre-generated json to retrieve station list')\n with open(weather_stations_file_path) as weather_stations_file:\n json_data = json.load(weather_stations_file)\n return json_data['weather_stations']"
] | [
"0.6918483",
"0.63394576",
"0.6295055",
"0.6175655",
"0.6134076",
"0.6036043",
"0.59990704",
"0.5990439",
"0.5939404",
"0.59325945",
"0.5899833",
"0.5873434",
"0.58216053",
"0.5731162",
"0.5703947",
"0.5658584",
"0.5635071",
"0.563275",
"0.56268054",
"0.5625375",
"0.56150836",
"0.5578745",
"0.5568343",
"0.5566908",
"0.5550945",
"0.5497827",
"0.5480448",
"0.54793215",
"0.5447455",
"0.54438853"
] | 0.66159266 | 1 |
Hexlify raw text, return hexlified text. | def hexlify(text):
if six.PY3:
text = text.encode('utf-8')
hexlified = binascii.hexlify(text)
if six.PY3:
hexlified = hexlified.decode('utf-8')
return hexlified | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unhexlify(text):\n unhexlified = binascii.unhexlify(text)\n\n if six.PY3:\n unhexlified = unhexlified.decode('utf-8')\n\n return unhexlified",
"def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])",
"def normalize(self, text):\n\n return binascii.hexlify(text)",
"def to_hex(text):\n return ' '.join([hex(ord(char)) for char in unicode(text, 'UTF-8')])",
"def encrypt(text):\r\n\r\n cipher = fuzz(text)\r\n return hexify(cipher)",
"def test_unhexlify_not_python():\n assert '' == uflash.unhexlify(\n ':020000040003F7\\n:10E000000000000000000000000000000000000010')",
"def test_hexlify():\n result = uflash.hexlify(TEST_SCRIPT)\n lines = result.split()\n # The first line should be the extended linear address, ox0003\n assert lines[0] == ':020000040003F7'\n # There should be the expected number of lines.\n assert len(lines) == 5",
"def hex(self):\n return binascii.hexlify(self.data)",
"def test_unhexlify():\n hexlified = uflash.hexlify(TEST_SCRIPT)\n unhexlified = uflash.unhexlify(hexlified)\n assert unhexlified == TEST_SCRIPT.decode('utf-8')",
"def hexlify(self: str, verbose=False):\n nbytes = len(_chunk_bs(self))\n buf = b''\n strlen = ''\n for b in to_bytes(_chunk_bs(self)):\n buf+=b\n# for s in _from_list(_chunk_bs(self)):\n# strlen+=f'{ _bit_length(s): 02d}'\n if verbose:\n for n in range(nbytes):\n strlen += f'{_bit_length(_from_list(_chunk_bs(self))[n])} @[{n}] '\n print(strlen)\n return buf",
"def _hexlify(data):\n if data is None:\n return None\n elif isinstance(data, bytes) or isinstance(data, bytearray):\n return data.hex()\n elif isinstance(data, list):\n return [_hexlify(item) for item in data]\n elif isinstance(data, dict):\n return {k: _hexlify(v) for k, v in data.items()}\n else:\n return data",
"def hexify_word(word):\r\n\r\n return ''.join([str(hex(ord(c))[2::]) for c in word])",
"def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")",
"def as_hex(self):\n return binascii.hexlify(self.as_bytes()).decode('ascii')",
"def hex(cls, x):\n return c_hex(x)",
"def _dehex(s):\n import re\n import binascii\n\n # Remove all non-hexadecimal digits\n s = re.sub(br'[^a-fA-F\\d]', b'', s)\n # binscii.unhexlify works in Python 2 and Python 3 (unlike\n # thing.decode('hex')).\n return binascii.unhexlify(s)",
"def test_unhexlify_bad_unicode():\n assert '' == uflash.unhexlify(\n ':020000040003F7\\n:10E000004D50FFFFFFFFFFFFFFFFFFFFFFFFFFFFFF')",
"def hexstring(self):\n if self.current != b\"<\":\n self.on_parser_error(\"Hexadecimal string expected\")\n self.next()\n token = b''\n self.maybe_spaces_or_comments()\n while self.is_hex_digit:\n token += self.next()\n self.maybe_spaces_or_comments()\n\n ch = self.next()\n if ch != b'>':\n self.on_parser_error(\"Wrong hexadecimal string\")\n if len(token) % 2:\n # if there is an odd number of digits - the last one should be assumed 0\n token += b'0'\n return HexString(token.decode(DEFAULT_ENCODING).upper())",
"def preprocess_hashes(tex):\n blocks = catlist()\n rx = hash_rx\n m = rx.search(tex)\n while m:\n if len(m.group(2)) > 40:\n tex2htm.warn(\"Possible runaway hash: {}\".format(text_sample(m.group(2))))\n raise(None)\n blocks.append(tex[:m.start()])\n blocks.append(re.sub(r'(^|[^\\\\])%', r'\\1\\%', m.group(0)))\n tex = tex[m.end():]\n m = rx.search(tex)\n blocks.append(tex)\n return \"\".join(blocks)",
"def ascii_to_phred64(c):\r\n return ascii_to_phred(c, 64)",
"def remove_hex(text): \n return re.sub(r'&.*?;', r'', text)",
"def test_embed_hex():\n python = uflash.hexlify(TEST_SCRIPT)\n result = uflash.embed_hex(uflash._RUNTIME, python)\n # The resulting hex should be of the expected length.\n assert len(result) == len(python) + len(uflash._RUNTIME) + 1 # +1 for \\n\n # The hex should end with a newline '\\n'\n assert result[-1:] == '\\n'\n # The Python hex should be in the correct location.\n py_list = python.split()\n result_list = result.split()\n start_of_python_from_end = len(py_list) + 5\n start_of_python = len(result_list) - start_of_python_from_end\n assert result_list[start_of_python:-5] == py_list\n # The firmware should enclose the Python correctly.\n firmware_list = uflash._RUNTIME.split()\n assert firmware_list[:-5] == result_list[:-start_of_python_from_end]\n assert firmware_list[-5:] == result_list[-5:]",
"def hex(string):\n return string.encode('hex')",
"def tohex(data: str) -> str:\n match = re.fullmatch(r\"^0[x|X][0-9a-fA-F]+\", data)\n if match:\n return data.lower()\n match = re.fullmatch(r\"^[0-9a-fA-F]+[h|H]$\", data)\n if not match:\n raise ValueError(f\"Required hex of the form `0x` or `H` found {data}\")\n match = re.match(r\"^[0-9a-fA-F]+\", data)\n return f\"0x{match.group().lower()}\"",
"def entity_encode_hex(input, errors='strict'):\n output = ''\n for character in input:\n if character in ('&', '<', '>'):\n output += \"&#x%s;\" % character.encode('hex')\n else:\n output += character\n\n return (output, len(input))",
"def str_sha(raw_sha):\n return hexlify(raw_sha)[:12]",
"def hexify(c):\n try:\n s = c.encode(\"utf-8\").encode(\"hex\")\n except UnicodeDecodeError:\n s = 0\n n = len(s)\n if n <= 2: return s\n a = ' - '.join([s[i:i+2] for i in range(0,n,2)])\n return a[:-1]",
"def hexdigest(self):\n # bytes.hex() is simpler, but not available For Python <= 3.4\n return \"\".join(\"{0:0>2x}\".format(b) for b in self.digest())",
"def _encode_text(self):\n\n print(f\"Hex encode; received message is {self.message}\")\n return self.message.encode(\"utf-8\").hex()",
"def toHexa(data):\n\tresult = \"\"\n\tif isBytes(data):\n\t\tdata = data.decode(\"latin-1\")\n\tfor i in data:\n\t\tresult += \"\\\\x%02X\"%ord(i)\n\treturn result"
] | [
"0.7632533",
"0.7439492",
"0.70764095",
"0.65433866",
"0.6380229",
"0.61830765",
"0.61594963",
"0.61447966",
"0.6132453",
"0.6124988",
"0.61229116",
"0.6114684",
"0.59884095",
"0.5947459",
"0.59325373",
"0.5788667",
"0.5782058",
"0.57663274",
"0.5741124",
"0.57352805",
"0.57184196",
"0.569126",
"0.5679554",
"0.56763494",
"0.5665832",
"0.56610286",
"0.5636232",
"0.56199336",
"0.5613361",
"0.56061125"
] | 0.78595716 | 0 |
Unhexlify raw text, return unhexlified text. | def unhexlify(text):
unhexlified = binascii.unhexlify(text)
if six.PY3:
unhexlified = unhexlified.decode('utf-8')
return unhexlified | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hexlify(text):\n if six.PY3:\n text = text.encode('utf-8')\n\n hexlified = binascii.hexlify(text)\n\n if six.PY3:\n hexlified = hexlified.decode('utf-8')\n\n return hexlified",
"def test_unhexlify_not_python():\n assert '' == uflash.unhexlify(\n ':020000040003F7\\n:10E000000000000000000000000000000000000010')",
"def test_unhexlify():\n hexlified = uflash.hexlify(TEST_SCRIPT)\n unhexlified = uflash.unhexlify(hexlified)\n assert unhexlified == TEST_SCRIPT.decode('utf-8')",
"def normalize(self, text):\n\n return binascii.hexlify(text)",
"def test_unhexlify_bad_unicode():\n assert '' == uflash.unhexlify(\n ':020000040003F7\\n:10E000004D50FFFFFFFFFFFFFFFFFFFFFFFFFFFFFF')",
"def _hexlify(data):\n if data is None:\n return None\n elif isinstance(data, bytes) or isinstance(data, bytearray):\n return data.hex()\n elif isinstance(data, list):\n return [_hexlify(item) for item in data]\n elif isinstance(data, dict):\n return {k: _hexlify(v) for k, v in data.items()}\n else:\n return data",
"def test_hexlify():\n result = uflash.hexlify(TEST_SCRIPT)\n lines = result.split()\n # The first line should be the extended linear address, ox0003\n assert lines[0] == ':020000040003F7'\n # There should be the expected number of lines.\n assert len(lines) == 5",
"def _dehex(s):\n import re\n import binascii\n\n # Remove all non-hexadecimal digits\n s = re.sub(br'[^a-fA-F\\d]', b'', s)\n # binscii.unhexlify works in Python 2 and Python 3 (unlike\n # thing.decode('hex')).\n return binascii.unhexlify(s)",
"def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])",
"def test_hexlify_empty_script():\n assert uflash.hexlify('') == ''",
"def CUnescape(text):\n # type: (str) -> bytes\n\n def ReplaceHex(m):\n # Only replace the match if the number of leading back slashes is odd. i.e.\n # the slash itself is not escaped.\n if len(m.group(1)) & 1:\n return m.group(1) + 'x0' + m.group(2)\n return m.group(0)\n\n # This is required because the 'string_escape' encoding doesn't\n # allow single-digit hex escapes (like '\\xf').\n result = _CUNESCAPE_HEX.sub(ReplaceHex, text)\n\n return (result.encode('utf-8') # Make it bytes to allow decode.\n .decode('unicode_escape')\n # Make it bytes again to return the proper type.\n .encode('raw_unicode_escape'))",
"def unH(s):\n return ''.join([chr(int(s[i:i+2],16)) for i in range(2, len(s),2)])",
"def str_sha(raw_sha):\n return hexlify(raw_sha)[:12]",
"def hexlify(self: str, verbose=False):\n nbytes = len(_chunk_bs(self))\n buf = b''\n strlen = ''\n for b in to_bytes(_chunk_bs(self)):\n buf+=b\n# for s in _from_list(_chunk_bs(self)):\n# strlen+=f'{ _bit_length(s): 02d}'\n if verbose:\n for n in range(nbytes):\n strlen += f'{_bit_length(_from_list(_chunk_bs(self))[n])} @[{n}] '\n print(strlen)\n return buf",
"def unescape(msg):\n skip = False\n unescaped = bytearray()\n\n for i in range(len(msg)):\n\n if not skip and msg[i] is 0x7D:\n\n if not (i + 1) >= len(msg):\n unescaped.append(msg[i + 1] ^ 0x20)\n skip = True\n\n elif not skip:\n unescaped.append(msg[i])\n else:\n skip = False\n\n return unescaped",
"def decode_and_hexlify_hashes(hash_str: str) -> typing.Union[str, None]:\n\n return binascii.hexlify(base64.b64decode(hash_str.encode())).decode() if hash_str else None",
"def _grab_unascii(self):\r\n unascii = \"\"\r\n while self._char != -1 and not self._char in \"\\x00\\t\\r\\n\":\r\n unascii += self._char\r\n self._get_char()\r\n return unascii",
"def unescape(input):\n output=atpic.cleaner_escape.unescape(input)\n return output",
"def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")",
"def bh2u(x: bytes) -> str:\n return hfu(x).decode('ascii')",
"def _hashsanitize(bytesin):\n # Used for converting raw byte data into a hex string. If the byte isn't a hex digit, use nothing instead.\n return \"\".join([x if x.lower() in 'abcdef0123456789' else '' for x in bytesin])",
"def entity_decode_hex(input, errors='strict'):\n if _is_unicode(input):\n if '%' not in input:\n return s\n bits = _asciire.split(input)\n res = [bits[0]]\n append = res.append\n for i in range(1, len(bits), 2):\n append(unquote(str(bits[i])).decode('latin1'))\n append(bits[i + 1])\n return (''.join(res), len(input))\n\n preamble_regex = re.compile(r\"&#x\", flags=re.I)\n bits = preamble_regex.split(input)\n # fastpath\n if len(bits) == 1:\n return input\n res = [bits[0]]\n append = res.append\n for item in bits[1:]:\n try:\n append(_hextochr[item[:2]])\n append(item[3:])\n except KeyError:\n append('&#x')\n append(item)\n append(';')\n\n return (''.join(res), len(input))",
"def unescape(escaped_string):\n\n hex_msg = \"^x not followed by a valid 2-digit hex number\"\n\n token_start = 0\n l = len(escaped_string)\n i = 0\n output = []\n while i < l:\n c = escaped_string[i]\n\n if c in _unprintables:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string, \"unprintable character\",\n token_start, i)\n elif c != \"^\":\n output.append(c)\n else:\n if i == l - 1:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string, \"caret at end of string\",\n token_start, i)\n i += 1\n next_c = escaped_string[i]\n if next_c not in \"'\\\"^x\":\n if next_c in _unprintables:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string,\n \"^ followed by unprintable character\",\n token_start, i)\n else:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string,\n \"^ followed by invalid character %s\" % (next_c,),\n token_start, i)\n if next_c != 'x':\n output.append(next_c)\n else:\n if i >= l - 2:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string, hex_msg, token_start, i)\n i += 1\n hex1 = escaped_string[i]\n i += 1\n hex2 = escaped_string[i]\n if hex1 not in _ALLOWED_SAMPLE_HEX_DIGITS:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string, hex_msg, token_start, i - 1)\n if hex2 not in _ALLOWED_SAMPLE_HEX_DIGITS:\n raise vps.errorhandler.StringUnspecial_characterException(\n escaped_string, hex_msg, token_start, i)\n val = int(hex1 + hex2, 16)\n output.append(chr(val))\n # incrementing i should happen at the end of the loop body for\n # all paths\n i += 1\n return ''.join(output)",
"def unobscure(obscured: bytes) -> bytes:\n return decompress(b64d(obscured))",
"def remove_hex(text): \n return re.sub(r'&.*?;', r'', text)",
"def to_hex(text):\n return ' '.join([hex(ord(char)) for char in unicode(text, 'UTF-8')])",
"def cook(raw):\n if sys.version_info[0] < 3:\n # python 2\n if isinstance(raw, str):\n try:\n cooked = raw.decode('utf-8')\n except UnicodeDecodeError:\n cooked = raw.decode('ascii', 'ignore')\n else:\n cooked = raw\n else:\n # python 3\n if isinstance(raw, bytes):\n try:\n cooked = raw.decode('utf-8')\n except UnicodeDecodeError:\n cooked = raw.decode('ascii', 'ignore')\n else:\n cooked = raw\n return cooked",
"def unescape(text):\r\n\r\n def fixup(m):\r\n text = m.group(0)\r\n if text[:2] == '&#':\r\n try:\r\n if text[:3] == '&#x':\r\n return unichr(int(text[3:-1], 16)).encode('utf-8')\r\n return unichr(int(text[2:-1])).encode('utf-8')\r\n except ValueError:\r\n logger.info('error de valor')\r\n\r\n else:\r\n try:\r\n import htmlentitydefs\r\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode('utf-8')\r\n except KeyError:\r\n logger.info('keyerror')\r\n except:\r\n pass\r\n\r\n return text\r\n\r\n return re.sub('&#?\\\\w+;', fixup, text)",
"def str_to_raw(s):\n raw_map = {8:r'\\b', 7:r'\\a', 12:r'\\f', 10:r'\\n', 13:r'\\r', 9:r'\\t', 11:r'\\v'}\n return r''.join(i if ord(i) > 32 else raw_map.get(ord(i), i) for i in s)",
"def hex(self):\n return binascii.hexlify(self.data)"
] | [
"0.76805043",
"0.724818",
"0.71128637",
"0.6909091",
"0.6862392",
"0.6219935",
"0.6197366",
"0.59515",
"0.5922873",
"0.5843483",
"0.57907474",
"0.5650561",
"0.56349534",
"0.56267136",
"0.55028135",
"0.550243",
"0.5497304",
"0.54252976",
"0.5407516",
"0.53313655",
"0.52876633",
"0.52724916",
"0.5268687",
"0.5219308",
"0.52061766",
"0.52057064",
"0.5189475",
"0.5174156",
"0.5166434",
"0.51478815"
] | 0.8604057 | 0 |
Obtains the record in the set with the time closest to the given $unix_time. If this record is not $within the correct number of seconds, an exception is raised. | def get_record(self, unix_time, within):
if len(self.records) <= 0:
raise Exception("No records in this set")
r = self.records[0]
closest_record = r
closest_delta = abs(r.unix_time - unix_time)
for r in self.records[1:]:
delta = abs(r.unix_time - unix_time)
if delta < closest_delta:
closest_record = r
closest_delta = delta
if closest_delta > within:
raise Exception("Closest record to %d was %d (delta=%d) which exceeds limit of %d" %
(unix_time, closest_record.unix_time, closest_delta, within))
return closest_record | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_closest_record(self, time):\n dist = 10000000\n record = -1\n # TODO: optimise a bit\n for i, itime in enumerate(self.times):\n if (abs(time-itime)) < dist:\n dist = abs(time-itime)\n record = i\n\n return record",
"def find_nearest_time(self, time):\n\n idx = np.searchsorted(self.times, time, side=\"left\")\n if idx > 0 and (idx == len(self.times) or math.fabs(time - self.times[idx-1]) < math.fabs(time - self.times[idx])):\n return self.times[idx-1]\n else:\n return self.times[idx]",
"def closest_time(self, when):\n try:\n return np.argmin(np.abs(self.time.datetime - when))\n except AttributeError:\n self.load_time()\n return np.argmin(np.abs(self.time.datetime - when))",
"def nearest_time_sql(self, t):\n if self.verbose:\n sys.stderr.write('SQL Time %s' % t)\n self.cursor.execute('SELECT * FROM tracklog WHERE dt <= ? ORDER BY dt DESC LIMIT 1', (t,))\n d = self.cursor.fetchone()\n if d != None:\n t0 = [datetime.strptime(d[0],'%Y-%m-%d %H:%M:%S')]\n t0.extend(d[1:])\n else:\n t0 = None\n self.cursor.fetchall()\n self.cursor.execute('SELECT * FROM tracklog WHERE dt >= ? ORDER BY dt LIMIT 1', (t,))\n d2 = self.cursor.fetchone()\n if d2 != None:\n t1 = [datetime.strptime(d2[0],'%Y-%m-%d %H:%M:%S')]\n t1.extend(d2[1:])\n else:\n t1 = None\n self.cursor.fetchall()\n if self.verbose:\n sys.stderr.write('SQL Resuls %s %s' % (t0,t1))\n if t0 == None or t1 == None:\n return None\n return t0,t1",
"def nearest():\n\n # Time this functions.\n timer = coils.Timer()\n\n # Parse the URL parameter \"time\".\n errors = list()\n try:\n tstamp_query = flask.request.args.get('time')\n time_query = coils.string2time(tstamp_query)\n assert time_query != None\n except:\n errors.append('Failed to parse \"time\" parameter.')\n\n # Bail on any errors.\n if errors:\n return flask.jsonify(errors=errors)\n \n return flask.jsonify(\n result=getNearestTime(time_query),\n elapsed=timer.get().total_seconds(),\n )",
"def closest_to(self, a, b):\n diff_a = abs(a.ts - self.ts)\n diff_b = abs(b.ts - self.ts)\n if diff_a < diff_b and diff_a < TIME_THRESHOLD:\n return a\n elif diff_b < TIME_THRESHOLD:\n return b\n return None",
"def _nearest_datetime(self, datetime_list, target_datetime):\n if not datetime_list:\n raise errors.ParserError(\n \"Input parameter datetime_list length is zero. Required\"\n \" parameters: [datetime.datetime], datetime.datetime\")\n work_list = [entry for entry in datetime_list if entry < target_datetime]\n if not work_list:\n raise errors.ParserError(\n \"work_list length is zero. Entries in datetime_list\"\n \" {} are not < target_datetime {}\".format(datetime_list,\n target_datetime))\n return min(\n work_list,\n key=lambda datetime_entry: abs(datetime_entry - target_datetime))",
"def findNearestTime(foamCase, time):\n times = list(getTimeFolders(foamCase,returnType=\"float\"))\n strTimes = np.array(getTimeFolders(foamCase,returnType=\"string\"))\n if time in times:\n try:\n intTime = int(strTimes[times.index(time)])\n return int(time)\n except:\n return time\n else:\n nearestTime = times[np.argmin(np.abs(np.array(times)-time))]\n print(\"Time %f is not available, choosing nearest time %f\" % ( time, nearestTime))\n try:\n intTime = int(strTimes[times.index(nearestTime)])\n return int(nearestTime)\n except:\n return nearestTime",
"def bisect(self, dtime, b=0): # pylint: disable=invalid-name\n return self._collection.bisect(dtime, b)",
"def getNearestTime(time_query):\n\n # Convert datetime object to string, for lookup in database.\n tstamp_query = coils.time2string(time_query)\n\n # Retrieve image timestamps.\n try:\n tstamp_left = db.session.query(mapping.Image.time).\\\n filter(mapping.Image.time <= tstamp_query).\\\n order_by(mapping.Image.time.desc()).limit(1)\n tstamp_left = tstamp_left[0].time\n delta_left = abs(coils.string2time(tstamp_left) - time_query)\n except:\n tstamp_left = None\n delta_left = dt.timedelta.max\n \n try:\n tstamp_right = db.session.query(mapping.Image.time).\\\n filter(mapping.Image.time >= tstamp_query).\\\n order_by(mapping.Image.time).limit(1)\n tstamp_right = tstamp_right[0].time\n delta_right = abs(coils.string2time(tstamp_right) - time_query)\n except:\n tstamp_right = None\n delta_right = dt.timedelta.max\n \n # The nearest value has the smallest delta from the query.\n result = tstamp_left if (delta_left < delta_right) else tstamp_right\n return result",
"def mempool_assert_relative_time_exceeds(\n condition: ConditionWithArgs, unspent: CoinRecord, timestamp: uint64\n) -> Optional[Err]:\n try:\n expected_seconds = int_from_bytes(condition.vars[0])\n except ValueError:\n return Err.INVALID_CONDITION\n\n if timestamp is None:\n timestamp = uint64(int(time.time()))\n if timestamp < expected_seconds + unspent.timestamp:\n return Err.ASSERT_SECONDS_RELATIVE_FAILED\n return None",
"def findontarget(starttime, event_list):\n for r in event_list:\n if r[0]==18 and r[1]>starttime: return r[1]\n return None",
"def getElemAfterTime(self, stamp):\n newer = [msg for (msg, time) in zip(self.cache_msgs, self.cache_times)\n if time >= stamp]\n if not newer:\n return None\n return newer[0]",
"def locate_nearest_event(self):\n nearest_event_date = ''\n min = 1000000\n today = self.get_today()\n event_array = self.events.keys()\n for event_date in event_array:\n event_date = self.date_to_operate_format(event_date)\n if int(event_date) - int(today) > 0:\n if int(event_date) - int(today) < min:\n min = int(event_date) - int(today)\n nearest_event_date = event_date\n\n nearest_event = '0'\n if len(event_array) > 0:\n nearest_event = self.change_format_to_database_index(nearest_event_date)\n\n return nearest_event",
"def getElemBeforeTime(self, stamp):\n older = [msg for (msg, time) in zip(self.cache_msgs, self.cache_times)\n if time <= stamp]\n if not older:\n return None\n return older[-1]",
"def mempool_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp: uint64) -> Optional[Err]:\n try:\n expected_seconds = int_from_bytes(condition.vars[0])\n except ValueError:\n return Err.INVALID_CONDITION\n\n if timestamp is None:\n timestamp = uint64(int(time.time()))\n if timestamp < expected_seconds:\n return Err.ASSERT_SECONDS_ABSOLUTE_FAILED\n return None",
"def find_above(self, time, level):\n\n if self.get(time) >= level:\n return time\n ix = self._trace.bisect_right(time)\n for t, lvl in self._trace.items()[ix:]:\n if lvl >= level:\n return t\n return None",
"def get_prev_time(time, c_type=None, c_pk=None):\n\n # TODO if efficiency is an issue here, make occ_times a generator and\n # keep a queue (collections.deque) of the relevant times; this should\n # work because we're moving through them in sequence so could safely\n # discard those before the current time as we go\n\n if c_pk:\n def filter_func(obj):\n return obj['colloquialism__pk'] == c_pk\n elif c_type:\n def filter_func(obj):\n return obj['colloquialism__type'] == c_type\n else:\n def filter_func(obj):\n return True\n\n # filter out relevant times\n filtered = filter(\n lambda obj: filter_func(obj) and obj['start_exact'] < time,\n occ_times)\n\n if not len(filtered):\n return None\n\n # return last filtered time since they are in order\n return filtered[-1]['start_exact']",
"def from_unix_sec(self):\n try:\n self.in_unix_sec = dt.utcfromtimestamp(float(unix)).strftime('%Y-%m-%d %H:%M:%S.%f')\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.in_unix_sec = False\n return self.in_unix_sec",
"def cacheFindEntry(cache, cameraID, desiredTime):\n if not cameraID in cache:\n return None\n cameraTimes = cache[cameraID]\n closestEntry = min(cameraTimes, key=lambda x: abs(x['time'] - desiredTime))\n if abs(closestEntry['time'] - desiredTime) < 30:\n # logging.warning('close: %s', str(closestEntry))\n return os.path.join(cache['readDir'], closestEntry['fileName'])\n else:\n # logging.warning('far: %s, %s', str(desiredTime), str(closestEntry))\n return None",
"def object_at(self, time):\n for event in self._timeline: \n if time >= event.start_time and time <= event.end_time:\n return event.obj\n return self._timeline[-1].obj",
"def subset_by_time(prediction_dict, desired_times_unix_sec):\n\n error_checking.assert_is_numpy_array(\n desired_times_unix_sec, num_dimensions=1\n )\n error_checking.assert_is_integer_numpy_array(desired_times_unix_sec)\n\n desired_indices = numpy.array([\n numpy.where(prediction_dict[VALID_TIMES_KEY] == t)[0][0]\n for t in desired_times_unix_sec\n ], dtype=int)\n\n prediction_dict = subset_by_index(\n prediction_dict=prediction_dict, desired_indices=desired_indices\n )\n\n return prediction_dict, desired_indices",
"def find_below(self, time, level):\n\n if self.get(time) <= level:\n return time\n ix = self._trace.bisect_right(time)\n for t, lvl in self._trace.items()[ix:]:\n if lvl <= level:\n return t\n return None",
"def lookup_time_spent():\n while True:\n search_query = input('Show entries in which time spent '\n '(in minutes) is: ')\n if validate_lookup_time_spent_format(search_query):\n break\n print('** Please enter positive integer **')\n return Entry.select().where(Entry.time_spent == search_query)",
"def test_4_data_fetching_unix_time_and_insertion(self):\n d1 = date.today()\n dt1 = datetime(d1.year, d1.month, d1.day) + timedelta(hours=8)\n result, success = self.fitness.get_columns_given_range(dt1, dt1+timedelta(days=1))\n self.assertTrue(success)\n self.assertEqual(result[0]['Datetime'],self.unix_time)",
"def GetPriorUniquePoint(lap: gps_pb2.Lap,\n point_c: gps_pb2.Point) -> gps_pb2.Point:\n index = -1\n point = lap.points[-1]\n while point.time.ToNanoseconds() == point_c.time.ToNanoseconds():\n index -= 1\n point = lap.points[index]\n return point",
"def get_closest_minute(t):\n ts = dt.datetime.utcfromtimestamp(t/1000)\n s = ts.second\n if s < 30:\n return dt.datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute)\n else:\n return dt.datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute) + dt.timedelta(minutes=1)",
"def validity_by_time(self):\n conn = psycopg2.connect(self.conn)\n permissable_maximum_age_secs = 600 # 600s = 10mins\n query = \"SELECT time FROM steve_sense_sensor_logs ORDER BY time DESC LIMIT 1\"\n cur = conn.cursor()\n cur.execute(query)\n queryResult = cur.fetchall()\n age_seconds = (datetime.datetime.now(\n timezone.utc) - queryResult[0][0]).seconds\n cur.close()\n conn.close()\n if age_seconds > permissable_maximum_age_secs:\n print(\"Check Sensor, last sample is \"+str(age_seconds)+\" old\")\n return False\n else:\n return True",
"def fetch_entry(unique_id, time_stamp):\n print('Fetching items with unique_id: {}'.format(unique_id))\n entry_exists = False\n item = None\n try:\n resp = TIME_TABLE.get_item(Key={'uniqueId': unique_id, 'timeStamp': time_stamp})\n print(resp)\n item = resp.get('Item')\n print(item)\n if item:\n entry_exists = True\n except Exception as e:\n print('Unique Item does not exists: {0}. Error: {1}'.format(unique_id, e))\n\n return entry_exists, item",
"def findFirstHigh(thisStFile):\n with open(thisStFile) as f:\n reader = csv.DictReader(f, delimiter='\\t')\n for row in reader:\n return datetime.datetime.strptime(row['time'], fmt)"
] | [
"0.6383025",
"0.57725835",
"0.5726713",
"0.5603155",
"0.550198",
"0.5465888",
"0.51996434",
"0.51091826",
"0.49394408",
"0.49366295",
"0.49245772",
"0.48886275",
"0.48809275",
"0.48312008",
"0.48274845",
"0.4817998",
"0.4760067",
"0.47593623",
"0.47202304",
"0.47175002",
"0.4663312",
"0.4646897",
"0.46306196",
"0.4605473",
"0.46016923",
"0.45864683",
"0.45778677",
"0.45742598",
"0.45709598",
"0.4556572"
] | 0.84365386 | 0 |
Pulls in the records from other into self, but since the timestamps won't match up perfectly, the output will only have a record per $period number of seconds. | def merge_with(self, other, period=60):
new_list = []
last_timestamp = 0
for r in self.records:
if abs(r.unix_time - last_timestamp) > period:
# Accept this record
last_timestamp = r.unix_time
other_r = other.get_record(r.unix_time, period/2)
r.merge_with(other_r)
new_list.append(r)
self.records = new_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result",
"def merge_new(dfc, pairs, span=None):\n global last_update\n t1 = Timer()\n columns = ['open', 'close', 'trades', 'volume', 'buy_ratio']\n exclude = ['_id','high','low','quote_vol','sell_vol', 'close_time']\n projection = dict(zip(exclude, [False]*len(exclude)))\n idx, data = [], []\n db = app.get_db()\n\n if span is None and last_update:\n # If no span, query/merge db records inserted since last update.\n oid = ObjectId.from_datetime(last_update)\n last_update = now()\n _filter = {'_id':{'$gte':oid}}\n else:\n # Else query/merge all since timespan.\n span = span if span else timedelta(days=7)\n last_update = now()\n _filter = {'pair':{'$in':pairs}, 'close_time':{'$gte':now()-span}}\n\n batches = db.candles.find_raw_batches(_filter, projection)\n\n if batches.count() < 1:\n return dfc\n\n try:\n ndarray = bsonnumpy.sequence_to_ndarray(\n batches,\n dtype,\n db.candles.count()\n )\n except Exception as e:\n log.error(str(e))\n return dfc\n #raise\n\n df = pd.DataFrame(ndarray)\n df['open_time'] = pd.to_datetime(df['open_time'], unit='ms')\n df['freq'] = df['freq'].str.decode('utf-8')\n df['pair'] = df['pair'].str.decode('utf-8')\n\n df['freq'] = df['freq'].replace('1m',60)\n df['freq'] = df['freq'].replace('5m',300)\n df['freq'] = df['freq'].replace('1h',3600)\n df['freq'] = df['freq'].replace('1d',86400)\n df = df.sort_values(by=['pair','freq','open_time'])\n\n df2 = pd.DataFrame(df[columns].values,\n index = pd.MultiIndex.from_arrays(\n [df['pair'], df['freq'], df['open_time']],\n names = ['pair','freq','open_time']),\n columns = columns\n ).sort_index()\n\n df3 = pd.concat([dfc, df2]).drop_duplicates().sort_index()\n\n log.debug(\"{:,} records loaded into numpy. [{:,.1f} ms]\".format(\n len(df3), t1))\n #print(\"Done in %s ms\" % t1)\n return df3",
"def _fill_results(self,spec,measurements,period,duration):\r\n logging.info(\"Fill measurements for spec {0}\".format(spec))\r\n \r\n if self._verb==mplane.model.VERB_QUERY:\r\n \"\"\"\r\n Query according to the time specified in the specification\r\n \"\"\"\r\n (first_time,last_time) = spec.when().datetimes()\r\n first_time=int(first_time.replace(tzinfo=datetime.timezone.utc).timestamp())\r\n last_time=int(last_time.replace(tzinfo=datetime.timezone.utc).timestamp())\r\n sleep_time = 0\r\n else:\r\n \"\"\"\r\n Query from NOW\r\n \"\"\"\r\n first_time = int(time.time())\r\n if (len(measurements[1])>0 or len(measurements[2])>0) and period<=self._pvsr_default_conf_check_cycle:\r\n #there are newly created or modified measurements\r\n first_time = first_time + self._pvsr_default_conf_check_cycle\r\n if first_time % period > 0:\r\n first_time = first_time - (first_time % period)\r\n last_time = first_time + int(duration / period) * period\r\n sleep_time = duration\r\n\r\n logging.debug(\"From: {0}, To: {1}\".format(datetime.datetime.fromtimestamp(first_time),datetime.datetime.fromtimestamp(last_time)))\r\n \r\n meas_data = {}\r\n\r\n while True:\r\n logging.info(\"Wait {0} seconds\".format(sleep_time))\r\n time.sleep(sleep_time)\r\n sleep_time = 30\r\n \r\n loaded_until=self._pvsr.getLastLoadedDataTimestamp(period)\r\n if int(loaded_until.timestamp())>=last_time or time.time()>last_time+period+300:\r\n for i in (0,1,2):\r\n for j in range(len(measurements[i])):\r\n self._fill_meas_result(measurements[i][j],first_time,last_time,meas_data)\r\n break\r\n else:\r\n logging.debug(\"last loaded is still {0}\".format(loaded_until))\r\n \r\n res = mplane.model.Result(specification=spec)\r\n res.set_when(mplane.model.When(a = datetime.datetime.utcfromtimestamp(first_time+period), b = datetime.datetime.utcfromtimestamp(last_time)))\r\n \r\n tmp_time=first_time+period\r\n row_index=0\r\n while tmp_time<=last_time:\r\n tmp_time2 = datetime.datetime.fromtimestamp(tmp_time)\r\n tmp_time3 = datetime.datetime.utcfromtimestamp(tmp_time)\r\n res.set_result_value(\"time\", tmp_time3, row_index)\r\n if tmp_time2 in meas_data:\r\n for mplane_name in meas_data[tmp_time2]:\r\n value = str(meas_data[tmp_time2][mplane_name])\r\n res.set_result_value(mplane_name, value, row_index)\r\n row_index+=1\r\n tmp_time+=period\r\n \r\n return res",
"def filter_time_match(file1, file2):\n freq1 = int(file1.split(\".\")[1].split(\"_\")[1].replace(\"M\", \"\"))\n freq2 = int(file2.split(\".\")[1].split(\"_\")[1].replace(\"M\", \"\"))\n df1, df2 = filter_overlapping_files_dfs(file1, file2)\n\n dt1 = pandas.to_datetime(df1[\"date\"] + \" \" + df1[\"hour\"])\n dt2 = pandas.to_datetime(df2[\"date\"] + \" \" + df2[\"hour\"])\n\n dt_delta = datetime.timedelta(minutes=freq2 - freq1)\n time_match_df1 = dt1.copy()\n time_match_df2 = dt2.copy()\n for idx, dt in dt2.items():\n match = dt1[(dt1 >= dt) & (dt1 <= dt + dt_delta)]\n time_match_df1[match.index] = idx\n time_match_df2[idx] = 0\n time_match_df2[idx] = tuple(match.index)\n\n time_match_df2[time_match_df2.apply(len) != 10]\n return time_match_df1, time_match_df2",
"def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def test_find_df_period(self):\n test_search_df = pd.read_csv(DF_PATH)\n result_1 = find_df_period(test_search_df, 'pickup_datetime', 6)\n p_time_periods_1 = result_1['time_period'].tolist()\n p_intervals_1 = [2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4]\n\n result_2 = find_df_period(test_search_df, 'pickup_datetime', 4)\n p_time_periods_2 = result_2['time_period'].tolist()\n p_intervals_2 = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2]\n\n self.assertTrue(p_time_periods_1 == p_intervals_1)\n self.assertTrue(p_time_periods_2 == p_intervals_2)",
"def make_all_datetime(self):\n \n logging.info('\\n *** Running make_all_datetime ' )\n\n all_uniques = [] # storing a list with all the unique date_times \n which_k_in_dt = {} # list of avilable dataset for each unique date_time, so that when looping over the distinct date_times, only the proper dataset will be read and compared \n\n \"\"\" Loop over all the datasets \n k: name of the dataset\n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ]\"\"\"\n\n for k,v in self.datasets.items() :\n self.unique_dates[k] = {}\n for F in v: \n self.unique_dates[k][F] = {}\n \n self.unique_dates[k][F]['indices'] = {} \n self.unique_dates[k][F]['index_offset_next'] = 0 # to be replaced later when slicing \n self.unique_dates[k][F]['index_offset'] = 0 # to be replaced later when slicing \n\n unique_dt = list(data[k][F]['recordtimestamp'])\n \n indices = list(data[k][F]['recordindex'])\n all_uniques += unique_dt # adding to the total unique date_times \n\n \"\"\" Loop over all the date_times of each dataset \"\"\"\n for dt, index_low, count in zip (unique_dt, indices, range(len(unique_dt)) ):\n\n if dt not in which_k_in_dt.keys():\n which_k_in_dt[dt] = {}\n if k not in which_k_in_dt[dt].keys():\n which_k_in_dt[dt][k] = [] \n if F not in which_k_in_dt[dt][k]:\n which_k_in_dt[dt][k].append(F)\n # at this point I have e.g. which_k_in_dt= {1990-01-01-12-00: {era5_1:[file1,file2] , ncar:[file3] } }\n\n self.unique_dates[k][F]['indices'][dt] = {}\n self.unique_dates[k][F]['indices'][dt]['low'] = index_low \n try:\n index_up = indices[ count + 1 ] # works until the last available recordindex\n except: \n index_up = max(indices)+1000000 # dummy large number \n\n self.unique_dates[k][F]['indices'][dt]['up'] = index_up\n self.unique_dates[k][F]['up_to_dt_slice'] = data[k][F]['min_date'] \n \n\n self.dataset_per_dt = which_k_in_dt \n self.merged_unique_dates = np.unique(np.array(all_uniques) ) # storing the set of *ALL* distinct dt values of all datasets and all files \n logging.debug('*** make_all_datetime finished ')",
"def Merge(self, other):\n\n # Logging just in case\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: before\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: deleted\", %s);'\n %(other.persistant['id'], \n sql.FormatSqlValue('details',\n repr(other.persistant))))\n\n # Fields which can be summed\n for f in ['plays', 'skips']:\n self.persistant[f] = (self.persistant.get(f, 0) +\n other.persistant.get(f, 0))\n\n # Date fields where we take the newest\n for f in ['last_played', 'last_skipped', 'last_action']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a > b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Date fields where we take the oldest\n for f in ['creation_time']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a < b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Fields where we only clobber ours if we don't have a value\n for f in ['artist', 'album', 'song']:\n if not self.persistant.has_key(f) or not self.persistant[f]:\n self.persistant[f] = other.persistant.get(f, None)\n\n # Sometimes the number is a placeholder\n if self.persistant.has_key('number') and self.persistant['number'] == -1:\n self.persistant['number'] = other.persistant.get('number', -1)\n if not self.persistant.has_key('number'):\n self.persistant['number'] = other.persistant.get('number', -1)\n\n # Update the id in the tags table\n tags = self.db.GetRows('select tag from tags where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: tags: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(tags))))\n\n try:\n self.db.ExecuteSql('update tags set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n except:\n # This can happen if the is already a matching tag for the first track\n pass\n\n # Update the id in the paths table\n paths = self.db.GetRows('select path from paths where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: paths: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(paths))))\n \n self.db.ExecuteSql('update paths set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: after\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('commit;')",
"def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. \"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container 
dictionary. At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n #combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates 
\"\"\"\n #dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n except:\n print(\"FAILED feedback variable \" , k)\n\n del all_combined_era5fb\n print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000 , cend)\n\n\n #### Writing combined header_table dic \n for k in all_combined_head[0].keys():\n print('head variable is', k )\n if 
( k == 'comments' or k == 'history'):\n continue\n try:\n tab=np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])\n self.write_merged(content = 'header_table', table= {k: tab}) # { key: np.array([])}\n logging.info('*** Written header table %s: ', k)\n except:\n print('FFF FAILED variable in header table', k )\n\n del all_combined_head\n print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000 , cend)\n \n self.write_merged(content = 'recordindex', table = di) \n self.write_merged(content = 'cdm_tables', table= '')\n\n\n source_conf=xr.Dataset()\n source_files = np.array(source_files).astype(dtype='|S70')\n source_conf['source_file'] = ( {'source_file' : source_files.shape } , source_files )\n self.write_merged(content = 'source_configuration', table= source_conf )\n\n print(0)\n\n\n \"\"\" Concatenation of station_configurations \"\"\"\n station_conf = pd.concat( station_configurations ) \n for k in station_conf.columns:\n try:\n a =np.array( station_conf[k])\n self.write_merged(content = 'station_configuration', table= {k:a})\n logging.debug('*** Written station_configuration %s: ', k)\n except:\n print(\" Failed station_configuration \" , k )\n \n return 0",
"def get_records(self, since_ts=0, num_rec=0):\n nerr = 0\n while True:\n try:\n fixed_block = self.get_fixed_block(unbuffered=True)\n if fixed_block['read_period'] is None:\n raise weewx.WeeWxIOError('invalid read_period in get_records')\n if fixed_block['data_count'] is None:\n raise weewx.WeeWxIOError('invalid data_count in get_records')\n if since_ts:\n dt = datetime.datetime.utcfromtimestamp(since_ts)\n dt += datetime.timedelta(seconds=fixed_block['read_period']*30)\n else:\n dt = datetime.datetime.min\n max_count = fixed_block['data_count'] - 1\n if num_rec == 0 or num_rec > max_count:\n num_rec = max_count\n log.debug('get %d records since %s' % (num_rec, dt))\n dts, ptr = self.sync(read_period=fixed_block['read_period'])\n count = 0\n records = []\n while dts > dt and count < num_rec:\n raw_data = self.get_raw_data(ptr)\n data = self.decode(raw_data)\n if data['delay'] is None or data['delay'] < 1 or data['delay'] > 30:\n log.error('invalid data in get_records at 0x%04x, %s' %\n (ptr, dts.isoformat()))\n dts -= datetime.timedelta(minutes=fixed_block['read_period'])\n else:\n record = dict()\n record['ptr'] = ptr\n record['datetime'] = dts\n record['data'] = data\n record['raw_data'] = raw_data\n record['interval'] = data['delay']\n records.insert(0, record)\n count += 1\n dts -= datetime.timedelta(minutes=data['delay'])\n ptr = self.dec_ptr(ptr)\n return records\n except (IndexError, usb.USBError, ObservationError) as e:\n log.error('get_records failed: %s' % e)\n nerr += 1\n if nerr > self.max_tries:\n raise weewx.WeeWxIOError(\"Max retries exceeded while fetching records\")\n time.sleep(self.wait_before_retry)",
"def pull(self, period):\n # Compile the regex expressions we'll use to parse the title text\n self.identity_regex = re.compile(r\"(\\d{4} \\d{3} \\d{3})\\s{2,}(\\S+)\\s{2,}(\\d{3} \\d{3} \\d{3} *\\S*)\")\n self.ats_regex = re.compile(r\"ATS REFERENCE: (\\S*)\")\n self.municipality_regex = re.compile(r\"MUNICIPALITY: (.*)\")\n self.reference_regex = re.compile(r\"REFERENCE NUMBER: (.*?)\\-{80}\", re.DOTALL)\n self.payday_regex = re.compile(r\"(\\-{80}).*(\\-{80})(.*)\", re.DOTALL)\n\n # Filter the dataframe by date and retrieve each title\n df = self.journal\n df = df[df['Registration Date'] >= period]\n\n df.to_pickle('run/{}.journal.pkl'.format(self.runtime))\n\n click.echo('Journal constructed and saved with timestamp {}'.format(self.runtime))\n\n # Set up structure for target DataFrame\n self.dataframe = pd.DataFrame(\n columns=[\n 'linc',\n 'short_legal',\n 'title_number',\n 'ats_reference',\n 'municipality',\n 'registration',\n 'registration_date',\n 'document_type',\n 'sworn_value',\n 'consideration',\n 'condo'\n ], index=df.index\n )\n\n with click.progressbar(df.iterrows(), label='Pulling basic title data', length=len(df)) as d:\n for index, row in d:\n try:\n payload = self.retrieve_title(index)\n self.dataframe.loc[index, 'linc'] = payload['linc']\n self.dataframe.loc[index, 'short_legal'] = payload['short_legal']\n self.dataframe.loc[index, 'title_number'] = payload['title_number']\n self.dataframe.loc[index, 'ats_reference'] = payload['ats_reference']\n self.dataframe.loc[index, 'municipality'] = payload['municipality']\n self.dataframe.loc[index, 'registration'] = payload['registration']\n self.dataframe.loc[index, 'registration_date'] = payload['date']\n self.dataframe.loc[index, 'document_type'] = payload['document_type']\n self.dataframe.loc[index, 'sworn_value'] = payload['value']\n self.dataframe.loc[index, 'consideration'] = payload['consideration']\n self.dataframe.loc[index, 'condo'] = payload['condo']\n except TypeError:\n pass\n\n self.dataframe['registration_date'] = pd.to_datetime(self.dataframe['registration_date'])\n self.dataframe['sworn_value'] = self.dataframe['sworn_value'].astype(float)\n self.dataframe['consideration'] = self.dataframe['consideration'].astype(float)\n self.dataframe['condo'] = self.dataframe['condo'].fillna(False).astype(bool)\n\n self.dataframe.to_pickle('run/{}.dataframe.pkl'.format(self.runtime))\n click.echo('Dataframe constructed and saved with timestamp {}'.format(self.runtime))\n\n return self.dataframe",
"def merge_logfiles(log1, log2):\n first_in_2 = log2['time'][0]\n keep_from_1 = log1['time'] < first_in_2\n for key in log1.keys():\n log1[key] = log1[key][keep_from_1]\n log1.timeseries_append(log2)\n return log1",
"def __init__(self, numQueues, rate, start_hour, end_hour, appt_low, appt_high):\n\n self.rate = rate\n self.numQueues = numQueues\n self.start = datetime.datetime.combine(datetime.date.today(), datetime.time(start_hour,0,0))\n self.end = datetime.datetime.combine(datetime.date.today(), datetime.time(end_hour,0,0))\n self.appt_low = appt_low\n self.appt_high = appt_high\n minutes_for_new_items = (end_hour-start_hour)*60 #new patients seen between 9AM and 4PM\n time_between_items = rate #exponential dist. time parameter\n self.expected_count = int(np.ceil(stats.poisson.ppf(.9999, minutes_for_new_items/time_between_items)))\n self.ques = [datetime.datetime.combine(datetime.datetime.today(), datetime.time(start_hour,0,0)) for i in range(0, self.numQueues)]\n cols = ['simulation', 'num_items', 'wait_count', 'avg_wait_time', 'close_time']\n self.results = pd.DataFrame(columns = cols)\n return",
"def join_domain_time_span(domain_tables, span):\n joined_domain_tables = []\n \n for domain_table in domain_tables:\n #extract the domain concept_id from the table fields. E.g. condition_concept_id from condition_occurrence\n #extract the domain start_date column\n #extract the name of the table\n concept_id_field, date_field, table_domain_field = get_key_fields(domain_table) \n\n domain_table = domain_table.withColumn(\"date\", unix_timestamp(to_date(col(date_field)), \"yyyy-MM-dd\")) \\\n .withColumn(\"lower_bound\", unix_timestamp(date_add(col(date_field), -span), \"yyyy-MM-dd\"))\\\n .withColumn(\"upper_bound\", unix_timestamp(date_add(col(date_field), span), \"yyyy-MM-dd\"))\\\n .withColumn(\"time_window\", lit(1))\n \n #standardize the output columns\n joined_domain_tables.append(\n domain_table \\\n .select(domain_table[\"person_id\"], \n domain_table[concept_id_field].alias(\"standard_concept_id\"),\n domain_table[\"date\"],\n domain_table[\"lower_bound\"],\n domain_table[\"upper_bound\"],\n lit(table_domain_field).alias(\"domain\"))\n )\n \n return joined_domain_tables",
"def fake_record_data():\n\n user_ids = [4, 4, 4, 4, 5,\n 5, 2, 6, 1, 2,\n 5, 7, 5, 1, 3,\n 3, 1, 4, 2, 3,\n 6, 4, 2, 7, 3,\n 3, 3, 6, 7, 6,\n 6, 7, 1, 7, 1,\n 8, 7, 1, 8, 4]\n\n days = [1519200000, 1519200000, 1519200000, 1519200000, 1519113600,\n 1519113600, 1519113600, 1519027200, 1519027200, 1519027200,\n 1518940800, 1518940800, 1518854400, 1518854400, 1518768000,\n 1518681600, 1518681600, 1518681600, 1518681600, 1518681600,\n 1518595200, 1518595200, 1518595200, 1518595200, 1518508800,\n 1518422400, 1518422400, 1518422400, 1518422400, 1518336000,\n 1518336000, 1518336000, 1518336000, 1518249600, 1518249600,\n 1518163200, 1518163200, 1518076800, 1517990400, 1517904000]\n\n for i, user_id in enumerate(user_ids):\n act_qty = random.randint(5, 13)\n selected_activities = set()\n\n for _ in range(0, act_qty):\n act_id = random.randint(1, 13)\n selected_activities.add(act_id)\n\n day = days[-(i + 1)]\n start = day + 33000\n total_time = 0\n\n for act_id in selected_activities:\n act_time = random.randint(120, 1000)\n\n start_t = start + total_time\n end_t = datetime.fromtimestamp(start_t + act_time)\n start_t = datetime.fromtimestamp(start_t)\n\n total_time += act_time\n\n print (str(user_id) + '|' + str(i + 1) + '|' + str(act_id) + '|' +\n str(start_t) + '|' + str(end_t))",
"def update_period(self, period):\n\n # Update attribute\n self._period = period\n\n # Create new data and time\n shape = int(round(self._drate) * self._period + 1)\n new_data = OrderedDict([(ch, np.zeros(shape=shape)) for i, ch in enumerate(self.channels)])\n new_time = np.zeros(shape=shape)\n\n # Check whether new time and data hold more or less indices\n decreased = True if self._time.shape[0] >= shape else False\n\n if decreased:\n # Cut time axis\n new_time = self._time[:shape]\n\n # If filled before, go to 0, else go to 0 if currnt index is bigger than new shape\n if self._filled:\n self._idx = 0\n else:\n self._idx = 0 if self._idx >= shape else self._idx\n\n # Set wheter the array is now filled\n self._filled = True if self._idx == 0 else False\n\n else:\n # Extend time axis\n new_time[:self._time.shape[0]] = self._time\n\n # If array was filled before, go to last time, set it as offset and start from last timestamp\n if self._filled:\n self._idx = self._time.shape[0]\n self._start = self._timestamp\n self._offset = self._time[-1]\n\n self._filled = False\n\n # Set new time and data\n for ch in self.channels:\n if decreased:\n new_data[ch] = self._data[ch][:shape]\n else:\n new_data[ch][:self._data[ch].shape[0]] = self._data[ch]\n\n # Update\n self._time = new_time\n self._data = new_data",
"def copy_many_from_temp(self,\r\n count):\r\n\r\n for counter in range(count):\r\n print(PERIOD,end=EMPTYCHAR)\r\n self.copy_from_temp(self.tempobject)\r\n self.constitute_key_freq_dict()\r\n print()",
"def _write_to_dataset(parser1, parser2, dset, rundate):\n\n data_all1 = parser1.as_dict()\n data_all2 = parser2.as_dict()\n if parser1.file_path == parser2.file_path:\n collection = [data_all1]\n else:\n collection = [data_all1, data_all2]\n\n # Meta information\n dset.meta[\"tech\"] = \"slr\"\n dset.meta.add(\"file\", parser1.file_path.stem, section=\"input\")\n dset.meta.add(\"file\", parser2.file_path.stem, section=\"input\")\n dset.meta.add(\"type\", config.tech.obs_format.str.upper(), section=\"input\")\n\n # Make new dict \"obs_data\" containing only data in relevant time interval:\n arc_length = config.tech.arc_length.float\n rundate_datetime = datetime(rundate.year, rundate.month, rundate.day)\n obs_data = dict()\n for data_all in collection:\n for i, x in enumerate(data_all[\"meta\"][\"obs_time\"]):\n if rundate_datetime <= x < rundate_datetime + timedelta(days=arc_length):\n for key in (\"meta\", \"obs\", \"obs_str\"):\n for field, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(field, list()).append(val[i])\n\n data_all.pop(\"meta\")\n data_all.pop(\"obs\")\n data_all.pop(\"obs_str\")\n\n for key in data_all.keys():\n if key.startswith(\"met_\"):\n for key2, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(key2, list()).append(val)\n elif key.startswith(\"satellite_\"):\n # TODO: Use this information in the future?\n continue\n elif key.startswith(\"station_\"):\n # TODO: Use this information in the future?\n continue\n else:\n log.fatal(f\"Unknown data type{key}\")\n\n obs_date = obs_data[\"meta\"][\"obs_date\"]\n time = [obs_date[i] + timedelta(seconds=obs_data[\"meta\"][\"obs_sec\"][i]) for i in range(0, len(obs_date))]\n dset.num_obs = len(obs_data[\"meta\"][\"obs_time\"])\n dset.add_time(\"time\", val=time, scale=\"utc\", fmt=\"datetime\")\n dset.add_text(val=obs_data[\"meta\"][\"station\"], name=\"station\")\n dset.add_text(val=obs_data[\"meta\"][\"satellite\"], name=\"satellite\")\n dset.add_float(val=obs_data[\"meta\"][\"bin_rms\"], unit=\"picoseconds\", name=\"bin_rms\")\n # Positions\n trf = apriori.get(\"trf\", time=dset.time)\n for station in dset.unique(\"station\"):\n trf_site = trf[station]\n station_pos = trf_site.pos.trs.val\n log.debug(f\"Station position for {station} ({trf_site.name}) is (x,y,z) = {station_pos.mean(axis=0)}\")\n domes = trf_site.meta[\"domes\"]\n obs_data[\"pos_\" + station] = station_pos\n obs_data[\"station-other_\" + station] = dict(domes=domes, cdp=station, site_id=station)\n dset.add_position(\n \"site_pos\",\n time=dset.time,\n system=\"trs\",\n val=np.array([obs_data[\"pos_\" + s][idx] for idx, s in enumerate(dset.station)]),\n )\n # Station data\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station_\")])\n for field in sta_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"station_\" + s][field]) for s in dset.station]))\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n for field in sta_fields:\n dset.add_text(field, val=[obs_data[\"station-other_\" + s][field] for s in dset.station])\n\n # Station meta\n station_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n pos_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"pos_\")])\n\n for sta_key, pos_key in zip(station_keys, pos_keys):\n sta_name = sta_key.replace(\"station-other_\", \"\")\n cdp = obs_data[sta_key][\"cdp\"]\n dset.meta.add(sta_name, \"site_id\", cdp)\n longitude, 
latitude, height, _ = sofa.iau_gc2gd(2, obs_data[pos_key][0, :]) # TODO: Reference ellipsoid\n dset.meta[\"station\"].setdefault(sta_name, {})[\"cdp\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"site_id\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"domes\"] = obs_data[sta_key][\"domes\"]\n dset.meta[\"station\"].setdefault(sta_name, {})[\"marker\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"description\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"longitude\"] = longitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"latitude\"] = latitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"height\"] = height\n\n # Satellite data\n sat_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"satellite_\")])\n for field in sat_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"satellite_\" + s][field]) for s in dset.satellite]))\n\n # Observations\n # In the dataset, obs_time is seconds since rundate:\n v = [\n (obs_data[\"meta\"][\"obs_date\"][i] - rundate_datetime).total_seconds() + obs_data[\"meta\"][\"obs_sec\"][i]\n for i in range(0, dset.num_obs)\n ]\n\n obs_data[\"obs\"].pop(\"obs_time\")\n dset.add_float(\"obs_time\", val=v)\n for field, values in obs_data[\"obs\"].items():\n dset.add_float(field, val=np.array(values))\n\n for field, values in obs_data[\"obs_str\"].items():\n dset.add_text(field, val=values)\n\n return obs_data",
"def chunk_periods(start, end):\n\n logging.debug(f'chunking {start} to {end}')\n # convert the strings to datetime objects\n #start = dt.datetime.strptime(''.join(start.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S-%z')\n start = dt.datetime.strptime(start, '%Y-%m-%dT%H:%M:%S-%z')\n logging.debug(f'start: {start}')\n periods = []\n\n # if the year and month of the period are the same, just return the dates as we got them\n\n\n\n return periods",
"def stack_ps(ps1, ps2, keep_unique = False, fill_time = False, message = True):\n # create deepcopies to avoid changing original instances\n \n ps1 = copy.deepcopy(ps1)\n ps2 = copy.deepcopy(ps2)\n \n # create datetime information in PS instances\n \n try:\n _ = getattr(ps1, \"datetime\")\n except AttributeError:\n ps1.createTimeDate()\n \n try: \n _ = getattr(ps2, \"datetime\")\n except AttributeError:\n ps2.createTimeDate()\n \n # check time resolutions\n res1 = (dt.datetime.strptime(ps1.datetime['data'][1], ps1.datetime['units']) - dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units'])).seconds\n res2 = (dt.datetime.strptime(ps2.datetime['data'][1], ps2.datetime['units']) - dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units'])).seconds\n \n if abs(res1-res2) > 60:\n if message:\n print( (\"warning: resolutions differ %d seconds\")%(abs(res1-res2)) )\n \n # check if ps1 is \"older\" than ps2\n \n reversed_order = False\n cut = None\n \n if dt.datetime.strptime(ps1.datetime['data'][-1], ps1.datetime['units']) < dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units']):\n # ps2 starts after ps1 ends\n timediff = (dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units']) - dt.datetime.strptime(ps1.datetime['data'][-1], ps1.datetime['units'])).total_seconds()\n elif dt.datetime.strptime(ps2.datetime['data'][-1], ps2.datetime['units']) < dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units']):\n # ps1 starts after ps2 ends (user has inadvertently switched the order of the instances)\n reversed_order = True\n timediff = (dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units']) - dt.datetime.strptime(ps2.datetime['data'][-1], ps2.datetime['units'])).total_seconds()\n else:\n # yikes! 
The particle sizer instances have overlapping data\n # it is assumed that ps2 data replaces ps1 data starting \n # from the overlapping time\n cut, cutdate = tt.findNearestDate(ps1.datetime['data'], ps2.datetime['data'][0]) \n fill_time = False\n \n #print(timediff, 1.5*res1)\n # check if filling is required\n if fill_time is True:\n # check time difference\n if reversed_order:\n # ps1 starts after ps2 ends\n if timediff > 1.5*res2:\n # the time gap between two instances has to be\n # larger than twice the normal resolution\n numdates = int(np.ceil(timediff/res2))\n base = dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units'])\n date_list = [base - dt.timedelta(seconds=res2*x) for x in range(numdates)]\n date_list = list(reversed(date_list[1:]))# because numdates starts at 0, first date on date_list is the same as the startdate from the second instance\n datetimelist = [dt.datetime.strftime(dl, ps2.datetime['units']) for dl in date_list]\n ps2.datetime['data'] = np.append(ps2.datetime['data'], datetimelist)\n timelist = [dt.datetime.strftime(dl, ps2.time['units']) for dl in date_list]\n ps2.time['data'] = np.append(ps2.time['data'], timelist)\n datelist = [dt.datetime.strftime(dl, ps2.date['units']) for dl in date_list]\n ps2.date['data'] = np.append(ps2.date['data'], datelist)\n else:\n fill_time = False\n else:\n if timediff > 1.5*res1:\n # the time gap between two instances has to be\n # larger than twice the normal resolution\n numdates = int(np.ceil(timediff/res1))\n base = dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units'])\n date_list = [base - dt.timedelta(seconds=res1*x) for x in range(numdates)]\n date_list = list(reversed(date_list[1:])) # because numdates starts at 0, first date on date_list is the same as the startdate from the second instance\n datetimelist = [dt.datetime.strftime(dl, ps1.datetime['units']) for dl in date_list]\n ps1.datetime['data'] = np.append(ps1.datetime['data'], datetimelist)\n timelist = [dt.datetime.strftime(dl, ps1.time['units']) for dl in date_list]\n ps1.time['data'] = np.append(ps1.time['data'], timelist)\n datelist = [dt.datetime.strftime(dl, ps1.date['units']) for dl in date_list]\n ps1.date['data'] = np.append(ps1.date['data'], datelist)\n else:\n fill_time = False\n \n if message:\n print(\"reversed order:\", reversed_order)\n # check which attributes are similar in both instances\n if reversed_order:\n # ps1 starts after ps2 ends\n new_ps = copy.deepcopy(ps2)\n for attribute in ps1.__dict__.keys():\n if attribute in ps2.__dict__.keys():\n afield = getattr(new_ps, attribute)\n if attribute == 'diameter':\n st11, st12, st21, st22, diamlist = check_diameters(ps1.diameter['data'], ps2.diameter['data'], ps1.instrument_type)\n \n for var in new_ps.data['variables']:\n if fill_time is True:\n add = np.ma.zeros((ps2.data[var]['data'].shape[0],len(date_list))) \n add[:] = np.nan\n newdata = np.append(ps2.data[var]['data'],add,axis=1)\n ps2.data[var]['data'] = newdata\n \n sh1 = ps1.data[var]['data'].shape\n sh2 = ps2.data[var]['data'].shape\n newfields = (len(diamlist) ,sh1[1] + sh2[1])\n new_field = np.ma.zeros(newfields)\n new_field[:] = np.ma.masked\n \n new_field[st21:st22, 0:ps2.data[var]['data'][:,:cut].shape[1]] = ps2.data[var]['data'][:,:cut]\n new_field[st11:st12, ps2.data[var]['data'][:,:cut].shape[1]:] = ps1.data[var]['data']\n \n new_ps.data[var]['data'] = new_field\n \n afield['data'] = diamlist\n \n elif attribute == 'data':\n # data has been appended with diameters\n pass\n else:\n try:\n field_ps2 = 
getattr(ps2, attribute)\n field_ps1 = getattr(ps1, attribute)\n except TypeError:\n if attribute == 'header':\n pass\n else:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n try:\n data_ps2 = field_ps2['data']\n data_ps1 = field_ps1['data']\n if attribute in ['date', 'datetime', 'time']: # these have already been extended with the correct data\n afield['data'] = np.append(data_ps2[:cut], data_ps1)\n elif fill_time:\n add = np.ma.zeros(len(date_list))\n add[:] = np.nan\n afield['data'] = np.append(np.append(data_ps2[:cut],add), data_ps1)\n else:\n afield['data'] = np.append(data_ps2[:cut], data_ps1)\n except:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n \n else:\n if keep_unique:\n newattribute = getattr(ps1,attribute)\n newattribute['time'] = ps1['datetime']['data']\n setattr(new_ps, attribute, newattribute)\n else:\n pass\n if keep_unique is False:\n # get rid of attributes which were in ps2 but not in ps1\n for attribute in ps2.__dict__.keys():\n if attribute in ps1.__dict__.keys():\n pass\n else:\n delattr(new_ps, attribute)\n \n \n else:\n # ps2 starts after ps1 ends\n new_ps = copy.deepcopy(ps1)\n for attribute in ps2.__dict__.keys():\n if attribute in ps1.__dict__.keys():\n afield = getattr(new_ps, attribute)\n if attribute == 'diameter':\n st11, st12, st21, st22, diamlist = check_diameters(ps1.diameter['data'], ps2.diameter['data'], ps1.instrument_type)\n for var in new_ps.data['variables']:\n if fill_time is True:\n add = np.ma.zeros((ps1.data[var]['data'].shape[0],len(date_list))) \n add[:] = np.nan\n newdata = np.append(ps1.data[var]['data'],add,axis=1)\n ps1.data[var]['data'] = newdata\n \n sh1 = ps1.data[var]['data'].shape\n sh2 = ps2.data[var]['data'].shape\n newfields = (len(diamlist) ,sh1[1] + sh2[1])\n new_field = np.ma.zeros(newfields)\n new_field[:] = np.ma.masked\n \n new_field[st11:st12, 0:ps1.data[var]['data'][:,:cut].shape[1]] = ps1.data[var]['data'][:,:cut]\n new_field[st21:st22, ps1.data[var]['data'][:,:cut].shape[1]:] = ps2.data[var]['data']\n \n new_ps.data[var]['data'] = new_field\n \n afield['data'] = diamlist\n \n elif attribute == 'data':\n # data has been appended with diameters\n pass\n else:\n try:\n field_ps2 = getattr(ps2, attribute)\n field_ps1 = getattr(ps1, attribute)\n except TypeError:\n if attribute == 'header':\n pass\n else:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n try:\n data_ps2 = field_ps2['data']\n data_ps1 = field_ps1['data']\n if attribute in ['date', 'datetime', 'time']: # these have already been extended with the correct data\n afield['data'] = np.append(data_ps1[:cut], data_ps2)\n elif fill_time:\n add = np.ma.zeros(len(date_list))\n add[:] = np.nan\n afield['data'] = np.append(np.append(data_ps1[:cut],add), data_ps2)\n else:\n afield['data'] = np.append(data_ps1[:cut], data_ps2)\n except:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n \n else:\n if keep_unique:\n newattribute = getattr(ps2,attribute)\n newattribute['time'] = ps2['datetime']['data']\n setattr(new_ps, attribute,newattribute)\n else:\n pass\n if keep_unique is False:\n # get rid of attributes which were in ps2 but not in ps1\n for attribute in ps1.__dict__.keys():\n if attribute in ps2.__dict__.keys():\n pass\n else:\n delattr(new_ps, attribute)\n \n new_ps.sample['data'] = np.arange(1.0, len(new_ps.datetime['data'])+1)\n new_ps.instrument_type = ps1.instrument_type.split('_')[0] + '_concatenated'\n \n if message:\n print('filltime: ', fill_time)\n \n return 
new_ps",
"def _periodically_create_records(self):\n # WINNERS holds the members that have 'won' this cycle\n winners = set()\n\n while True:\n now = time()\n start_climb = int(now / CYCLE_SIZE) * CYCLE_SIZE\n start_create = start_climb + CYCLE_SIZE * 0.5\n start_idle = start_climb + CYCLE_SIZE * 0.9\n start_next = start_climb + CYCLE_SIZE\n\n if start_climb <= now < start_create:\n yield start_create - now\n\n elif start_create <= now < start_idle and len(winners) < self._signature_count:\n logger.debug(\"c%d record creation phase. wait %.2f seconds until record creation\", int(now / CYCLE_SIZE), CYCLE_SIZE * 0.4 / self._signature_count)\n yield (CYCLE_SIZE * 0.4 / self._signature_count) * pythonrandlib.random()\n\n # find the best candidate for this cycle\n score = 0\n winner = None\n for member in self._slope.iterkeys():\n book = self.get_book(member)\n if book.score > score and not member in winners:\n winner = member\n\n if winner:\n logger.debug(\"c%d attempt record creation %s\", int(now / CYCLE_SIZE), winner.mid.encode(\"HEX\"))\n record_candidate = self._slope[winner]\n\n # prevent this winner to 'win' again in this cycle\n winners.add(winner)\n\n # # TODO: this may be and invalid assumption\n # # assume that the peer is online\n # record_candidate.history.set(now)\n\n self._dispersy.callback.unregister(record_candidate.callback_id)\n self.create_barter_record(record_candidate.candidate, winner)\n\n else:\n logger.debug(\"c%d no peers available for record creation (%d peers on slope)\", int(now / CYCLE_SIZE), len(self._slope))\n\n else:\n logger.debug(\"c%d second climbing phase. wait %d seconds until the next phase\", int(now / CYCLE_SIZE), start_next - now)\n assert now >= start_idle or len(winners) >= self._signature_count\n for record_candidate in self._slope.itervalues():\n self._dispersy.callback.unregister(record_candidate.callback_id)\n self._slope = {}\n winners = set()\n yield start_next - now",
"def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)",
"def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog",
"def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. 
Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0",
"def _fill_day_dicts(self):\n today = datetime.date.today()\n for i, record in enumerate(self._dataset):\n if (record[\"createdAt\"] / 1000) > time.mktime((today - datetime.timedelta(days=30)).timetuple()):\n self._add_record(self._all30_dict, record, key=i)\n\n elif (record[\"createdAt\"] / 1000) > time.mktime((today - datetime.timedelta(days=60)).timetuple()):\n self._add_record(self._all60_dict, record, key=i)\n\n else:\n self._add_record(self._all90_dict, record, key=i)",
"def merge(self, other, gap_method=\"slinear\", new_sample_rate=None):\n if new_sample_rate is not None:\n merge_sample_rate = new_sample_rate\n combine_list = [self.decimate(new_sample_rate).dataset]\n else:\n merge_sample_rate = self.sample_rate\n combine_list = [self.dataset]\n\n ts_filters = self.filters\n if isinstance(other, (list, tuple)):\n for run in other:\n if not isinstance(run, RunTS):\n raise TypeError(f\"Cannot combine {type(run)} with RunTS.\")\n\n if new_sample_rate is not None:\n run = run.decimate(new_sample_rate)\n combine_list.append(run.dataset)\n ts_filters.update(run.filters)\n else:\n if not isinstance(other, RunTS):\n raise TypeError(f\"Cannot combine {type(other)} with RunTS.\")\n\n if new_sample_rate is not None:\n other = other.decimate(new_sample_rate)\n combine_list.append(other.dataset)\n ts_filters.update(other.filters)\n\n # combine into a data set use override to keep attrs from original\n\n combined_ds = xr.combine_by_coords(\n combine_list, combine_attrs=\"override\"\n )\n\n n_samples = (\n merge_sample_rate\n * float(\n combined_ds.time.max().values - combined_ds.time.min().values\n )\n / 1e9\n ) + 1\n\n new_dt_index = make_dt_coordinates(\n combined_ds.time.min().values,\n merge_sample_rate,\n n_samples,\n self.logger,\n )\n\n run_metadata = self.run_metadata.copy()\n run_metadata.sample_rate = merge_sample_rate\n\n new_run = RunTS(\n run_metadata=self.run_metadata,\n station_metadata=self.station_metadata,\n survey_metadata=self.survey_metadata,\n )\n\n ## tried reindex then interpolate_na, but that has issues if the\n ## intial time index does not exactly match up with the new time index\n ## and then get a bunch of Nan, unless use nearest or pad, but then\n ## gaps are not filled correctly, just do a interp seems easier.\n new_run.dataset = combined_ds.interp(\n time=new_dt_index, method=gap_method\n )\n\n # update channel attributes\n for ch in new_run.channels:\n new_run.dataset[ch].attrs[\"time_period.start\"] = new_run.start\n new_run.dataset[ch].attrs[\"time_period.end\"] = new_run.end\n\n new_run.run_metadata.update_time_period()\n new_run.station_metadata.update_time_period()\n new_run.survey_metadata.update_time_period()\n new_run.filters = ts_filters\n\n return new_run",
"def get_data(self):\n# epoch_from = 1301641200\n# epoch_to = epoch_from+60*60*24\n \"\"\"\n letting runs finish for 2 more hours\n ideally, want to make this a function of time from schedule plus some\n variation, like 1 hour just in case\n \"\"\" \n# epoch_to_adjusted = epoch_to + 7200\n conn = self.connect_to_mongo()\n db = conn.muni\n \n# print \"==== Collecting starting runs from %s to %s ====\"\\\n# % (str(time.ctime(epoch_from)), str(time.ctime(epoch_to)))\n \"\"\"\n > db.location.find({loc:{$within:{$center:[[37.80241, -122.4364],\n 0.01]}}})\n > db.location.find({loc:{$within:{$center:[[37.76048, -122.38895],\n 0.002]}}})\n \"\"\"\n bus_ids = db.location.find({'route':self.route_name}).distinct(\"bus_id\")\n for bus_id in bus_ids:\n c_start = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.start_lat, self.start_lon],\n self.start_prec]}}\n }).sort(\"cur_time\", DESCENDING)\n self.massage_start_data(c_start)\n \"\"\"\n TODO: the end point seems to be too nice to Muni, need to tighten\n the circle a little\n \"\"\"\n c_end = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.end_lat, self.end_lon],\n self.end_prec]}}\n }).sort(\"cur_time\", ASCENDING)\n self.massage_end_data(c_end)\n if self.to_log:\n print self.start_bus_ids_to_times\n print self.end_bus_ids_to_times\n \n return self.start_bus_ids_to_times, self.end_bus_ids_to_times",
"def generate_record(self, data_dictionaries, group_by):\n result = {}\n\n for one_measurement in data_dictionaries:\n time = one_measurement['datetime']\n\n if isinstance(time, str):\n if self.timezone:\n time = arrow.get(time).shift(hours=6) # TODO: fix utc conversion\n else:\n time = arrow.get(time)\n\n record = Record(self.name, self.lat, self.lon, self.height, time)\n\n del one_measurement['datetime']\n\n one_measurement = {k: float(v) for k, v in one_measurement.items()}\n\n record.merge(one_measurement)\n\n key = group_by(time)\n \n if key == '2016-04-01_00':\n break\n\n record_string = record.little_r_report()\n\n try:\n result[key].append(record_string)\n except KeyError:\n result[key] = [record_string]\n\n return result",
"def extract_tt_by_periods(ttri, periods, start_time, end_time, filters):\n logger = getLogger(__name__)\n # sess = conn.get_session()\n das = {}\n all_wz_features = {}\n all_wz_laneconfigs = {}\n\n # collecting daily data\n for prd in periods:\n logger.debug('>>>> retrieving data for %s' % prd.get_date_string())\n year = prd.start_date.year\n sdate = prd.start_date\n edate = prd.end_date\n if year not in das:\n da_tt = tt.TravelTimeDataAccess(year)\n da_tt_wz = tt_workzone.TTWorkZoneDataAccess(year)\n da_tt_wz_feature = wz_feature.WZFeatureDataAccess()\n da_tt_wz_lncfg = wz_laneconfig.WZLaneConfigDataAccess()\n da_tt_weather = tt_weather.TTWeatherDataAccess(year)\n da_tt_snowmgmt = tt_snowmgmt.TTSnowManagementDataAccess(year)\n da_tt_incident = tt_incident.TTIncidentDataAccess(year)\n da_tt_specialevent = tt_specialevent.TTSpecialeventDataAccess(year)\n das[year] = (\n da_tt, da_tt_wz, da_tt_wz_feature, da_tt_wz_lncfg, da_tt_weather, da_tt_snowmgmt, da_tt_incident,\n da_tt_specialevent)\n\n (da_tt, da_tt_wz, da_tt_wz_feature, da_tt_wz_lncfg, da_tt_weather, da_tt_snowmgmt, da_tt_incident,\n da_tt_specialevent) = das[year]\n\n # traveltimes = da_tt.list_by_period(ttri.id, self.prd)\n weathers = da_tt_weather.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.WeatherInfo] \"\"\"\n workzones = da_tt_wz.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.WorkZoneInfo] \"\"\"\n incidents = da_tt_incident.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.IncidentInfo] \"\"\"\n snowmgmts = da_tt_snowmgmt.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.SnowManagementInfo] \"\"\"\n specialevents = da_tt_specialevent.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.SpecialEventInfo] \"\"\"\n traveltimes = da_tt.list_by_period(ttri.id, prd)\n \"\"\":type: list[pyticas_tetres.ttrms_types.TravelTimeInfo] \"\"\"\n\n if not any(weathers):\n logger.debug('>>>> end of retrieving data for %s (no weather data)' % prd.get_date_string())\n continue\n\n extras = {\n 'weathers': {_tt.id: [] for _tt in traveltimes},\n 'workzones': {_tt.id: [] for _tt in traveltimes},\n 'incidents': {_tt.id: [] for _tt in traveltimes},\n 'specialevents': {_tt.id: [] for _tt in traveltimes},\n 'snowmgmts': {_tt.id: [] for _tt in traveltimes},\n }\n \"\"\":type: dict[str, dict[int, list]]\"\"\"\n\n _put_to_bucket(ttri, weathers, extras['weathers'], da_tt_weather, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, workzones, extras['workzones'], da_tt_wz, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, incidents, extras['incidents'], da_tt_incident, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, snowmgmts, extras['snowmgmts'], da_tt_snowmgmt, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, specialevents, extras['specialevents'], da_tt_specialevent, year, all_wz_features, all_wz_laneconfigs, das)\n\n for tti in traveltimes:\n _tt_weathers = extras['weathers'][tti.id]\n extdata = ExtData(tti,\n _tt_weathers[0] if _tt_weathers else None,\n extras['incidents'][tti.id],\n extras['workzones'][tti.id],\n extras['specialevents'][tti.id],\n extras['snowmgmts'][tti.id])\n\n if start_time <= tti.str2datetime(tti.time).time() <= end_time:\n for ef in filters:\n try:\n ef.check(extdata)\n except Exception as ex:\n tb.traceback(ex)\n logger.debug('>>>> end of 
retrieving data for %s (error occured 1)' % prd.get_date_string())\n continue\n else:\n for ef in filters:\n try:\n ef.check_outofrange(extdata)\n except Exception as ex:\n tb.traceback(ex)\n logger.debug('>>>> end of retrieving data for %s (error occured 2)' % prd.get_date_string())\n continue\n\n del extras\n logger.debug('>>>> end of retrieving data for %s' % prd.get_date_string())\n\n # sess.close()"
] | [
"0.60452324",
"0.5793913",
"0.5528706",
"0.5526547",
"0.5517215",
"0.5493142",
"0.54775965",
"0.54737484",
"0.5462078",
"0.54354554",
"0.5395026",
"0.5321515",
"0.529761",
"0.5283373",
"0.5275335",
"0.5271934",
"0.52359205",
"0.51437724",
"0.51378435",
"0.513382",
"0.51214904",
"0.5115958",
"0.51156235",
"0.50994545",
"0.5098985",
"0.50937",
"0.5085141",
"0.5084859",
"0.5083243",
"0.50631934"
] | 0.7975035 | 0 |
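Each row in this dump carries a query, a positive document, metadata whose objective names a (query, document, negatives) triplet, a list of negative documents, their scores, and the positive's score and rank, separated by '|'. Below is a minimal sketch, not part of the dataset, of how such a row might be turned into training triplets; it assumes the column names map one-to-one onto dict keys and that negative_scores rank the negatives by retrieval similarity. select_triplets and the stand-in row values are illustrative only.

# Sketch only: field names below are read off this dump, not from a documented schema.
from typing import Dict, List, Tuple

def select_triplets(row: Dict, k: int = 3) -> List[Tuple[str, str, str]]:
    """Pair the query with its positive document and the k highest-scoring negatives."""
    ranked = sorted(
        zip(row["negatives"], (float(s) for s in row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,  # hardest negatives (highest retrieval score) first
    )
    return [(row["query"], row["document"], neg) for neg, _ in ranked[:k]]

# Tiny stand-in row with the same shape as the rows in this dump:
row = {
    "query": "stabilize trap intensities from a camera frame",
    "document": "def stabilize_intensity(which_cam, cam): ...",
    "negatives": ["def merge_logs(a, b): ...", "def get_records(): ...", "def pull(period): ..."],
    "negative_scores": ["0.60", "0.55", "0.51"],
    "document_score": "0.7975035",
    "document_rank": "0",
}
for query, positive, negative in select_triplets(row, k=2):
    print(query, "->", positive[:30], "| hard negative:", negative[:20])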
Given a UC480 camera object (from the instrumental module) and the number of trap objects, iteratively applies image analysis and per-trap adjustment to achieve a nearly homogeneous intensity profile across the traps. | def stabilize_intensity(which_cam, cam, verbose=False):
L = 0.5 # Correction Rate
    mags = np.ones(12)  # FIXME: trap count is hardcoded to 12 here instead of being passed in
ntraps = len(mags)
iteration = 0
while iteration < 5:
iteration += 1
print("Iteration ", iteration)
im = cam.latest_frame()
try:
trap_powers = analyze_image(which_cam, im, ntraps, iteration, verbose)
except (AttributeError, ValueError) as e:
print("No Bueno, error occurred during image analysis:\n", e)
break
mean_power = trap_powers.mean()
rel_dif = 100 * trap_powers.std() / mean_power
print(f'Relative Power Difference: {rel_dif:.2f} %')
if rel_dif < 0.8:
print("WOW")
break
        deltaP = [mean_power - P for P in trap_powers]  # power deficit/excess per trap
        dmags = [(dP / abs(dP)) * sqrt(abs(dP)) * L for dP in deltaP]  # sign(dP)*sqrt(|dP|) step, damped by L (assumes dP != 0)
        mags = np.add(mags, dmags)
        print("Magnitudes: ", mags)
        break  # NOTE: unconditional break, so only one correction pass runs while the magnitude update below is disabled
# self._update_magnitudes(mags)
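    # --- Hypothetical usage sketch (assumptions, not taken from the original module) ---
    # The `cam` handle is expected to behave like an instrumental camera object exposing
    # latest_frame(); a plausible (unverified) setup would be:
    #   from instrumental import instrument
    #   cam = instrument('uc480')          # open the UC480 camera (alias is an assumption)
    #   cam.start_live_video()
    #   stabilize_intensity(which_cam=0, cam=cam, verbose=True)
    # The commented-out call above would push the corrected magnitudes back to the trap
    # controller; it stays disabled here, so this standalone version only reports them.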
    _ = analyze_image(which_cam, im, ntraps, verbose=verbose) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):\n threshes = [0.5, 0.6]\n margin = 10\n threshold = np.max(image) * threshes[which_cam]\n im = image.transpose()\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n left_pos = 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n left_pos = i\n elif left_pos != 0:\n if first:\n pos_first = (left_pos + i) // 2\n first = False\n pos_last = (left_pos + i) // 2\n left_pos = 0\n\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n ## Fitting ##\n if verbose:\n print(\"Fitting...\")\n xdata = np.arange(x_len)\n popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),\n xdata, peak_vals, p0=params0)\n if verbose:\n print(\"Fit!\")\n plt.figure()\n plt.plot(xdata, peak_vals) # Data\n if iteration:\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit\n plt.title(\"Iteration: %d\" % iteration)\n else:\n plt.title(\"Final Product\")\n\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)\n print(\"Fig_Newton\")\n trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])\n return trap_powers",
"def enumerate_detector(det, thresholds, shot_ok=None, tiles=None, nimages=np.inf, startimg=0, stopimg=np.inf, correction=False, progress=True):\n global terminated\n Ncorrect = 64\n correctionphotonthres = 3000\n if not isinstance(det, h5py.Group):\n raise TypeError('det should be a h5 group')\n if tiles is None:\n tiles = [k for k in det.keys() if 'tile' in k]\n else:\n newtiles = []\n for t in tiles:\n if t in det:\n newtiles.append(t)\n elif f'tile{t}' in det:\n newtiles.append(f'tile{t}')\n else:\n raise KeyError(f'tile {t} not found')\n tiles = newtiles\n datanames = [(f'{det.name}/{t}/data') for t in tiles]\n filename = det.file.filename\n\n nshots = det[f'{tiles[0]}/data'].shape[0]\n startimg = int(np.clip(startimg, 0, nshots))\n stopimg = int(np.clip(stopimg, startimg, nshots))\n tileshape = det[f'{tiles[0]}/data'].shape[1:]\n correctmask = [correctionmask(det[t]['absfft0/mean'], Ncorrect) for t in tiles]\n if shot_ok is None:\n shot_ok = np.ones(nshots, np.bool)\n ind_filtered = 0\n data = np.zeros((len(tiles), *tileshape))\n willread = np.copy(shot_ok)\n willread[:startimg] = False\n willread[stopimg:] = False\n with datasetreader(datanames, filename, sizecache=10, willread=willread) as reader:\n\n for ind_orig in range(startimg, stopimg):\n if not shot_ok[ind_orig]:\n continue\n if ind_filtered >= nimages or terminated != 0:\n return\n if progress and ind_filtered % 10 == 0:\n print(ind_filtered, end=' ', flush=True)\n\n for it, t in enumerate(tiles):\n cdat = np.array(reader[ind_orig, it], dtype=np.float, order='C')\n if correction:\n correct(cdat, correctionphotonthres, Ncorrect, correctmask[it])\n data[it, ...] = cdat\n ev, number, scatter = getstats(data, thresholds)\n\n yield (ind_filtered, ind_orig, data, ev, number, scatter)\n\n ind_filtered += 1",
"def run(self,workspace):\n image_name = self.image_name.value\n cpimage = workspace.image_set.get_image(image_name)\n image = cpimage.pixel_data\n mask = cpimage.mask\n workspace.display_data.statistics = []\n level = int(self.atrous_level.value)\n\n wavelet = self.a_trous(1.0*image, level+1)\n wlevprod = wavelet[:,:,level-1] * 3.0\n\n spotthresh = wlevprod.mean() + float(self.noise_removal_factor.value) * wlevprod.std()\n tidx = wlevprod < spotthresh\n wlevprod[tidx] = 0\n\n wlevprod = self.circular_average_filter(wlevprod, int(self.smoothing_filter_size.value))\n wlevprod = self.smooth_image(wlevprod, mask)\n\n max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod,3)\n maxloc = (wlevprod == max_wlevprod)\n twlevprod = max_wlevprod > float(self.final_spot_threshold.value)\n maxloc[twlevprod == 0] = 0\n \n labeled_image,object_count = scipy.ndimage.label(maxloc,\n np.ones((3,3),bool))\n\n unedited_labels = labeled_image.copy()\n # Filter out objects touching the border or mask\n border_excluded_labeled_image = labeled_image.copy()\n labeled_image = self.filter_on_border(image, labeled_image)\n border_excluded_labeled_image[labeled_image > 0] = 0\n \n # Relabel the image\n labeled_image,object_count = relabel(labeled_image)\n new_labeled_image, new_object_count = self.limit_object_count(\n labeled_image, object_count)\n if new_object_count < object_count:\n # Add the labels that were filtered out into the border\n # image.\n border_excluded_mask = ((border_excluded_labeled_image > 0) |\n ((labeled_image > 0) & \n (new_labeled_image == 0)))\n border_excluded_labeled_image = scipy.ndimage.label(border_excluded_mask,\n np.ones((3,3),bool))[0]\n object_count = new_object_count\n labeled_image = new_labeled_image\n \n # Make an outline image\n outline_image = cellprofiler.cpmath.outline.outline(labeled_image)\n outline_border_excluded_image = cellprofiler.cpmath.outline.outline(border_excluded_labeled_image)\n \n if self.show_window:\n statistics = workspace.display_data.statistics\n statistics.append([\"# of accepted objects\",\n \"%d\"%(object_count)])\n\n workspace.display_data.image = image\n workspace.display_data.labeled_image = labeled_image\n workspace.display_data.border_excluded_labels = border_excluded_labeled_image\n\n # Add image measurements\n objname = self.object_name.value\n measurements = workspace.measurements\n cpmi.add_object_count_measurements(measurements,\n objname, object_count)\n # Add label matrices to the object set\n objects = cellprofiler.objects.Objects()\n objects.segmented = labeled_image\n objects.unedited_segmented = unedited_labels\n objects.parent_image = image\n \n workspace.object_set.add_objects(objects,self.object_name.value)\n cpmi.add_object_location_measurements(workspace.measurements, \n self.object_name.value,\n labeled_image)\n if self.should_save_outlines.value:\n out_img = cpi.Image(outline_image.astype(bool),\n parent_image = image)\n workspace.image_set.add(self.save_outlines.value, out_img)",
"def simImpacts(blankimage):\n\n # - Initialize variables - #\n count = 0\n unique = .001\n uniquelist = []\n cratersatstep = []\n cratermap = blankimage\n\n # -- Loop until saturation -- #\n while True:\n # - Wait until we have at least 10000 iterations before checking if we\n # have reached saturation - #\n if len(cratersatstep) > 10000:\n # - We calculate average by comparing the average of the last 1000\n # to the average of the last 100 - #\n smallAvg = np.average(cratersatstep[-100:])\n bigAvg = np.average(cratersatstep[-1000:])\n # - If we have reached saturation we can leave the loop - #\n if abs( smallAvg - bigAvg ) < (bigAvg * (1 - 0.99)):\n return cratermap, count, uniquelist, cratersatstep\n\n # - Every 1000 impacts we should save an image so we can compare - #\n if count%1000 == 0:\n pl.imshow(image)\n pl.title('Uniform Craters after '+str(int(count))+' Impactors')\n pl.savefig('../images/Uniform'+str(int(count/1000))+'.png')\n pl.clf()\n\n # --- BEGIN SIMULATION CODE --- #\n # - Increment our impactor count - #\n count += 1\n\n # - Generate the location for the center of the crater - #\n x = int(np.random.rand()*500.)\n y = int(np.random.rand()*500.)\n\n # - All of our impactors are the same size since this is our uniform sim - #\n impactsize = 10\n\n # - Pass our image array, the impact size (divided by 2 for radius)\n # origin of the impact, and a unique color value to drawCircle function - #\n cratermap = drawCircle(cratermap, int(impactsize / 2.), [x,y], unique)\n # - Get all of the unique color values still in cratermap - #\n uniquelist = np.unique(cratermap[:,:,0])\n # - Keep track of how many craters we can see at each step of the loop - #\n cratersatstep.append(len(uniquelist))\n\n # - Add to our unique value to keep it unique! - #\n unique += .001\n \n return cratermap, count , uniquelist, cratersvisible",
"def test_nirspec_aperture_transforms(verbose=False, siaf=None):\n if siaf is None:\n siaf = Siaf(instrument)\n else:\n siaf = copy.deepcopy(siaf)\n\n labels = ['X', 'Y']\n threshold = 0.2\n\n from_frame = 'sci'\n to_frames = 'det gwa idl tel'.split()\n\n x_sci = np.linspace(-10, 10, 3)\n y_sci = np.linspace(10, -10, 3)\n\n for include_tilt in [False, True]:\n\n for aper_name in siaf.apertures.keys():\n skip = False\n\n # aperture\n aperture = siaf[aper_name]\n # offset slightly from default tilt values\n\n if (aperture.AperType in ['COMPOUND', 'TRANSFORM', 'SLIT']) or ('_FULL' not in aper_name):\n skip = True\n\n if skip is False:\n if(include_tilt is True):\n # set tilt to a representative off nominal value\n gwa_aperture = getattr(aperture, '_CLEAR_GWA_OTE')\n rx0 = getattr(gwa_aperture, 'XSciRef')\n ry0 = getattr(gwa_aperture, 'YSciRef')\n aperture.tilt = (ry0 - 0.002, rx0 - 0.01)\n \n # test transformations\n if verbose:\n print('testing {} {} Tilt={}'.format(siaf.instrument, aper_name, aperture.tilt))\n\n for to_frame in to_frames:\n forward_transform = getattr(aperture, '{}_to_{}'.format(from_frame, to_frame))\n backward_transform = getattr(aperture, '{}_to_{}'.format(to_frame, from_frame))\n\n x_out, y_out = backward_transform(*forward_transform(x_sci, y_sci))\n x_mean_error = np.mean(np.abs(x_sci - x_out))\n y_mean_error = np.mean(np.abs(y_sci - y_out))\n for i, error in enumerate([x_mean_error, y_mean_error]):\n if verbose:\n print('{} {}: Error in {}<->{} {}-transform is {:02.6f})'.format(\n siaf.instrument, aper_name, from_frame, to_frame, labels[i], error))\n assert error < threshold",
"def enumerate_detector(det, thresholds, shot_ok=None, tiles=None, nimages=np.inf, stats=True, correction=False, progress=True, photonfunction=None):\n Ncorrect = 64\n correctionphotonthres = 3000\n if not isinstance(det, h5py.Group):\n raise TypeError('det should be a h5 group')\n if tiles is None:\n tiles = [k for k in det.keys() if 'tile' in k]\n else:\n newtiles = []\n for t in tiles:\n if t in det:\n newtiles.append(t)\n elif f'tile{t}' in det:\n newtiles.append(f'tile{t}')\n else:\n raise KeyError(f'tile {t} not found')\n tiles = newtiles\n multitiles = not (len(tiles) == 1 and 'data' in det[tiles[0]])\n mincorners = []\n maxcorners = []\n rots = []\n datanames = []\n filename = det.file.filename\n nshots = det[f'{tiles[0]}/data'].shape[0]\n correctmask = []\n for t in tiles:\n d = det[t]\n offset = np.rint(d.attrs['detector_tile_position_in_pixels'])\n rot = int(d.attrs['detector_rotation_steps'][0])\n rots.append(rot)\n n, a, b = d['data'].shape\n if n != nshots:\n raise ValueError('tiles should have same number of shots')\n shape = ((a, b), (-b, a), (-a, -b), (b, -a))[rot % 4]\n corners = (offset, (shape + offset))\n mincorners.append(np.min(corners, axis=0))\n maxcorners.append(np.max(corners, axis=0))\n datanames.append(f'{d.name}/data')\n if correction:\n correctmask.append(correctionmask(det[t]['absfft0/mean'], Ncorrect))\n\n globaloffset = np.floor(np.min(mincorners, axis=0)).astype(int)\n extent = [fastlen(x) for x in (np.ceil(np.max(maxcorners, axis=0)) - globaloffset)]\n startx, starty = [list(s) for s in (np.floor(mincorners - globaloffset).astype(int)).T]\n\n if shot_ok is None:\n shot_ok = np.ones(nshots, np.bool)\n assembled = np.zeros(extent, np.float64)\n global terminated\n ind_filtered = 0\n with datasetreader(datanames, filename, willread=shot_ok) if multitiles else arrayreader(det[tiles[0]]['data']) as reader:\n for ind_orig in range(nshots):\n if not shot_ok[ind_orig]:\n continue\n if ind_filtered >= nimages or terminated != 0:\n return\n if progress and ind_filtered % 100 == 0:\n print(ind_filtered, end=' ', flush=True)\n\n for t in range(len(tiles)):\n if multitiles:\n tile = np.asarray(reader[ind_orig, t], order='C', dtype=np.float64)\n if correction:\n correct(tile, correctionphotonthres, Ncorrect, correctmask[t], rots[t], assembled, startx[t], starty[t])\n else:\n place(tile, rots[t], assembled, startx[t], starty[t])\n else:\n if correction:\n tile = np.asarray(reader[ind_orig], order='C', dtype=np.float64)\n correct(tile, correctionphotonthres, Ncorrect, correctmask[t], rots[t], assembled, startx[t], starty[t])\n else:\n assembled = np.asarray(np.rot90(reader[ind_orig], rots[t]), order='C', dtype=np.float64)\n \n\n \n numberfromfunc = photonfunction(assembled) if photonfunction is not None else None\n if thresholds is not None:\n if stats:\n ev, number, scatter = getstats(assembled, thresholds)\n yield (ind_filtered, ind_orig, np.copy(assembled), ev, number, scatter, numberfromfunc)\n else:\n number = getphotons(assembled, thresholds)\n yield (ind_filtered, ind_orig, np.copy(assembled), None, number, None, numberfromfunc)\n else: \n yield (ind_filtered, ind_orig, np.copy(assembled), None, None, None, numberfromfunc)\n\n \n ind_filtered += 1",
"def eye_timings(self, nr_dummy_scans = 6, mystery_threshold = 0.05,saccade_duration_threshold = 10):\n\n\t\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\t# shell()\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr = round(niiFile.rtime*1)/1000.0\n\t\t\twith open (self.runFile(stage = 'processed/eye', run = r, extension = '.msg')) as inputFileHandle:\n\t\t\t\tmsg_file = inputFileHandle.read()\n\n\n\t\t\tsacc_re = 'ESACC\\t(\\S+)[\\s\\t]+(-?\\d*\\.?\\d*)\\t(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+.?\\d+)'\n\t\t\tfix_re = 'EFIX\\t(\\S+)\\s+(-?\\d*\\.?\\d*)\\t(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)?\\s+(-?\\d+\\.?\\d*)?\\s+(-?\\d+\\.?\\d*)?\\s+(-?\\d+\\.?\\d*)?'\n\t\t\tblink_re = 'EBLINK\\t(\\S+)\\s+(-?\\d*\\.?\\d*)\\t(-?\\d+\\.?\\d*)\\s+(-?\\d?.?\\d*)?'\n\t\t\tstart_eye = 'START\\t(-?\\d+\\.?\\d*)'\n\n\t\t\t# self.logger.info('reading eyelink events from %s', os.path.split(self.message_file)[-1])\n\t\t\tsaccade_strings = re.findall(re.compile(sacc_re), msg_file)\n\t\t\tfix_strings = re.findall(re.compile(fix_re), msg_file)\n\t\t\tblink_strings = re.findall(re.compile(blink_re), msg_file)\n\t\t\tstart_time_scan = float(re.findall(re.compile(start_eye),msg_file)[0])\n\t\t\t\n\t\t\tif len(saccade_strings) > 0:\n\t\t\t\tself.saccades_from_message_file = [{'eye':e[0],'start_timestamp':float(e[1]),'end_timestamp':float(e[2]),'duration':float(e[3]),'start_x':float(e[4]),'start_y':float(e[5]),'end_x':float(e[6]),'end_y':float(e[7]), 'mystery_measure':float(e[8]),'peak_velocity':float(e[9])} for e in saccade_strings]\n\t\t\t\tself.fixations_from_message_file = [{'eye':e[0],'start_timestamp':float(e[1]),'end_timestamp':float(e[2]),'duration':float(e[3]),'x':float(e[4]),'y':float(e[5]),'pupil_size':float(e[6])} for e in fix_strings]\n\t\t\t\tself.blinks_from_message_file = [{'eye':e[0],'start_timestamp':float(e[1]),'end_timestamp':float(e[2]),'duration':float(e[3])} for e in blink_strings]\n\t\t\t\n\t\t\t\tself.saccade_type_dictionary = np.dtype([(s , np.array(self.saccades_from_message_file[0][s]).dtype) for s in self.saccades_from_message_file[0].keys()])\n\t\t\t\tself.fixation_type_dictionary = np.dtype([(s , np.array(self.fixations_from_message_file[0][s]).dtype) for s in self.fixations_from_message_file[0].keys()])\n\t\t\t\tif len(self.blinks_from_message_file) > 0:\n\t\t\t\t\tself.blink_type_dictionary = np.dtype([(s , np.array(self.blinks_from_message_file[0][s]).dtype) for s in self.blinks_from_message_file[0].keys()])\n\t\t\t\n\t\t\teye_blinks = [[((self.blinks_from_message_file[i]['start_timestamp']- start_time_scan)/1000) - nr_dummy_scans*tr, self.blinks_from_message_file[i]['duration']/1000,1] for i in range(len(self.blinks_from_message_file)) if (self.blinks_from_message_file[i]['start_timestamp']- start_time_scan) > (nr_dummy_scans*tr*1000)]\n\t\t\t\n\t\t\t\n\t\t\tsaccades = [[((self.saccades_from_message_file[i]['start_timestamp']- start_time_scan)/1000) - nr_dummy_scans*tr, self.saccades_from_message_file[i]['duration']/1000,1] for i in range(len(self.saccades_from_message_file)) if np.all([(self.saccades_from_message_file[i]['start_timestamp']- start_time_scan) > (nr_dummy_scans*tr*1000), (self.saccades_from_message_file[i]['duration'] > saccade_duration_threshold)]) ]\n\t\t\tsaccades_thresholded = [[((self.saccades_from_message_file[i]['start_timestamp']- start_time_scan)/1000) - nr_dummy_scans*tr, 
self.saccades_from_message_file[i]['duration']/1000,1] for i in range(len(self.saccades_from_message_file)) if np.all([(self.saccades_from_message_file[i]['start_timestamp']- start_time_scan) > (nr_dummy_scans*tr*1000), (self.saccades_from_message_file[i]['mystery_measure'] > mystery_threshold), (self.saccades_from_message_file[i]['duration'] > saccade_duration_threshold)]) ]\n\t\t\n\t\t\tnp.savetxt(self.runFile(stage = 'processed/eye', run = r, extension = '.txt', postFix = ['eye_blinks']), np.array(eye_blinks), fmt = '%3.2f', delimiter = '\\t')\n\t\t\tnp.savetxt(self.runFile(stage = 'processed/eye', run = r, extension = '.txt', postFix = ['saccades']), np.array(saccades), fmt = '%3.2f', delimiter = '\\t')\n\t\t\tnp.savetxt(self.runFile(stage = 'processed/eye', run = r, extension = '.txt', postFix = ['saccades_thresholded']), np.array(saccades_thresholded), fmt = '%3.2f', delimiter = '\\t')",
"def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], 
phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)",
"def update_for_in_trap(self, t, traps): #******\n sources = traps.param['source_locations'] #Of format [(0,0),]\n for trap_num, trap_loc in enumerate(sources):\n dist_vals = distance((self.x_position, self.y_position),trap_loc)\n mask_trapped = dist_vals < traps.param['trap_radius']\n self.mode[mask_trapped] = self.Mode_Trapped\n self.trap_num[mask_trapped] = trap_num\n self.x_trap_loc[mask_trapped] = trap_loc[0]\n self.y_trap_loc[mask_trapped] = trap_loc[1]\n self.x_velocity[mask_trapped] = 0.0\n self.y_velocity[mask_trapped] = 0.0\n\n # Get time stamp for newly trapped flies\n mask_newly_trapped = mask_trapped & (self.t_in_trap == scipy.inf)\n self.t_in_trap[mask_newly_trapped] = t",
"def run(self):\n openShutter = True\n actuateXed = False\n image_type = \"PPUMP\"\n\n wl = float(self.eo_config.get(\"PPUMP_WL\", 550))\n meas_flux = self.measured_flux(wl)\n seqno = 0\n for tokens in self.instructions:\n exptime = float(tokens[1])\n nframes = int(tokens[2])\n shifts = int(tokens[3])\n for iframe in range(nframes):\n self.image_clears()\n self.bias_image(seqno)\n self.take_image(seqno, exptime, openShutter, actuateXed,\n image_type)\n seqno += 1",
"def analyze(self):\n try:\n self.options[self.multi_image][1]()\n except:\n raise Exception(\"Multi Image Option not defined.\")\n\n self.image = self.data / self.exposure\n\n background = self.min_val = np.min(self.image[:511,:511])\n self.max_val = np.max(self.image[:511,:511])\n # stats.mode returns modal value = value that occours most often\n #background = stats.mode(im[:50,:50].ravel())[0][0]\n\n intensity = self.image.sum() - background*np.size(self.image)\n\n #results.append((self.index, intensity, background))\n self.index =+ 1",
"def main():\n\n with its.device.ItsSession() as cam:\n\n props = cam.get_camera_properties()\n its.caps.skip_unless(its.caps.raw16(props) and\n its.caps.manual_sensor(props) and\n its.caps.read_3a(props) and\n its.caps.per_frame_control(props) and\n not its.caps.mono_camera(props))\n debug = its.caps.debug_mode()\n\n # Expose for the scene with min sensitivity\n exp_min, exp_max = props[\"android.sensor.info.exposureTimeRange\"]\n sens_min, _ = props[\"android.sensor.info.sensitivityRange\"]\n # Digital gains might not be visible on RAW data\n sens_max = props[\"android.sensor.maxAnalogSensitivity\"]\n sens_step = (sens_max - sens_min) / NUM_ISO_STEPS\n white_level = float(props[\"android.sensor.info.whiteLevel\"])\n black_levels = [its.image.get_black_level(i,props) for i in range(4)]\n # Get the active array width and height.\n aax = props[\"android.sensor.info.activeArraySize\"][\"left\"]\n aay = props[\"android.sensor.info.activeArraySize\"][\"top\"]\n aaw = props[\"android.sensor.info.activeArraySize\"][\"right\"]-aax\n aah = props[\"android.sensor.info.activeArraySize\"][\"bottom\"]-aay\n raw_stat_fmt = {\"format\": \"rawStats\",\n \"gridWidth\": aaw/IMG_STATS_GRID,\n \"gridHeight\": aah/IMG_STATS_GRID}\n\n e_test = []\n mult = 1.0\n while exp_min*mult < exp_max:\n e_test.append(int(exp_min*mult))\n mult *= EXP_MULT\n if e_test[-1] < exp_max * INCREASING_THR:\n e_test.append(int(exp_max))\n e_test_ms = [e / 1000000.0 for e in e_test]\n\n for s in range(sens_min, sens_max, sens_step):\n means = []\n means.append(black_levels)\n reqs = [its.objects.manual_capture_request(s, e, 0) for e in e_test]\n # Capture raw in debug mode, rawStats otherwise\n caps = []\n for i in range(len(reqs) / SLICE_LEN):\n if debug:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], raw_stat_fmt)\n last_n = len(reqs) % SLICE_LEN\n if last_n == 1:\n if debug:\n caps += [cam.do_capture(reqs[-last_n:], cam.CAP_RAW)]\n else:\n caps += [cam.do_capture(reqs[-last_n:], raw_stat_fmt)]\n elif last_n > 0:\n if debug:\n caps += cam.do_capture(reqs[-last_n:], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[-last_n:], raw_stat_fmt)\n\n # Measure the mean of each channel.\n # Each shot should be brighter (except underexposed/overexposed scene)\n for i,cap in enumerate(caps):\n if debug:\n planes = its.image.convert_capture_to_planes(cap, props)\n tiles = [its.image.get_image_patch(p, 0.445, 0.445, 0.11, 0.11) for p in planes]\n mean = [m * white_level for tile in tiles\n for m in its.image.compute_image_means(tile)]\n img = its.image.convert_capture_to_rgb_image(cap, props=props)\n its.image.write_image(img, \"%s_s=%d_e=%05d.jpg\" % (NAME, s, e_test))\n else:\n mean_image, _ = its.image.unpack_rawstats_capture(cap)\n mean = mean_image[IMG_STATS_GRID/2, IMG_STATS_GRID/2]\n\n print \"ISO=%d, exposure time=%.3fms, mean=%s\" % (\n s, e_test[i] / 1000000.0, str(mean))\n means.append(mean)\n\n\n # means[0] is black level value\n r = [m[0] for m in means[1:]]\n gr = [m[1] for m in means[1:]]\n gb = [m[2] for m in means[1:]]\n b = [m[3] for m in means[1:]]\n\n pylab.plot(e_test_ms, r, \"r.-\")\n pylab.plot(e_test_ms, b, \"b.-\")\n pylab.plot(e_test_ms, gr, \"g.-\")\n pylab.plot(e_test_ms, gb, \"k.-\")\n pylab.xscale('log')\n pylab.yscale('log')\n pylab.title(\"%s ISO=%d\" % (NAME, s))\n pylab.xlabel(\"Exposure time (ms)\")\n pylab.ylabel(\"Center patch pixel mean\")\n matplotlib.pyplot.savefig(\"%s_s=%d.png\" % (NAME, s))\n 
pylab.clf()\n\n allow_under_saturated = True\n for i in xrange(1, len(means)):\n prev_mean = means[i-1]\n mean = means[i]\n\n if np.isclose(max(mean), white_level, rtol=SATURATION_TOL):\n print \"Saturated: white_level %f, max_mean %f\"% (white_level, max(mean))\n break;\n\n if allow_under_saturated and np.allclose(mean, black_levels, rtol=BLK_LVL_TOL):\n # All channel means are close to black level\n continue\n\n allow_under_saturated = False\n # Check pixel means are increasing (with small tolerance)\n channels = [\"Red\", \"Gr\", \"Gb\", \"Blue\"]\n for chan in range(4):\n err_msg = \"ISO=%d, %s, exptime %3fms mean: %.2f, %s mean: %.2f, TOL=%.f%%\" % (\n s, channels[chan],\n e_test_ms[i-1], mean[chan],\n \"black level\" if i == 1 else \"exptime %3fms\"%e_test_ms[i-2],\n prev_mean[chan],\n INCREASING_THR*100)\n assert mean[chan] > prev_mean[chan] * INCREASING_THR, err_msg",
"def trapfilt_taps(N, phil, alfa):\n\n\n\n tt = arange(-N/2,N/2 + 1) # Time axis for h(t) \n # ***** Generate impulse response ht here *****\n ht = zeros(len(tt))\n ix = where(tt != 0)[0]\n if alfa != 0:\n ht[ix] = ((sin(2*pi*phil*tt[ix]))/(pi*tt[ix]))*((sin(2*pi*alfa*phil*tt[ix]))/(2*pi*alfa*phil*tt[ix]))\n else:\n ht[ix] = (sin(2*pi*phil*tt[ix]))/(pi*tt[ix])\n ix0 = where(tt == 0)[0]\n ht[ix0] = 2*phil\n ht = ht/sum(power(ht,2))\n\n return ht",
"def guess_image(which_cam, image, ntraps):\n threshes = [0.5, 0.65]\n ## Image Conditioning ##\n margin = 10\n threshold = np.max(image)*threshes[which_cam]\n im = image.transpose()\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n left_pos = 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n left_pos = i\n elif p < threshold and left_pos != 0:\n if first:\n pos_first = (left_pos + i) // 2\n first = False\n pos_last = (left_pos + i) // 2\n left_pos = 0\n\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n xdata = np.arange(x_len)\n plt.figure()\n plt.plot(xdata, peak_vals)\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)",
"def mri_dixon_analysis(data_objects, working_dir, settings):\n\n logger.info(\"Running Dixon analysis Calculation\")\n logger.info(\"Using settings: %s\", settings)\n\n output_objects = []\n\n fat_obj = None\n water_obj = None\n for data_obj in data_objects:\n\n if data_obj.meta_data[\"image_type\"] == \"fat\":\n fat_obj = data_obj\n\n if data_obj.meta_data[\"image_type\"] == \"water\":\n water_obj = data_obj\n\n if fat_obj is None or water_obj is None:\n logger.error(\"Both Fat and Water Images are required\")\n return []\n\n # Read the image series\n fat_load_path = fat_obj.path\n if fat_obj.type == \"DICOM\":\n fat_load_path = sitk.ImageSeriesReader().GetGDCMSeriesFileNames(fat_obj.path)\n fat_img = sitk.ReadImage(fat_load_path)\n\n water_load_path = water_obj.path\n if water_obj.type == \"DICOM\":\n water_load_path = sitk.ImageSeriesReader().GetGDCMSeriesFileNames(water_obj.path)\n water_img = sitk.ReadImage(water_load_path)\n\n # Cast to float for calculation\n fat_img = sitk.Cast(fat_img, sitk.sitkFloat32)\n water_img = sitk.Cast(water_img, sitk.sitkFloat32)\n\n # Let's do the calcuation using NumPy\n fat_arr = sitk.GetArrayFromImage(fat_img)\n water_arr = sitk.GetArrayFromImage(water_img)\n\n # Do the calculation\n divisor = water_arr + fat_arr\n fat_fraction_arr = (fat_arr * 100) / divisor\n fat_fraction_arr[divisor == 0] = 0 # Sets those voxels which were divided by zero to 0\n water_fraction_arr = (water_arr * 100) / divisor\n water_fraction_arr[divisor == 0] = 0 # Sets those voxels which were divided by zero to 0\n\n fat_fraction_img = sitk.GetImageFromArray(fat_fraction_arr)\n water_fraction_img = sitk.GetImageFromArray(water_fraction_arr)\n\n fat_fraction_img.CopyInformation(fat_img)\n water_fraction_img.CopyInformation(water_img)\n\n # Create the output Data Objects and add it to output_ob\n fat_fraction_file = os.path.join(working_dir, \"fat.nii.gz\")\n sitk.WriteImage(fat_fraction_img, fat_fraction_file)\n water_fraction_file = os.path.join(working_dir, \"water.nii.gz\")\n sitk.WriteImage(water_fraction_img, water_fraction_file)\n\n fat_data_object = DataObject(type=\"FILE\", path=fat_fraction_file, parent=fat_obj)\n output_objects.append(fat_data_object)\n\n water_data_object = DataObject(type=\"FILE\", path=water_fraction_file, parent=water_obj)\n output_objects.append(water_data_object)\n\n return output_objects",
"def imagetest(thetainput,doubleopponencyinput):\n theta = thetainput\n rgcMode = doubleopponencyinput\n\n\n C = retina.sample(img,x,y,coeff[i],loc[i],rgb=True) # CENTRE\n S = retina.sample(img,x,y,dcoeff[i],dloc[i],rgb=True) # SURROUND\n \n if rgcMode == 0:\n \tpV,nV = rgc.opponency(C,S,theta)\n else:\n \tpV,nV = rgc.doubleopponency(C,S,theta)\n cv2.namedWindow(\"Input\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Input\", img)\n rIntensity,cIntensity = showNonOpponency(C,theta)\n cv2.namedWindow(\"Intensity Responses\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Intensity Responses\", rIntensity)\n cv2.namedWindow(\"Intensity Responses Cortex\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Intensity Responses Cortex\", cIntensity)\n cv2.waitKey(0)\n #Generate backprojected images\n if showInverse:\n rOpponent = showBPImg(pV,nV)\n cv2.namedWindow(\"Backprojected Opponent Cells Output\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Backprojected Opponent Cells Output\", rOpponent)\n cv2.waitKey(0)\n # Cortex\n if showCortex:\n cOpponent = showCortexImg(pV,nV)\n cv2.namedWindow(\"Cortex Opponent Cells Output\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Cortex Opponent Cells Output\", cOpponent)\n cv2.waitKey(0)",
"def calculate_uip(vpx, raster, weight, neuron, tau):\n\n m = 1\n\n vpx[0] = weight[neuron, raster[0][\"id\"]]\n\n for k, evt in enumerate(raster[1:], 1):\n\n dt = evt[\"time\"] - raster[k - 1][\"time\"]\n\n if not dt:\n m -= 1\n else:\n vpx[m] = vpx[m - 1] * np.exp(-dt / tau)\n\n vpx[m] += weight[neuron, evt[\"id\"]]\n\n m += 1",
"def stack_tir(scene_urls,cloud_mask_bits,aoi,aoi_crs,\n subtract_median_lst=True,subtract_air_temp=False):\n if subtract_air_temp:\n ceda_password = get_ceda_password()\n at = met_climate.access_ukcp09(cf.ceda_username,ceda_password)\n\n \n # with rasterio.open(scene_bqa) as bqa:\n # with rasterio.open(scene_tir) as tir:\n\n # bqa_data,bqa_trans = ru.read_in_aoi(bqa,**aoi_kwargs)\n # tir_data,tir_trans = ru.read_in_aoi(tir,**aoi_kwargs)\n \n # bqa_data = bqa_data[0,:,:]\n # tir_data = tir_data[0,:,:]\n # tir_data = ma.array(tir_data,dtype=float,\n # mask=ru.mask_qa(bqa_data,bitmask=0b1))\n\n # (ymin,ymax) = (0, tir_data.shape[0])\n # (xmin,xmax) = (0, tir_data.shape[1])\n \n counter=-1\n for scene_url in scene_urls:\n counter+=1\n scene_tir = scene_url\n scene_bqa = scene_url.replace('B'+tirband,'B'+qaband)\n scene_red = scene_url.replace('B'+tirband,'B'+rband)\n scene_nir = scene_url.replace('B'+tirband,'B'+nband)\n scene_metadata = scene_url.replace('B'+tirband+'.TIF','MTL.txt')\n\n print('Reading scene {}'.format(counter+1))\n try:\n with rasterio.open(scene_bqa) as bqa:\n #print(scene_bqa)\n bqa_data,bqa_trans = ru.read_in_aoi(bqa,aoi=aoi,aoi_crs=aoi_crs)\n\n with rasterio.open(scene_tir) as tir:\n #print(scene_tir)\n tir_data,tir_trans = ru.read_in_aoi(tir,aoi=aoi,aoi_crs=aoi_crs)\n tir_crs = tir.crs\n tir_profile = tir.profile\n\n with rasterio.open(scene_red) as red:\n #print(scene_red)\n red_data,red_trans = ru.read_in_aoi(red,aoi=aoi,aoi_crs=aoi_crs)\n red_crs = red.crs\n\n with rasterio.open(scene_nir) as nir:\n #print(scene_nir)\n nir_data,nir_trans = ru.read_in_aoi(nir,aoi=aoi,aoi_crs=aoi_crs)\n \n except OSError as e:\n print('ERROR',e)\n print('skipping scene')\n counter = counter-1\n continue\n \n # Determine size of stack allowing for AoI to extend outside of scene\n if counter == 0:\n aoi_box = rasterio.warp.transform_bounds(aoi_crs,tir_crs,*aoi.values())\n aoi_left, aoi_bottom, aoi_right, aoi_top = aoi_box\n aoi_box = dict(zip(('minx','miny','maxx','maxy'),aoi_box))\n # rowmin,colmin = (bqa.index(aoi_left,aoi_top)) #,op=round))\n # rowmax,colmax = (bqa.index(aoi_right,aoi_bottom)) #,op=round))\n # The above two lines are fine but the following does not \n # require the rasterio dataset to be kept open\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,aoi_left,aoi_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,aoi_right,aoi_bottom)\n stack_height,stack_width = (rowmax-rowmin,colmax-colmin)\n lst_stack = (ma.zeros((len(scene_urls),stack_height,stack_width),\n dtype=np.float,fill_value=np.nan\n )+np.nan) \n \n # Determine size of intersect in THIS scene\n intersect = ru.aoi_scene_intersection(aoi_box,bqa)\n ins_left, ins_bottom, ins_right, ins_top = intersect.bounds\n #rowmin,colmin = (bqa.index(ins_left,ins_top,op=round))\n #rowmax,colmax = (bqa.index(ins_right,ins_bottom,op=round))\n # The above two lines are incorrect now that we read a window:\n # We need to transform the coordinates into the row,col of \n # the window, not the original file.\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,ins_left,ins_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,ins_right,ins_bottom)\n\n try:\n # Subset data \n bqa_data = ma.array(bqa_data[0,rowmin:rowmax,colmin:colmax])\n tir_data = ma.array(tir_data[0,rowmin:rowmax,colmin:colmax])\n red_data = ma.array(red_data[0,rowmin:rowmax,colmin:colmax])\n nir_data = ma.array(nir_data[0,rowmin:rowmax,colmin:colmax])\n assert tir_data.shape == lst_stack.shape[1:]\n except (IndexError,AssertionError) as e:\n 
print('ERROR:',e)\n print('loop count',counter)\n print(tir_data.shape, lst_stack.shape)\n print(rowmin,rowmax,colmin,colmax)\n import pdb; pdb.set_trace()\n\n lst_data = lst.calculate_land_surface_temperature_NB(\n red_data, nir_data, tir_data,\n red_trans, tir_trans, \n red_crs, tir_crs, scene_metadata\n )\n \n # Masks\n smw = 11\n mask_all = filters.maximum_filter(\n ru.mask_qa(bqa_data,bits=cloud_mask_bits),size=smw\n )\n\n lst_data_mask_all = ma.array(lst_data,\n mask=mask_all,\n dtype=np.float,\n fill_value=np.nan) #.filled()\n\n # After masking, reproject\n # not necessary if they share a CRS\n if counter > 0:\n assert tir_crs == prev_crs\n prev_crs = tir_crs\n\n # Now do some normalisation\n if subtract_air_temp:\n filename = scene_tir.split('/')[-1]\n datestring = filename.split('_')[3]\n\n atscene = met_climate.dummy_scene( \n tir_crs, tir_trans, aoi_box,(stack_height,stack_width))\n\n # import pdb; pdb.set_trace()\n # If the following fails, it may mean there was a problem setting up the session\n atdata = at.grid_temp_over_scene(\n atscene, datestring, interpolation='linear')\n atdata = atdata[rowmin:rowmax,colmin:colmax]\n assert lst_data_mask_all.shape == atdata.shape\n lst_data_mask_all = ma.array(\n lst_data_mask_all - atdata,\n mask=mask_all,\n fill_value=np.nan)\n \n if subtract_median_lst:\n # ALSO subtract median xLST\n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n elif subtract_median_lst:\n # Subtract median LST from scene (within QA mask) \n \n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n # Then add to stack\n lst_stack[counter,:,:] = lst_data_mask_all\n\n # Make profile for file output\n N_layers = counter+1\n tir_profile.update(\n dtype=rasterio.float64,\n width=stack_width,\n height=stack_height,\n transform=tir_trans,\n count=N_layers,\n compress='lzw'\n )\n\n\n return lst_stack, tir_profile",
"def tail_cts_per_shot(datapath, lower, TPQI_starts, bin_size = 0.256, normalize = False, correct_for_bg = True, save = 1, pulses_in_sequence = 300):\n\n print 'analyzing tail counts per shot...' \n current_dir = os.getcwd()\n plt.close('all')\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n\n ch1_counts = data['hist_ch1']\n ch0_counts = data['hist_ch0']\n\n time = bin_size*arange(len(ch1_counts))\n \n if correct_for_bg:\n bg_level_ch1 = ch1_counts[int(0.75*len(ch1_counts)):int(0.90*len(ch1_counts))].mean()\n ch1_counts = ch1_counts - bg_level_ch1*ones(len(ch1_counts))\n bg_level_ch0 = ch0_counts[int(0.75*len(ch0_counts)):int(0.90*len(ch0_counts))].mean()\n ch0_counts = ch0_counts - bg_level_ch0*ones(len(ch0_counts))\n\n #print 'measured background level for [ch0,ch1] = ['+num2str(bg_level_ch0,1)+','+num2str(bg_level_ch1,1)+']'\n\n if normalize:\n ch1_counts_normalized = ch1_counts/ch1_counts.max()\n ch0_counts_normalized = ch0_counts/ch0_counts.max()\n \n upper = lower + 40.0\n\n tail_area_time = time[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch1 = ch1_counts[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch0 = ch0_counts[int(lower/bin_size):int(upper/bin_size)]\n\n tail_counts_per_shot = (tail_area_ch1.sum()+tail_area_ch0.sum())/float(TPQI_starts*pulses_in_sequence)\n\n figure1 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(211)\n if not normalize:\n plt.semilogy(time, ch1_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch1_counts_normalized, '-r')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch1')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n\n plt.subplot(212)\n if not normalize:\n plt.semilogy(time, ch0_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch0_counts_normalized, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch0')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n if save:\n figure1.savefig('tail_cts_per_shot.pdf')\n\n try:\n data.close()\n except:\n pass\n\n print 'tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4'\n\n return tail_counts_per_shot",
"def transform(self, dataset, number_of_thresholds=1,\n enable_valley_emphasis=False):\n\n # Initial progress\n self.progress.value = 0\n self.progress.maximum = 100\n\n # Approximate percentage of work completed after each step in the\n # transform\n STEP_PCT = [10, 20, 70, 90, 100]\n\n try:\n import itk\n import itkExtras\n import itkTypes\n from tomviz import itkutils\n except Exception as exc:\n print(\"Could not import necessary module(s)\")\n raise exc\n\n # Return values\n returnValues = None\n\n # Add a try/except around the ITK portion. ITK exceptions are\n # passed up to the Python layer, so we can at least report what\n # went wrong with the script, e.g,, unsupported image type.\n try:\n self.progress.value = STEP_PCT[0]\n self.progress.message = \"Converting data to ITK image\"\n\n # Get the ITK image\n itk_image = itkutils.dataset_to_itk_image(dataset)\n itk_input_image_type = type(itk_image)\n\n # OtsuMultipleThresholdsImageFilter's wrapping requires that the\n # input and output image types be the same.\n itk_threshold_image_type = itk_input_image_type\n\n # Otsu multiple threshold filter\n otsu_filter = itk.OtsuMultipleThresholdsImageFilter[\n itk_input_image_type, itk_threshold_image_type].New()\n otsu_filter.SetNumberOfThresholds(number_of_thresholds)\n otsu_filter.SetValleyEmphasis(enable_valley_emphasis)\n otsu_filter.SetInput(itk_image)\n itkutils.observe_filter_progress(self, otsu_filter,\n STEP_PCT[1], STEP_PCT[2])\n\n try:\n otsu_filter.Update()\n except RuntimeError:\n return\n\n print(\"Otsu threshold(s): %s\" % (otsu_filter.GetThresholds(),))\n\n itk_image_data = otsu_filter.GetOutput()\n\n # Cast threshold output to an integral type if needed.\n py_buffer_type = itk_threshold_image_type\n voxel_type = itkExtras.template(itk_threshold_image_type)[1][0]\n if voxel_type is itkTypes.F or voxel_type is itkTypes.D:\n self.progress.message = \"Casting output to integral type\"\n\n # Unsigned char supports 256 labels, or 255 threshold levels.\n # This should be sufficient for all but the most unusual use\n # cases.\n py_buffer_type = itk.Image.UC3\n caster = itk.CastImageFilter[itk_threshold_image_type,\n py_buffer_type].New()\n caster.SetInput(itk_image_data)\n itkutils.observe_filter_progress(self, caster,\n STEP_PCT[2], STEP_PCT[3])\n\n try:\n caster.Update()\n except RuntimeError:\n return\n\n itk_image_data = caster.GetOutput()\n\n self.progress.value = STEP_PCT[3]\n self.progress.message = \"Saving results\"\n\n label_map_dataset = dataset.create_child_dataset()\n itkutils.set_itk_image_on_dataset(itk_image_data, label_map_dataset,\n dtype=py_buffer_type)\n\n self.progress.value = STEP_PCT[4]\n\n # Set up dictionary to return operator results\n returnValues = {}\n returnValues[\"label_map\"] = label_map_dataset\n\n except Exception as exc:\n print(\"Problem encountered while running %s\" %\n self.__class__.__name__)\n raise exc\n\n return returnValues",
"def process_observation(self, observation):\n #print(\"start_process_obs\")\n processed_observation = np.zeros((NB_AGENTS, OBSERVATION_SIZE))\n\n goliath_type = getattr(env, 'Terran_Goliath')\n battlecruiser_type = getattr(env, 'Terran_Battlecruiser')\n '''\n goliath and battlecruiser type:\n hp_max: 125\n armor: 1\n cooldown_max: 22\n acceleration: 1\n top_speed: 4.57\n damage_amount: 12\n damage_factor: 1\n weapon_range: 192\n sight_range: 256\n seek_range: 160\n\n hp_max: 500\n energy_max: 200\n armor: 3\n cooldown_max: 30\n acceleration: 27\n top_speed: 2.5\n damage_amount: 25\n damage_factor: 1\n weapon_range: 192\n sight_range: 352\n '''\n #print(\"goliath and battlecruiser type:\")\n #print(goliath_type)\n #print(battlecruiser_type)\n\n for i, agent in enumerate(observation.my_unit):\n if agent.hp <= 0:\n continue\n my_x = agent.pos_x\n my_y = agent.pos_y\n my_type_str = agent.unit_type\n my_type = goliath_type if my_type_str == 'Terran_Goliath' else print(\"error in the my_type\")\n t1 = [agent.hp + agent.shield, agent.cooldown, math.atan2(agent.velocity_y, agent.velocity_x),\n math.sqrt((agent.velocity_x) ** 2 + (agent.velocity_y) ** 2), agent.angle,\n 1 if agent.accelerating else -1 if agent.braking else 0, agent.attacking, agent.is_attack_frame]\n t2 = [self.last_action[i] / (env.action_space[1] - 1)]\n t3 = [i.nearest_obstacle_dist for i in agent.pos_info]\n t4 = []\n t5 = []\n t4_max = []\n t5_max = []\n for idx, enemy in enumerate(observation.en_unit):\n en_type_str = enemy.unit_type\n if en_type_str == 'Terran_Battlecruiser':\n en_type = battlecruiser_type\n else:\n continue \n if enemy.hp <= 0:\n t4.extend([0,0,0,0,0,0,0,0,0,0])\n else:\n t4.extend([math.atan2(enemy.pos_y - my_y, enemy.pos_x - my_x), math.sqrt((enemy.pos_x - my_x) ** 2 + (enemy.pos_y - my_y) ** 2),\n math.atan2(enemy.velocity_y, enemy.velocity_x), math.sqrt((enemy.velocity_x) ** 2 + (enemy.velocity_y) ** 2),\n enemy.cooldown, enemy.hp + enemy.shield, enemy.angle, 1 if agent.accelerating else -1 if agent.braking else 0, agent.attacking, agent.is_attack_frame])\n t4_max.extend([math.pi, 320, math.pi, en_type.top_speed, en_type.cooldown_max, en_type.hp_max + en_type.shield_max, math.pi, 1, 1, 1])\n for idx, ally in enumerate(observation.my_unit):\n if i == idx:\n continue\n if ally.hp <= 0:\n t5.extend([0,0,0,0,0])\n else:\n t5.extend([math.atan2(ally.pos_y - my_y, ally.pos_x - my_x), math.sqrt((ally.pos_x - my_x) ** 2 + (ally.pos_y - my_y) ** 2),\n math.atan2(ally.velocity_y, ally.velocity_x), math.sqrt((ally.velocity_x) ** 2 + (ally.velocity_y) ** 2), ally.hp + ally.shield])\n ally_type = goliath_type\n t5_max.extend([math.pi, 320, math.pi, ally_type.top_speed, ally_type.hp_max + ally_type.shield_max])\n if my_type_str == 'Terran_Goliath':\n t1_max = [my_type.hp_max + my_type.shield_max, 1, math.pi, my_type.top_speed, math.pi, 1, 1, 1]\n else:\n t1_max = [my_type.hp_max + my_type.shield_max, my_type.cooldown_max, math.pi, my_type.top_speed, math.pi, 1, 1, 1]\n #t4_max = [math.pi, 320, math.pi, en_type.top_speed, en_type.cooldown_max, en_type.hp_max + en_type.shield_max, math.pi, 1, 1, 1]\n #t5_max = [math.pi, 320, math.pi, ally_type.top_speed, ally_type.hp_max + ally_type.shield_max]\n\n #t5_max = [32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, math.pi,\n #32, 32, type.hp_max + type.shield_max, math.pi,\n #32, 32, type.hp_max + 
type.shield_max, math.pi]\n\n t1 = np.divide(t1, t1_max) # runtime warning\n t2 = np.array(t2) / 320\n t3 = np.array(t3) / 320\n t4 = np.divide(t4, t4_max)\n t5 = np.divide(t5, t5_max)\n\n processed_observation[i] = np.concatenate([t1, t2, t3, t4, t5])\n\n self.last_my_unit_cnt.append(np.sum(np.array([u.hp+u.shield for u in observation.my_unit]) > 0))\n self.last_enemy_unit_cnt.append(np.sum(np.array([u.hp+u.shield for u in observation.en_unit]) > 0))\n self.last_enemy_unit_hp.append(sum([u.hp + u.shield for u in observation.en_unit]))\n self.accumulated_observation.append(processed_observation)\n\n\n return processed_observation",
"def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")",
"def photometry(userinputs, image, catalog, outputname, apertures, annulus='', dannulus='', recenter=False):\n logging.info('Running photometry function on {}'.format(image))\n logging.info('Using {}px apertures'.format(apertures))\n\n #set directory\n target_dir = userinputs['OUTDIR']\n\n #Update passed names to be full paths if they are not\n\n if len(image.split('/'))==1:\n logging.info('Looking for {} in {}.'.format(image,userinputs['DATA']))\n image = glob.glob(userinputs['DATA'] + '/' + image)\n if len(image)==0:\n logging.critical('No {} image found'.format(image))\n filemanagement.shutdown('Selected image does not exist',userinputs)\n else:\n image = image[0]\n logging.debug('Using image: {}'.format(image))\n\n if len(catalog.split('/'))==1:\n catalog = target_dir + '/init/' + catalog\n logging.debug('Input catalog: {}'.format(catalog))\n\n if len(outputname.split('/'))==1:\n output = target_dir + '/photometry/' + outputname\n logging.debug('Output name: {}'.format(output))\n else:\n output = outputname\n outputname = outputname.split('/')[-1]\n logging.debug('Output name: {}'.format(output))\n\n\n #Load zeropoints\n inst_zp, filter_zp, zp_zp = np.loadtxt(target_dir + '/init/Hi-PEEC_zeropoints.tab', unpack=True, dtype='str')\n # print inst_zp, filter_zp, zp_zp\n # Get filter from header\n filter = get_filter(image)\n\n\n # Set the necessary variables for photometry on the reference image\n exptime = fits.getheader(image)['EXPTIME']\n logging.debug('Exposure time from header: {}'.format(exptime))\n inst = fits.getheader(image)['INSTRUME']\n logging.debug('Intrument from header: {}'.format(inst))\n inst = inst.lower()\n\n\n match = (inst_zp == inst) & (filter_zp == filter.lower())\n zp = zp_zp[match]\n\n # zp is a string within an array, so need to turn into a float\n try:\n zp = float(zp[0])\n #If that cannot be done there was no match.\n except IndexError:\n if inst == 'acs':\n logging.debug('Zeropoint not found in file, passing to ACS calculation')\n zp = ACS_zeropoint(image)\n elif inst == 'wfc3':\n logging.debug('Zeropoint not found in file, passing to WFC3 calculation')\n zp = WFC3_zeropoint(image)\n else:\n logging.critical('No matching zeropoint found. 
Quitting.')\n logging.debug('No zeropoint match found for filter {} with instrument {}'\\\n .format(filter,inst))\n logging.debug('Available filters in zeropoint file : {} for instrument {}'\\\n .format(filter_zp, inst_zp))\n filemanagement.shutdown('No zeropoint was found for filter: {}'.format(filter),userinputs)\n\n logging.debug('Zeropoint from file: {}'.format(zp))\n # Remove output file if it already exists\n filemanagement.remove_if_exists(output)\n\n\n # Run photometry\n #--------------------------------------------------------------------------\n # Set up IRAF params:\n iraf.datapars.epadu = exptime\n\n # !!!!!!!!!!!!!!!!!\n # Only center on reference frame\n if recenter:\n iraf.centerpars.calgorithm = 'centroid'\n else:\n iraf.centerpars.calgorithm = 'none'\n # !!!!!!!!!!!!!!!\n # CHANGE BACKGROUND ESTIMATE IN ANNULUS TO MODE\n\n # Select the annulus depending on whether it is overwritten in the function call or not\n if annulus == '':\n iraf.fitskypars.annulus = userinputs['ANNULUS']\n logging.debug('Using annulus from inputfile ({}px)'.format(userinputs['ANNULUS']))\n else:\n iraf.fitskypars.annulus = annulus\n logging.debug('Using user specified annulus ({}px)'.format(annulus))\n if dannulus == '':\n iraf.fitskypars.dannulus = userinputs['D_ANNULUS']\n logging.debug('Using annulus width from inputfile ({}px)'.format(userinputs['D_ANNULUS']))\n else:\n iraf.fitskypars.dannulus = dannulus\n logging.debug('Using user specified annulus width ({}px)'.format(dannulus))\n\n iraf.photpars.apertures = apertures\n logging.debug('Using aperture(s) of {}px'.format(apertures))\n iraf.photpars.zmag = zp\n logging.debug('Setting zeropoint to {}'.format(zp))\n\n # Do phot\n iraf.phot(image+'[SCI]', catalog, output)\n #--------------------------------------------------------------------------\n\n\n #Depending on the number of apertures used, different methods of saving the\n # results are required\n #--------------------------------------------------------------------------\n\n naper = len(apertures.split(','))\n logging.debug('Number of apertures used {}'.format(naper))\n\n #final output filename\n fullcat_mag_short = target_dir + '/photometry/short_' + outputname\n\n if naper > 1:\n # Removes all outputlines that do not contain the character '*'\n # ensures only phot results are kept\n cmd = 'grep \"*\" ' + output + ' > ' + fullcat_mag_short\n os.system(cmd)\n\n # Replace INDEFS:\n cmd = 'sed -i.bak \"s/INDEF/99.999/g\" ' + fullcat_mag_short\n os.system(cmd)\n\n # Remove .bak files to prevent confusion\n bak_fullcat = fullcat_mag_short + '.bak'\n os.remove(bak_fullcat)\n\n\n else:\n #Dump results into a temp file\n temp = target_dir + '/photometry/phot_dump.mag'\n filemanagement.remove_if_exists(temp)\n iraf.txdump(output, 'XCENTER,YCENTER,FLUX,MAG,MERR,MSKY,ID', 'yes', Stdout = temp)\n\n # Set placeholders for sources outside of FOV and undetected sources\n # For outside of FOV, use 66.666 instead of INDEF\n # For undetected sources, use 99.999 instead of INDEF\n\n # Sources outside of FOV have exactly zero flux\n x, y, flux, mag, merr, msky, id = np.loadtxt(temp, unpack = True,\n dtype = str)\n\n flux = flux.astype(float)\n\n out_fov = (flux == 0.)\n logging.debug('Number of sources outside FOV: {}'.format(len(out_fov)))\n\n mag[out_fov] = 66.666\n merr[out_fov] = 66.666\n msky[out_fov] = 66.666\n\n # Undetected sources, those with negative flux or fluxes so small that mag err\n # is INDEF\n neg_flux = (flux < 0.)\n tiny_flux = (flux > 0.) 
& (merr == 'INDEF')\n\n mag[neg_flux] = 99.999\n merr[neg_flux] = 99.999\n msky[neg_flux] = 99.999\n\n merr[tiny_flux] = 99.999\n msky[tiny_flux] = 99.999\n\n logging.debug('Nr of undetected sources: {}'.format(len(tiny_flux)+len(neg_flux)))\n # Save results to new file\n x = x.astype(float)\n y = y.astype(float)\n mag = mag.astype(float)\n merr = merr.astype(float)\n msky = msky.astype(float)\n id = id.astype(int)\n\n zip_phot = zip(x, y, mag, merr, msky, id)\n\n np.savetxt(fullcat_mag_short, zip_phot,\n fmt = '%.3f %.3f %.3f %.3f %.9f %i')\n\n #--------------------------------------------------------------------------\n\n return fullcat_mag_short",
"def main():\n camera = picamera.PiCamera()\n camera.resolution = (RESOLUTIONX, RESOLUTIONY)\n camera.iso = 800\n time.sleep(2)\n while True:\n camera.capture('current-image.jpg')\n adapt_steering(navigation.get_xposition('current-image.jpg'))\n time.sleep(0.4)",
"def process_tir_image(ds, data_res, t_thresh=-50, min_mcs_size=5000):\n ctt = (ds['tb']).squeeze()-273.15\n min_pix_nb = min_mcs_size / data_res**2\n\n max_pix_nb = 300000 / data_res**2 # this is to capture satellite artefacts that come in large contiguous stripes.\n labels, goodinds = mcs_define(ctt.values, t_thresh, minmax_area=[min_pix_nb, max_pix_nb]) # 7.7x7.7km = 64km2 per pix in gridsat? 83 pix is 5000km2\n dic = dictionary()\n #plt.figure()\n #plt.pcolormesh(labels)\n #plt.colorbar()\n #plt.show()\n for g in goodinds:\n\n if g==0:\n continue\n\n pos = np.where(labels==g)\n npos = np.where(labels!=g)\n datestr = str(int(ctt['time.year'].values))+'-'+str(int(ctt['time.month'].values)).zfill(2)+'-'+str(int(ctt['time.day'].values)).zfill(2)+'_'+\\\n str(int(ctt['time.hour'].values)).zfill(2)+':'+str(int(ctt['time.minute'].values)).zfill(2)\n \n dic['date'].append(datestr)\n dic['month'].append(int(ctt['time.month']))\n dic['hour'].append(int(ctt['time.hour']))\n dic['year'].append(int(ctt['time.year']))\n dic['day'].append(int(ctt['time.day']))\n dic['minute'].append(int(ctt['time.minute']))\n\n storm = ctt.copy()\n storm.values[npos] = np.nan\n tmin_pos = np.nanargmin(storm.values)\n tpos_2d = np.unravel_index(tmin_pos, storm.shape)\n \n latmin = np.nanmin(ctt.lat.values[pos[0]])\n latmax = np.nanmax(ctt.lat.values[pos[0]])\n lonmin = np.nanmin(ctt.lon.values[pos[1]])\n lonmax = np.nanmax(ctt.lon.values[pos[1]])\n dic['area'].append(np.sum(np.isfinite(storm.values))*data_res**2)\n dic['70area'].append(np.sum(storm.values<=-70)*data_res**2)\n dic['minlon'].append(lonmin)\n dic['minlat'].append(latmin)\n dic['maxlon'].append(lonmax)\n dic['maxlat'].append(latmax)\n dic['clon'].append(lonmin + (lonmax - lonmin)/2)\n dic['clat'].append(latmin + (latmax - latmin)/2)\n dic['tmin'].append(np.nanmin(storm))\n dic['tminlat'].append(float(ctt.lat[tpos_2d[0]].values))\n dic['tminlon'].append(float(ctt.lon[tpos_2d[1]].values))\n dic['tmean'].append(float(np.nanmean(storm)))\n dic['tp1'].append(float(np.nanpercentile(storm, 1)))\n dic['tp99'].append(float(np.nanpercentile(storm, 99)))\n dic['stormID'].append(datestr + '_' + str(g))\n dic['cloudMask'].append(labels==g)\n dic['tir'].append(storm.values)\n\n # for k in dic.keys():\n # print(k, len(dic[k]))\n return dic",
"def calculate_thresholds(self):\n \n for group in self.roi_groups:\n for roi in group.rois:\n for image in range(len(roi.counts)):\n # print(roi.autothreshs)\n # print('image',image)\n if roi.autothreshs[image]:\n values = np.fromiter(roi.counts[image].values(), dtype=float)\n roi.thresholds[image] = self.calculate_threshold(values)\n\n for image, im_copy in enumerate(self.copy_im_threshs): # copy values from a different image and set to manual thresh if needed\n if im_copy is not None:\n for group in self.roi_groups:\n for roi in group.rois:\n roi.autothreshs[image] = False\n roi.thresholds[image] = roi.thresholds[im_copy]",
"def calculate_dark_current(image, i, int_time):\n dark_data_dir = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Integration_Sweep\\Dark'\n data_path_name_split = image.split('_')\n #print(data_path_name_split)\n all_int_files = [each for each in os.listdir(dark_data_dir) \\\n if each.endswith('_'+data_path_name_split[-1])] \n print(all_int_files)\n \n dark_data_file = os.path.join(dark_data_dir, all_int_files[0])\n IDL_variable = readsav(dark_data_file) \n all_full_frame = IDL_variable.q \n quad = all_full_frame[:, i, :, :]\n active_quad = np.mean(quad[:, 4:1028, 10:1034], axis=0) \n tsoc = np.mean(quad[:, 4:1028, 1034:1056], axis=0)\n bias_subtracted_quad = perform_bias_subtraction_ave(active_quad, tsoc)\n smear_subtracted_quad, smear_signal = perform_smear_subtraction(bias_subtracted_quad[10:1000, :], int_time)\n return smear_subtracted_quad",
"def process_sample(self, ch, method, properties, body):\n\n # data inside a dictionary\n # {'amplitude':[1.3,4.4,5...],\n # 'angle': [0.04,0.1,...]}\n sample_dict = self.deserialize_vtt_60_processed(body)\n\n with open('sample_test_run.txt', 'a') as f:\n x_arrstr = np.char.mod('%d', sample_dict['amplitude'])\n\n # x_arrstr -> should be 2d array \"frame\".\n raw_data = \",\".join(x_arrstr.flatten())\n f.write(raw_data)\n f.write('\\n')\n\n arr = np.array(sample_dict['amplitude'])\n\n frame = np.reshape(arr, (180,110))\n frame_transposed_flipped = np.flip(np.transpose(frame))\n frame_transposed_flipped = np.flip(np.transpose(frame))\n details_removed = frame_transposed_flipped\n details_removed = np.clip(frame_transposed_flipped, a_min=frame_transposed_flipped.max() - 15, a_max=None)\n self.i = self.i + 1\n if self.i % 10 == 0:\n b = sum(pd.DataFrame(frame).max().diff() / pd.DataFrame(frame).max() > 0.13)\n self.report_people(b)\n\n\n if body is None or sample_dict is None:\n print('Stream stopped.')\n return\n if self.samples_in_total % self.fps == 0:\n endTime = time.time()\n print('FPS: {:.1f}'.format(self.fps/(endTime - self.startTime)))\n self.startTime = endTime\n \n # your code here\n amplitude = sample_dict['amplitude']\n amplitude = np.reshape(amplitude,(180,110))\n amplitude = np.where(amplitude>130,130,amplitude)\n amplitude = np.uint8(255*amplitude/130)\n out = cv2.cvtColor(amplitude,cv2.COLOR_GRAY2BGR)\n out = cv2.applyColorMap(out,cv2.COLORMAP_MAGMA)\n cv2.imshow('junction',out)\n cv2.waitKey(10)",
"def optimize_trap(dg):\n f_peak = './temp_peak.lh5' # lh5\n f_results = './temp_results.h5' # pandas\n grp_data, grp_grid = '/optimize_data', '/optimize_grid'\n \n # epar, elo, ehi, epb = 'energy', 0, 1e7, 10000 # full range\n epar, elo, ehi, epb = 'energy', 3.88e6, 3.92e6, 500 # K40 peak\n \n show_movie = True\n write_output = True\n n_rows = None # default None\n \n with open('opt_trap.json') as f:\n dsp_config = json.load(f, object_pairs_hook=OrderedDict)\n \n # files to consider. fixme: right now only works with one file\n sto = lh5.Store()\n lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n f_raw = raw_list.values[0] \n tb_raw = 'ORSIS3302DecoderForEnergy/raw/'\n\n # quick check of the energy range\n # ene_raw = sto.read_object(tb_raw+'/'+epar, f_raw).nda\n # hist, bins, var = pgh.get_hist(ene_raw, range=(elo, ehi), dx=epb)\n # plt.plot(bins[1:], hist, ds='steps')\n # plt.show()\n # exit()\n \n # set grid parameters\n # TODO: jason's suggestions, knowing the expected shape of the noise curve\n # e_rises = np.linspace(-1, 0, sqrt(sqrt(3))\n # e_rises # make another list which is 10^pwr of this list\n # np.linspace(log_tau_min, log_tau_max) # try this too\n e_rises = np.arange(1, 12, 1)\n e_flats = np.arange(1, 6, 1)\n # rc_consts = np.arange(54, 154, 10) # changing this here messes up DCR\n \n # -- create the grid search file the first time -- \n # NOTE: this makes a linear grid, and is editable by the arrays above.\n # jason also proposed a more active gradient-descent style search\n # like with Brent's method. (https://en.wikipedia.org/wiki/Brent%27s_method)\n \n if True:\n # if not os.path.exists(f_peak):\n print('Recreating grid search file')\n \n # create the grid file\n # NOTE: save it as an lh5 Table just as an example of writing/reading one\n lists = [e_rises, e_flats]#, rc_consts]\n prod = list(itertools.product(*lists)) # clint <3 stackoverflow\n df_grid = pd.DataFrame(prod, columns=['rise', 'flat'])#,'rc']) \n lh5_grid = {}\n for i, dfcol in df_grid.iteritems():\n lh5_grid[dfcol.name] = lh5.Array(dfcol.values)\n tb_grid = lh5.Table(col_dict=lh5_grid)\n sto.write_object(tb_grid, grp_grid, f_peak)\n \n # filter events by onboard energy\n ene_raw = sto.read_object(tb_raw+'/'+epar, f_raw).nda\n # hist, bins, var = pgh.get_hist(ene_raw, range=(elo, ehi), dx=epb)\n # plt.plot(bins[1:], hist, ds='steps')\n # plt.show()\n if n_rows is not None:\n ene_raw = ene_raw[:n_rows]\n idx = np.where((ene_raw > elo) & (ene_raw < ehi))\n\n # create a filtered table with correct waveform and attrs\n # TODO: move this into a function in lh5.py which takes idx as an input\n tb_data, wf_tb_data = lh5.Table(), lh5.Table()\n\n # read non-wf cols (lh5 Arrays)\n data_raw = sto.read_object(tb_raw, f_raw, n_rows=n_rows)\n for col in data_raw.keys():\n if col=='waveform': continue\n newcol = lh5.Array(data_raw[col].nda[idx], attrs=data_raw[col].attrs)\n tb_data.add_field(col, newcol)\n \n # handle waveform column (lh5 Table)\n data_wfs = sto.read_object(tb_raw+'/waveform', f_raw, n_rows=n_rows)\n for col in data_wfs.keys():\n attrs = data_wfs[col].attrs\n if isinstance(data_wfs[col], lh5.ArrayOfEqualSizedArrays):\n # idk why i can't put the filtered array into the constructor\n aoesa = lh5.ArrayOfEqualSizedArrays(attrs=attrs, dims=[1,1])\n aoesa.nda = data_wfs[col].nda[idx]\n newcol = aoesa\n else:\n newcol = lh5.Array(data_wfs[col].nda[idx], attrs=attrs)\n wf_tb_data.add_field(col, newcol)\n tb_data.add_field('waveform', 
wf_tb_data)\n tb_data.attrs = data_raw.attrs\n sto.write_object(tb_data, grp_data, f_peak)\n\n else:\n print('Loading peak file. groups:', sto.ls(f_peak))\n tb_grid = sto.read_object(grp_grid, f_peak)\n tb_data = sto.read_object(grp_data, f_peak) # filtered file\n # tb_data = sto.read_object(tb_raw, f_raw) # orig file\n df_grid = tb_grid.get_dataframe()\n \n # check shape of input table\n print('input table attributes:')\n for key in tb_data.keys():\n obj = tb_data[key]\n if isinstance(obj, lh5.Table):\n for key2 in obj.keys():\n obj2 = obj[key2]\n print(' ', key, key2, obj2.nda.shape, obj2.attrs)\n else:\n print(' ', key, obj.nda.shape, obj.attrs)\n\n # clear new colums if they exist\n new_cols = ['e_fit', 'fwhm_fit', 'rchisq', 'xF_err', 'fwhm_ovr_mean']\n for col in new_cols:\n if col in df_grid.columns:\n df_grid.drop(col, axis=1, inplace=True)\n\n t_start = time.time()\n def run_dsp(dfrow):\n \"\"\"\n run dsp on the test file, editing the processor list\n alternate idea: generate a long list of processors with different names\n \"\"\"\n # adjust dsp config dictionary\n rise, flat = dfrow\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = f'{tau}*us'\n dsp_config['processors']['wf_trap']['args'][1] = f'{rise}*us'\n dsp_config['processors']['wf_trap']['args'][2] = f'{flat}*us'\n # pprint(dsp_config)\n \n # run dsp\n pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=0)\n pc.execute()\n \n # analyze peak\n e_peak = 1460.\n etype = 'trapEmax'\n elo, ehi, epb = 4000, 4500, 3 # the peak moves around a bunch\n energy = tb_out[etype].nda\n \n # get histogram\n hE, bins, vE = pgh.get_hist(energy, range=(elo, ehi), dx=epb)\n xE = bins[1:]\n \n # should I center the max at 1460?\n\n # simple numerical width\n i_max = np.argmax(hE)\n h_max = hE[i_max]\n upr_half = xE[(xE > xE[i_max]) & (hE <= h_max/2)][0]\n bot_half = xE[(xE < xE[i_max]) & (hE >= h_max/2)][0]\n fwhm = upr_half - bot_half\n sig = fwhm / 2.355\n \n # fit to gaussian: amp, mu, sig, bkg\n fit_func = pgf.gauss_bkg\n amp = h_max * fwhm\n bg0 = np.mean(hE[:20])\n x0 = [amp, xE[i_max], sig, bg0]\n xF, xF_cov = pgf.fit_hist(fit_func, hE, bins, var=vE, guess=x0)\n\n # collect results\n e_fit = xF[0]\n xF_err = np.sqrt(np.diag(xF_cov))\n e_err = xF\n fwhm_fit = xF[1] * 2.355 * 1460. / e_fit\n \n fwhm_err = xF_err[2] * 2.355 * 1460. / e_fit\n \n chisq = []\n for i, h in enumerate(hE):\n model = fit_func(xE[i], *xF)\n diff = (model - h)**2 / model\n chisq.append(abs(diff))\n rchisq = sum(np.array(chisq) / len(hE))\n fwhm_ovr_mean = fwhm_fit / e_fit\n\n if show_movie:\n \n plt.plot(xE, hE, ds='steps', c='b', lw=2, label=f'{etype} {rise}--{flat}')\n\n # peak shape\n plt.plot(xE, fit_func(xE, *x0), '-', c='orange', alpha=0.5,\n label='init. 
guess')\n plt.plot(xE, fit_func(xE, *xF), '-r', alpha=0.8, label='peakshape fit')\n plt.plot(np.nan, np.nan, '-w', label=f'mu={e_fit:.1f}, fwhm={fwhm_fit:.2f}')\n\n plt.xlabel(etype, ha='right', x=1)\n plt.ylabel('Counts', ha='right', y=1)\n plt.legend(loc=2)\n\n # show a little movie\n plt.show(block=False)\n plt.pause(0.01)\n plt.cla()\n\n # return results\n return pd.Series({'e_fit':e_fit, 'fwhm_fit':fwhm_fit, 'rchisq':rchisq,\n 'fwhm_err':xF_err[0], 'fwhm_ovr_mean': fwhm_ovr_mean})\n \n # df_grid=df_grid[:10]\n df_tmp = df_grid.progress_apply(run_dsp, axis=1)\n df_grid[new_cols] = df_tmp\n # print(df_grid)\n \n if show_movie:\n plt.close()\n \n print('elapsed:', time.time() - t_start)\n if write_output:\n df_grid.to_hdf(f_results, key=grp_grid)\n print(f\"Wrote output file: {f_results}\")",
"def CorrectMotion(self):\n if self.verbose:\n print \"Correct for motion\"\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n\n if os.path.exists(info['imgfile_m'] + info['suffix']):\n return\n# Always use brik for 3dDeconvolve.\n suffix = '+orig'\n epifile = '%s%s' % (info['imgfile'], suffix)\n prefix = info['imgfile_m']\n base_entry = info['base_entry']\n if info['base'] == 'start':\n# Use the first frame specified in template file. Defaults\n# to zero.\n base = info['motion_ref_frame']\n else:\n# Use the last frame.\n base = self.info[base_entry]['tdim'] - info['skip']-1\n base = ('%d' % base).replace(' ','')\n\n# Correct for slice-timing.\n self.SliceTimeCorrect(info, epifile)\n\n plane = info['plane']\n anat_tgt = info['anat_tgt']\n# anat_entry = self.anat_entry[plane]\n\n if info['catmats']:\n# Include additonal transformation in motion correction such\n# that final image is in register with the fieldmap, which has\n# been registered to the structural image that will be used for\n# spatial normalization.\n self.MotcorCatenate(info, base, anat_tgt)\n else:\n# Assume fieldmap is in register with the structural.\n self.Motcor(info, base)\n\n if info.get('fmapname', None) is None:\n# No fieldmap correction.\n if self.fsl_flip:\n# Flip the way fslview likes it.\n self.FSLFlip(info['imgfile_m'], info['imgfile_final'])\n elif info['suffix'] == '.nii':\n# Copy motion-corrected images from /tmp to output directory\n outfile = info['imgfile_final'] + info['suffix']\n cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile)\n self.CheckExec(cmd, [outfile], force=True)\n cmd = '/bin/rm %s+orig*' % info['imgfile_m']\n self.CheckExec(cmd, [], force=True)"
] | [
"0.5652331",
"0.5335536",
"0.52658623",
"0.5259198",
"0.5201728",
"0.51909906",
"0.515129",
"0.5142113",
"0.5076965",
"0.5063984",
"0.5055916",
"0.49459288",
"0.4884683",
"0.48668435",
"0.4824002",
"0.482143",
"0.47895026",
"0.47749937",
"0.47587273",
"0.4741739",
"0.4710989",
"0.46998623",
"0.46991298",
"0.46979156",
"0.4692717",
"0.4690254",
"0.46848482",
"0.46795285",
"0.46666843",
"0.46552742"
] | 0.5542998 | 1 |
Scans the given image for the 'ntraps' number of trap intensity peaks. Then extracts the 1-dimensional gaussian profiles across the traps and returns a list of the amplitudes. | def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):
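## Image Conditioning ##
# threshes: per-camera peak threshold, expressed as a fraction of the image maximum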
threshes = [0.5, 0.6]
margin = 10
threshold = np.max(image) * threshes[which_cam]
im = image.transpose()
x_len = len(im)
peak_locs = np.zeros(x_len)
peak_vals = np.zeros(x_len)
## Trap Peak Detection ##
for i in range(x_len):
if i < margin or x_len - i < margin:
peak_locs[i] = 0
peak_vals[i] = 0
else:
peak_locs[i] = np.argmax(im[i])
peak_vals[i] = max(im[i])
## Trap Range Detection ##
first = True
pos_first, pos_last = 0, 0
left_pos = 0
for i, p in enumerate(peak_vals):
if p > threshold:
left_pos = i
elif left_pos != 0:
if first:
pos_first = (left_pos + i) // 2
first = False
pos_last = (left_pos + i) // 2
left_pos = 0
## Separation Value ##
separation = (pos_last - pos_first) / ntraps # In Pixels
## Initial Guesses ##
means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
waists0 = (separation * np.ones(ntraps) / 2).tolist()
ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
_params0 = [means0, waists0, ampls0, [0.06]]
params0 = [item for sublist in _params0 for item in sublist]
## Fitting ##
if verbose:
print("Fitting...")
xdata = np.arange(x_len)
popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),
xdata, peak_vals, p0=params0)
if verbose:
print("Fit!")
plt.figure()
plt.plot(xdata, peak_vals) # Data
if iteration:
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit
plt.title("Iteration: %d" % iteration)
else:
plt.title("Final Product")
plt.xlim((pos_first - margin, pos_last + margin))
plt.legend(["Data", "Guess", "Fit"])
plt.show(block=False)
print("Fig_Newton")
trap_powers = popt[2 * ntraps:3 * ntraps]
return trap_powers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trapfilt_taps(N, phil, alfa):\n\n\n\n tt = arange(-N/2,N/2 + 1) # Time axis for h(t) \n # ***** Generate impulse response ht here *****\n ht = zeros(len(tt))\n ix = where(tt != 0)[0]\n if alfa != 0:\n ht[ix] = ((sin(2*pi*phil*tt[ix]))/(pi*tt[ix]))*((sin(2*pi*alfa*phil*tt[ix]))/(2*pi*alfa*phil*tt[ix]))\n else:\n ht[ix] = (sin(2*pi*phil*tt[ix]))/(pi*tt[ix])\n ix0 = where(tt == 0)[0]\n ht[ix0] = 2*phil\n ht = ht/sum(power(ht,2))\n\n return ht",
"def guess_image(which_cam, image, ntraps):\n threshes = [0.5, 0.65]\n ## Image Conditioning ##\n margin = 10\n threshold = np.max(image)*threshes[which_cam]\n im = image.transpose()\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n left_pos = 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n left_pos = i\n elif p < threshold and left_pos != 0:\n if first:\n pos_first = (left_pos + i) // 2\n first = False\n pos_last = (left_pos + i) // 2\n left_pos = 0\n\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n xdata = np.arange(x_len)\n plt.figure()\n plt.plot(xdata, peak_vals)\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)",
"def gaussianFilter(gain,BT,spSym,nTaps):\n\n a = np.sqrt(np.log(2)/2)/BT\n t = np.linspace(-.5*nTaps,.5*nTaps-1,nTaps)/spSym\n\n ft = np.sqrt(np.pi)/a *np.exp(-(np.pi**2*(t)**2)/a**2)\n ft /= np.sum(ft) * gain # normalize filter\n\n return ft",
"def gaussian_proba_map(img):\n method = 'cv2.TM_CCOEFF_NORMED'\n sigmas = [41,31,21,11]\n out = np.zeros(img.shape)\n for sigma in sigmas:\n size=3*sigma\n template = gaussian(size,sigma)\n template/=template.max()\n template*=255\n template = template.astype(np.uint8)\n \n img2 = img.copy()\n meth = eval(method)\n # Apply template Matching\n res = cv2.matchTemplate(img2,template,meth)\n res = np.pad(res,size/2,mode='constant')\n to_replace = res>out\n out[to_replace] = res[to_replace]\n return out",
"def compute_tap_features(xtaps, ytaps, t, threshold=20):\n import numpy as np\n\n from mhealthx.extractors.tapping import compute_drift, \\\n compute_tap_intervals, compute_intertap_gap\n from mhealthx.extractors.tapping import TapFeatures as T\n from mhealthx.signals import signal_features\n\n if isinstance(xtaps, list):\n xtaps = np.array(xtaps)\n if isinstance(ytaps, list):\n ytaps = np.array(ytaps)\n if isinstance(t, list):\n t = np.array(t)\n\n # Intertap intervals:\n ipress, intervals = compute_tap_intervals(xtaps, t, threshold)\n\n # Filter data:\n t = t[ipress]\n xtaps = xtaps[ipress]\n ytaps = ytaps[ipress]\n\n # Delta between fastest and slowest intertap intervals:\n T.intertap_gap10, T.intertap_gap25, \\\n T.intertap_gap50 = compute_intertap_gap(intervals)\n\n # Left and right taps and drift:\n mean_x = np.mean(xtaps)\n iL = np.where(xtaps < mean_x)\n iR = np.where(xtaps >= mean_x)\n xL = xtaps[iL]\n yL = ytaps[iL]\n xR = xtaps[iR]\n yR = ytaps[iR]\n driftL = compute_drift(xL, yL)\n driftR = compute_drift(xR, yR)\n\n # Number of taps:\n T.num_taps = xtaps.size\n T.num_taps_left = xL.size\n T.num_taps_right = xR.size\n\n # Time:\n T.time_rng = t[-1] - t[0]\n\n # Intertap interval statistics:\n T.intertap_num, T.intertap_min, T.intertap_max, T.intertap_rng, \\\n T.intertap_avg, T.intertap_std, T.intertap_med, T.intertap_mad, \\\n T.intertap_kurt, T.intertap_skew, T.intertap_cvar, T.intertap_lower25, \\\n T.intertap_upper25, T.intertap_inter50, T.intertap_rms, \\\n T.intertap_entropy, T.intertap_tk_energy = signal_features(intervals)\n\n # Tap statistics:\n T.xL_num, T.xL_min, T.xL_max, T.xL_rng, T.xL_avg, T.xL_std, \\\n T.xL_med, T.xL_mad, T.xL_kurt, T.xL_skew, T.xL_cvar, \\\n T.xL_lower25, T.xL_upper25, T.xL_inter50, T.xL_rms, \\\n T.xL_entropy, T.xL_tk_energy = signal_features(xL)\n\n T.xR_num, T.xR_min, T.xR_max, T.xR_rng, T.xR_avg, T.xR_std, \\\n T.xR_med, T.xR_mad, T.xR_kurt, T.xR_skew, T.xR_cvar, \\\n T.xR_lower25, T.xR_upper25, T.xR_inter50, T.xR_rms, \\\n T.xR_entropy, T.xR_tk_energy = signal_features(xR)\n\n # T.yL_num, T.yL_min, T.yL_max, T.yL_rng, T.yL_avg, T.yL_std, \\\n # T.yL_med, T.yL_mad, T.yL_kurt, T.yL_skew, T.yL_cvar, \\\n # T.yL_lower25, T.yL_upper25, T.yL_inter50, T.yL_rms, \\\n # T.yL_entropy, T.yL_tk_energy = signal_features(yL)\n\n # T.yR_num, T.yR_min, T.yR_max, T.yR_rng, T.yR_avg, T.yR_std, \\\n # T.yR_med, T.yR_mad, T.yR_kurt, T.yR_skew, T.yR_cvar, \\\n # T.yR_lower25, T.yR_upper25, T.yR_inter50, T.yR_rms, \\\n # T.yR_entropy, T.yR_tk_energy = signal_features(yR)\n\n # Drift statistics:\n T.driftL_num, T.driftL_min, T.driftL_max, T.driftL_rng, T.driftL_avg, \\\n T.driftL_std, T.driftL_med, T.driftL_mad, T.driftL_kurt, T.driftL_skew, \\\n T.driftL_cvar, T.driftL_lower25, T.driftL_upper25, T.driftL_inter50, \\\n T.driftL_rms, T.driftL_entropy, T.driftL_tk_energy = \\\n signal_features(driftL)\n\n T.driftR_num, T.driftR_min, T.driftR_max, T.driftR_rng, T.driftR_avg, \\\n T.driftR_std, T.driftR_med, T.driftR_mad, T.driftR_kurt, T.driftR_skew, \\\n T.driftR_cvar, T.driftR_lower25, T.driftR_upper25, T.driftR_inter50, \\\n T.driftR_rms, T.driftR_entropy, T.driftR_tk_energy = \\\n signal_features(driftR)\n\n return T",
"def extract_features(img, sigmas, n_features): \n dims = img.shape # dimensions of the image\n \n features = np.zeros((dims[0], dims[1], n_features)) # each feature map has the same size as the input image\n \n # the first feature we use is the pixel intensity in the green channel itself\n img_g = img[:,:,1] #I just assume it follows the RGB convention and not GBR or BGR...\n features[:,:,0] = img_g\n features[:,:,1] = np.sum(img,axis=2) \n \n gabors = get_gabors() \n \n # >>> YOUR CODE STARTS HERE <<<\n i = 2\n# for s in sigmas:\n# gfilters = gauss_filter(s)\n# for gf in gfilters:\n# features[:,:,i] = scipy.signal.fftconvolve(img_g, gf, mode='same') ;i+=1\n for s in sigmas:\n gauss = gauss_filter(s)\n for g in gauss:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, g, mode='same') ;i+=1\n \n for gabor in gabors:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, gabor, mode='same') ;i+=1\n \n \n features[:,:,i] = sobel(img_g, axis=0) ;i+=1\n features[:,:,i] = sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = sobel(img_g, axis=0)+sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0.0) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0, low_threshold=13, high_threshold=50);i+=1\n features[:,:,i] = feature.canny(img_g, sigma=1)\n # >>> YOUR CODE ENDS HERE <<< \n \n return features",
"def read_amplification(amp_file = 'dist.dat'):\n n_img, amp_img = np.loadtxt(amp_file, usecols=(0, 6), unpack=True)\n\n amp = []\n\n amp_tmp = []\n\n count = 1\n\n for i in range(len(n_img)):\n if count == n_img[i]:\n amp_tmp.append( amp_img[i] )\n else:\n amp.append(amp_tmp)\n\n amp_tmp = []\n\n amp_tmp.append( amp_img[i] )\n\n count = count + 1\n amp.append(amp_tmp)\n\n return amp",
"def cs4243_gauss_pyramid(image, n=3):\n kernel = cs4243_gaussian_kernel(7, 1)\n pyramid = []\n ## your code here####\n\n pyramid = [image]\n for i in range(n):\n gpyr_image = cs4243_filter_faster(pyramid[i], kernel)\n gpyr_image = cs4243_downsample(gpyr_image, 2)\n pyramid.append(gpyr_image)\n \n ##\n return pyramid",
"def gaussianarray1d(x, x0_vec, wx_vec, A_vec, offset, ntraps):\n array = np.zeros(np.shape(x))\n for k in range(ntraps):\n array = array + gaussian1d(x, x0_vec[k], wx_vec[k], A_vec[k], 0)\n return array + offset",
"def get_gaussian_ff_top(self, filenames):\n amber_ffs = []\n for fname in filenames:\n amber_ffs.append(self._get_gaussian_ff_top_single(filename=fname))\n return amber_ffs",
"def get_fluxes_within_mask(tpf, aper_mask, gaia_sources):\n assert tpf is not None\n assert aper_mask is not None\n assert gaia_sources is not None\n ra, dec = gaia_sources[[\"ra\", \"dec\"]].values.T\n pix_coords = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)\n contour_points = measure.find_contours(aper_mask, level=0.1)[0]\n isinside = [\n is_point_inside_mask(contour_points, pix) for pix in pix_coords\n ]\n min_gmag = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].min()\n gamma = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].apply(\n lambda x: 10 ** (0.4 * (min_gmag - x))\n )\n return gamma",
"def compute_tap_intervals(xtaps, t, threshold=20):\n import numpy as np\n\n if isinstance(xtaps, list):\n xtaps = np.asarray(xtaps)\n if isinstance(t, list):\n t = np.asarray(t)\n\n # Set time points:\n tap_times = t - t[0]\n\n # Calculate x offset:\n xtaps_offset = xtaps - np.mean(xtaps)\n\n # Find left/right finger \"press\" events:\n dx = xtaps_offset[1:] - xtaps_offset[:-1]\n ipress = np.where(np.abs(dx) > threshold)\n\n # Filter data:\n #xtaps = xtaps[ipress]\n tap_times = tap_times[ipress]\n\n # Find press event intervals:\n tap_intervals = tap_times[1:] - tap_times[:-1]\n\n return ipress, tap_intervals",
"def smooth_spectra(xarr, farr, sigma=3, nkern=20):\n xkern = np.arange(nkern)\n kern = np.exp(-(xkern - 0.5 * nkern) ** 2 / (sigma) ** 2)\n\n return gaussian_filter1d(farr, sigma)",
"def _get_features_from_batch_images(self, img, r, p):\n tmp_feats = []\n for channel in range(4):\n current_img = img[channel, :, :]\n tmp_feats = np.append(tmp_feats, np.histogram(current_img)[0])\n # extract 8*8 patches of 64*64 px and derive 10 bins histogram\n for j in range(r):\n for k in range(r):\n tmp_feats = np.append(\n tmp_feats,\n np.histogram(current_img[j * p:(j + 1) * (p), k *\n p:(k + 1) * p])[0])\n return tmp_feats",
"def features_sigma(img,\n sigma,\n intensity=True,\n edges=True,\n texture=True):\n\n features = []\n\n gx,gy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))\n # print(gx.shape)\n #features.append(gx)\n gx = filters.gaussian(gx, sigma)\n gy = filters.gaussian(gy, sigma)\n\n features.append(np.sqrt(gx**2 + gy**2)) #use polar radius of pixel locations as cartesian coordinates\n\n del gx, gy\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Location features extracted using sigma= %f' % (sigma))\n\n img_blur = filters.gaussian(img, sigma)\n\n if intensity:\n features.append(img_blur)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Intensity features extracted using sigma= %f' % (sigma))\n\n if edges:\n features.append(filters.sobel(img_blur))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Edge features extracted using sigma= %f' % (sigma))\n\n if texture:\n H_elems = [\n np.gradient(np.gradient(img_blur)[ax0], axis=ax1)\n for ax0, ax1 in itertools.combinations_with_replacement(range(img.ndim), 2)\n ]\n\n eigvals = feature.hessian_matrix_eigvals(H_elems)\n del H_elems\n\n for eigval_mat in eigvals:\n features.append(eigval_mat)\n del eigval_mat\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Texture features extracted using sigma= %f' % (sigma))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Image features extracted using sigma= %f' % (sigma))\n\n return features",
"def repIpdTft(length,gammas,epsilon):\r\n avgRewards = []\r\n for gamma in gammas: \r\n avgRewards.append(np.mean(ipdTft(length,gamma,epsilon)))\r\n return(avgRewards)",
"def sample(self, n_samps):\n # print('gauss trying to sample '+str(n_samps)+' from '+str(self.dist))\n # xs = np.array([self.sample_one() for n in range(n_samps)])\n xs = np.array(self.dist.sample(n_samps))\n # print('gauss sampled '+str(n_samps)+' from '+str(self.dist))\n return xs",
"def extract_features(\n img,\n n_sigmas,\n multichannel=True,\n intensity=True,\n edges=True,\n texture=True,\n sigma_min=0.5,\n sigma_max=16,\n):\n if multichannel: #img.ndim == 3 and multichannel:\n all_results = (\n extract_features_2d(\n dim,\n img[..., dim],\n n_sigmas,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n for dim in range(img.shape[-1])\n )\n features = list(itertools.chain.from_iterable(all_results))\n else:\n features = extract_features_2d(0,\n img,\n n_sigmas,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Feature extraction complete')\n\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n logging.info('Memory mapping features to temporary file')\n\n features = memmap_feats(features)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return features #np.array(features)",
"def t1_hypointensity( x, xsegmentation, xWMProbability, template, templateWMPrior, wmh_thresh=0.1 ):\n mybig = [88,128,128]\n templatesmall = ants.resample_image( template, mybig, use_voxels=True )\n qaff = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(templatesmall), 'SyN',\n syn_sampling=2,\n syn_metric='CC',\n reg_iterations = [25,15,0,0],\n aff_metric='GC', random_seed=1 )\n afftx = qaff['fwdtransforms'][1]\n templateWMPrior2x = ants.apply_transforms( x, templateWMPrior, qaff['fwdtransforms'] )\n cerebrum = ants.threshold_image( xsegmentation, 2, 4 )\n realWM = ants.threshold_image( templateWMPrior2x , 0.1, math.inf )\n inimg = ants.rank_intensity( x )\n parcellateWMdnz = ants.kmeans_segmentation( inimg, 2, realWM, mrf=0.3 )['probabilityimages'][0]\n x2template = ants.apply_transforms( templatesmall, x, afftx, whichtoinvert=[True] )\n parcellateWMdnz2template = ants.apply_transforms( templatesmall,\n cerebrum * parcellateWMdnz, afftx, whichtoinvert=[True] )\n # features = rank+dnz-image, lprob, wprob, wprior at mybig resolution\n f1 = x2template.numpy()\n f2 = parcellateWMdnz2template.numpy()\n f3 = ants.apply_transforms( templatesmall, xWMProbability, afftx, whichtoinvert=[True] ).numpy()\n f4 = ants.apply_transforms( templatesmall, templateWMPrior, qaff['fwdtransforms'][0] ).numpy()\n myfeatures = np.stack( (f1,f2,f3,f4), axis=3 )\n newshape = np.concatenate( [ [1],np.asarray( myfeatures.shape )] )\n myfeatures = myfeatures.reshape( newshape )\n\n inshape = [None,None,None,4]\n wmhunet = antspynet.create_unet_model_3d( inshape,\n number_of_outputs = 1,\n number_of_layers = 4,\n mode = 'sigmoid' )\n\n wmhunet.load_weights( get_data(\"simwmhseg\", target_extension='.h5') )\n\n pp = wmhunet.predict( myfeatures )\n\n limg = ants.from_numpy( tf.squeeze( pp[0] ).numpy( ) )\n limg = ants.copy_image_info( templatesmall, limg )\n lesresam = ants.apply_transforms( x, limg, afftx, whichtoinvert=[False] )\n # lesresam = lesresam * cerebrum\n rnmdl = antspynet.create_resnet_model_3d( inshape,\n number_of_classification_labels = 1,\n layers = (1,2,3),\n residual_block_schedule = (3,4,6,3), squeeze_and_excite = True,\n lowest_resolution = 32, cardinality = 1, mode = \"regression\" )\n rnmdl.load_weights( get_data(\"simwmdisc\", target_extension='.h5' ) )\n qq = rnmdl.predict( myfeatures )\n\n lesresamb = ants.threshold_image( lesresam, wmh_thresh, 1.0 )\n lgo=ants.label_geometry_measures( lesresamb, lesresam )\n wmhsummary = pd.read_csv( get_data(\"wmh_evidence\", target_extension='.csv' ) )\n wmhsummary.at[0,'Value']=lgo.at[0,'VolumeInMillimeters']\n wmhsummary.at[1,'Value']=lgo.at[0,'IntegratedIntensity']\n wmhsummary.at[2,'Value']=float(qq)\n\n return {\n \"wmh_summary\":wmhsummary,\n \"wmh_probability_image\":lesresam,\n \"wmh_evidence_of_existence\":float(qq),\n \"wmh_max_prob\":lesresam.max(),\n \"features\":myfeatures }",
"def find_peaks(f_arr, sigma, niter, bsigma=None):\n # set up the variables\n if bsigma is None:\n bsigma = sigma\n\n # determine the background statistics\n back_ave, back_std = find_backstats(f_arr, sigma, niter)\n\n # calculate the differences between the pixels\n dfh = f_arr[1:-1] - f_arr[:-2]\n dfl = f_arr[1:-1] - f_arr[2:]\n\n # find the objects\n mask = (dfh > 0) * (dfl > 0) * \\\n (abs(f_arr[1:-1] - back_ave) > back_std * sigma)\n t = np.where(mask)[0]\n return t + 1",
"def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)",
"def haperfluxMany(inputlist, maplist, radius, rinner, router, galactic=True, decimal=True, noise_model=0):\n\n ## Names and frequencies of the sample maps included in this repo.\n\n freqlist = ['30','44','70','100','143','217','353','545','857','1249','1874','2141','2998','3331','4612','4997','11992','16655','24983','24983','24983','33310']\n freqval = [28.405889, 44.072241,70.421396,100.,143.,217.,353.,545.,857.,1249.,1874.,2141.,2141.,2998.,2998.,3331.,4612.,4997.,11992.,16655.,24983.,24983.,24983.,33310.]\n band_names = [\"akari9\", \"dirbe12\",\"iras12\",\"wise12\",\"akari18\",\"iras25\",\"iras60\",\"akari65\",\"akari90\",\"dirbe100\",\"iras100\",\"akari140\",\"dirbe140\",\"akari160\",\"dirbe240\",\"planck857\", \"planck545\"]\n\n k0 = 1.0\n k1 = rinner/radius\n k2 = router/radius\n apcor = ((1 - (0.5)**(4*k0**2))-((0.5)**(4*k1**2) - (0.5)**(4*k2**2)))**(-1)\n\n # 'galactic' overrules 'decimal'\n if (galactic==True):\n dt=[('sname',np.dtype('S13')),('glon',np.float32),('glat',np.float32)]\n targets = np.genfromtxt(inputlist, delimiter=\",\",dtype=dt)\n\n ns = len(targets['glat'])\n\n fd3 = -1\n fd_err3 = -1\n\n fn = np.genfromtxt(maplist, delimiter=\" \", dtype='str')\n nmaps = len(fn)\n ## Initialize the arrays which will hold the results\n fd_all = np.zeros((ns,nmaps))\n fd_err_all = np.zeros((ns,nmaps))\n fd_bg_all = np.zeros((ns,nmaps))\n\n # Start the actual processing: Read-in the maps.\n for ct2 in range(0,nmaps):\n xtmp_data, xtmp_head = hp.read_map(fn[ct2], h=True, verbose=False, nest=False)\n freq = dict(xtmp_head)['FREQ']\n units = dict(xtmp_head)['TUNIT1']\n freq_str = str(freq)\n idx = freqlist.index(str(freq))\n currfreq = int(freq)\n\n if (radius == None):\n radval = fwhmlist[idx]\n else:\n radval = radius\n\n\n for ct in range(0,ns):\n\n glon = targets['glon'][ct]\n glat = targets['glat'][ct]\n\n fd_all[ct,ct2], fd_err_all[ct,ct2], fd_bg_all[ct,ct2] = \\\n haperflux(inmap= xtmp_data, freq= currfreq, lon=glon, lat=glat, aper_inner_radius=radius, aper_outer_radius1=rinner, \\\n aper_outer_radius2=router,units=units, noise_model=noise_model)\n\n if (np.isfinite(fd_err_all[ct,ct2]) == False):\n fd_all[ct,ct2] = -1\n fd_err_all[ct,ct2] = -1\n else:\n if radius==None:\n fd_all[ct,ct2] = fd_all[ct,ct2]*apcor\n fd_err_all[ct,ct2] = fd_err_all[ct,ct2]*apcor\n\n return fd_all, fd_err_all, fd_bg_all",
"def tapered_spectra(s, tapers, NFFT=None, low_bias=True):\r\n N = s.shape[-1]\r\n # XXX: don't allow NFFT < N -- not every implementation is so restrictive!\r\n if NFFT is None or NFFT < N:\r\n NFFT = N\r\n rest_of_dims = s.shape[:-1]\r\n M = int(np.product(rest_of_dims))\r\n\r\n s = s.reshape(int(np.product(rest_of_dims)), N)\r\n # de-mean this sucker\r\n s = utils.remove_bias(s, axis=-1)\r\n\r\n if not isinstance(tapers, np.ndarray):\r\n # then tapers is (NW, K)\r\n args = (N,) + tuple(tapers)\r\n dpss, eigvals = dpss_windows(*args)\r\n if low_bias:\r\n keepers = (eigvals > 0.9)\r\n dpss = dpss[keepers]\r\n eigvals = eigvals[keepers]\r\n tapers = dpss\r\n else:\r\n eigvals = None\r\n K = tapers.shape[0]\r\n sig_sl = [slice(None)] * len(s.shape)\r\n sig_sl.insert(len(s.shape) - 1, np.newaxis)\r\n\r\n # tapered.shape is (M, Kmax, N)\r\n tapered = s[sig_sl] * tapers\r\n\r\n # compute the y_{i,k}(f) -- full FFT takes ~1.5x longer, but unpacking\r\n # results of real-valued FFT eats up memory\r\n t_spectra = fftpack.fft(tapered, n=NFFT, axis=-1)\r\n t_spectra.shape = rest_of_dims + (K, NFFT)\r\n if eigvals is None:\r\n return t_spectra\r\n return t_spectra, eigvals",
"def process_noise(qubit, tstep, noise_samples, sigma_array):\n from scipy.stats import norm\n noise_weights = np.zeros((len(sigma_array), len(noise_samples)))\n average_chi_array = np.zeros((len(sigma_array), 9,9), dtype=complex)\n raw_chi_array = noise_iteration(qubit, tstep, noise_samples)\n for i in range(len(sigma_array)):\n noise_weights[i, :] += norm.pdf(noise_samples, loc=0.0, scale=sigma_array[i])\n average_chi_array[i, :, :] += noise_averaging(noise_samples, noise_weights[i, :], raw_chi_array)\n return average_chi_array, raw_chi_array",
"def wrapper_fit_func(x, ntraps, *args):\n a, b, c = list(args[0][:ntraps]), list(args[0][ntraps:2 * ntraps]), list(args[0][2 * ntraps:3 * ntraps])\n offset = args[0][-1]\n return gaussianarray1d(x, a, b, c, offset, ntraps)",
"def addNoise_amp(array,counts):\r\n if array.dtype == 'complex' :\r\n arrayout = addNoise(np.real(array),counts) + 1.0J * addNoise(np.imag(array),counts)\r\n else :\r\n if np.float64(counts) == 0.0e0 :\r\n arrayout = np.copy(array)\r\n elif np.float64(counts) < 0.0e0 :\r\n print 'bg.addNoise : warning counts < 0'\r\n else :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = np.square(normalise(array))\r\n arrayout = np.random.poisson(arrayout*np.float64(counts))/np.float64(counts)\r\n arrayout = np.sqrt(arrayout)\r\n tot = np.sum(np.abs(array)**2)\r\n arrayout = normalise(arrayout,tot)\r\n return arrayout",
"def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")",
"def pick_triplets_images(images, n_triplets):\n\n indices = _pick_triplets(len(images), n_triplets)\n\n n_samples = len(indices)\n\n n_rows, n_cols, n_channels = images[0].shape\n\n images_samples = np.zeros((n_samples,n_rows, n_cols, n_channels), dtype = np.uint8)\n\n for i, index in enumerate(indices):\n images_samples[i] = images[index]\n\n return images_samples",
"def extract_features_2d(\n dim,\n img,\n n_sigmas,\n intensity=True,\n edges=True,\n texture=True,\n sigma_min=0.5,\n sigma_max=16\n):\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Extracting features from channel %i' % (dim))\n\n # computations are faster as float32\n img = img_as_float32(img)\n\n sigmas = np.logspace(\n np.log2(sigma_min),\n np.log2(sigma_max),\n num=n_sigmas, #int(np.log2(sigma_max) - np.log2(sigma_min) + 1),\n base=2,\n endpoint=True,\n )\n\n if (psutil.virtual_memory()[0]>10000000000) & (psutil.virtual_memory()[2]<50): #>10GB and <50% utilization\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Extracting features in parallel')\n logging.info('Total RAM: %i' % (psutil.virtual_memory()[0]))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n all_results = Parallel(n_jobs=-2, verbose=0)(delayed(features_sigma)(img, sigma, intensity=intensity, edges=edges, texture=texture) for sigma in sigmas)\n else:\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Extracting features in series')\n logging.info('Total RAM: %i' % (psutil.virtual_memory()[0]))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n n_sigmas = len(sigmas)\n all_results = [\n features_sigma(img, sigma, intensity=intensity, edges=edges, texture=texture)\n for sigma in sigmas\n ]\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Features from channel %i for all scales' % (dim))\n\n return list(itertools.chain.from_iterable(all_results))",
"def update_for_in_trap(self, t, traps): #******\n sources = traps.param['source_locations'] #Of format [(0,0),]\n for trap_num, trap_loc in enumerate(sources):\n dist_vals = distance((self.x_position, self.y_position),trap_loc)\n mask_trapped = dist_vals < traps.param['trap_radius']\n self.mode[mask_trapped] = self.Mode_Trapped\n self.trap_num[mask_trapped] = trap_num\n self.x_trap_loc[mask_trapped] = trap_loc[0]\n self.y_trap_loc[mask_trapped] = trap_loc[1]\n self.x_velocity[mask_trapped] = 0.0\n self.y_velocity[mask_trapped] = 0.0\n\n # Get time stamp for newly trapped flies\n mask_newly_trapped = mask_trapped & (self.t_in_trap == scipy.inf)\n self.t_in_trap[mask_newly_trapped] = t"
] | [
"0.587632",
"0.52485836",
"0.5228704",
"0.5138485",
"0.5076651",
"0.50504184",
"0.5020896",
"0.49921283",
"0.49873865",
"0.4987357",
"0.49527085",
"0.48427442",
"0.48109403",
"0.4805407",
"0.47939897",
"0.47633266",
"0.47571003",
"0.47376722",
"0.47331765",
"0.47273827",
"0.470503",
"0.46984622",
"0.46977895",
"0.46950454",
"0.46633723",
"0.4635799",
"0.46310976",
"0.4594745",
"0.4588485",
"0.45748767"
] | 0.5898754 | 0 |
Given the opened camera object and the Slider object connected to the camera's exposure, adjusts the exposure to sit just below clipping using a binary search. | def fix_exposure(cam, slider, verbose=False):
margin = 10
exp_t = MAX_EXP / 2
cam._set_exposure(exp_t * u.milliseconds)
time.sleep(0.5)
print("Fetching Frame")
im = cam.latest_frame()
x_len = len(im)
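## Binary-search bounds and initial step for the exposure time (ms) ##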
right, left = MAX_EXP, 0
inc = right / 10
for _ in range(10):
## Determine if Clipping or Low-Exposure ##
gap = 255
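# gap tracks the smallest headroom below the 8-bit ceiling (255) across the interior columns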
for i in range(x_len):
if i < margin or x_len - i < margin:
continue
else:
gap = min(255 - max(im[i]), gap)
## Make Appropriate Adjustment ##
if gap == 0:
if verbose:
print("Clipping at: ", exp_t)
right = exp_t
elif gap > 50:
if verbose:
print("Closing gap: ", gap, " w/ exposure: ", exp_t)
left = exp_t
else:
if verbose:
print("Final Exposure: ", exp_t)
return
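## Once the bracket is narrow, step linearly; otherwise bisect the bracket ##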
if inc < 0.01:
exp_t -= inc if gap == 0 else -inc  # back off when clipping, otherwise push the exposure up
else:
exp_t = (right + left) / 2
inc = (right - left) / 10
slider.set_val(exp_t)
time.sleep(1)
im = cam.latest_frame() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_exposure(self, expo):\n if expo == 0:\n self.exposure = 0\n elif expo == 1:\n self.exposure = min(9, self.exposure+1)\n elif expo == -1:\n self.exposure = max(-9, self.exposure-1)\n self.drone.set_exposure(self.exposure)\n log.info(f\"EXPOSURE {self.exposure}\")",
"def exposure(self):\n\n # define a range of declination to evaluate the\n # exposure at\n self.declination = np.linspace(-np.pi/2, np.pi/2, self.num_points)\n\n m = np.asarray([m_dec(d, self.params) for d in self.declination])\n \n # normalise to a maximum at 1\n self.exposure_factor = (m / m_dec(-np.pi/2, self.params))\n\n # find the point at which the exposure factor is 0\n self.limiting_dec = Angle((self.declination[m == 0])[0], 'rad')",
"def roi_ko(self):\n self.pressure_img.mask = self.pressure_img.previous_roi",
"def process_image(self, image):\n #Resize and blur the image, put into HSV color scale, and create an image mask \n img_small = cv2.resize(image, None, fx=self.subsample_ratio, fy=self.subsample_ratio, interpolation=cv2.INTER_LINEAR) \n img_blur = cv2.GaussianBlur(img_small, (5,5), 0)\n img_hsv = cv2.cvtColor(img_blur, cv2.COLOR_BGR2HSV)\n mask_l = cv2.inRange(img_hsv, self.hsv_lower_lower, self.hsv_lower_upper)\n mask_u = cv2.inRange(img_hsv, self.hsv_upper_lower, self.hsv_upper_upper)\n mask = cv2.bitwise_or(mask_l, mask_u)\n\n #Publish the mask\n mask_bgr8 = cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)\n bridge = CvBridge()\n cv_mask = bridge.cv2_to_imgmsg(mask_bgr8, encoding='bgr8')\n self.pub.publish(cv_mask)\n\n #find the largest contour of the mask or return 0 if target is not there\n img, cnts, cnt_hier = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n if len(cnts) == 0:\n return (0, (0,0))\n c = max(cnts, key=cv2.contourArea)\n\n #find the height of the target object and its center using minAreaRect\n rect = cv2.minAreaRect(c)\n height_px = rect[1][1] / self.subsample_ratio\n target_x = rect[0][0] / self.subsample_ratio\n target_y = rect[0][1] / self.subsample_ratio\n offset_px = (target_x - self.image_center[0]) , -1.0*(target_y - self.image_center[1])\n\n #NOTE!! When using a ball shaped object, use minEnclosingCircle and the circle diameter\n #enc_circle = 2 * cv2.minEnclosingCircle(c)[1]\n #height_px = 2 * enc_circle[1]\n #offset_px = (enc_circle[0][0] - self.image_center[0]) , -1*(enc_circle[0][1] - self.image_center[1])\n\n return height_px, offset_px",
"def enable_lower_center_roi_auto_exposure(image_width, image_height, hid_handle, win_size=4):\n outputLow = 0\n outputHigh = 255\n\n # Convert RoI center position to 0-255 value\n inputXLow = 0\n inputXHigh = image_width - 1\n inputXCord = int(image_width / 2)\n outputXCord = int(((inputXCord - inputXLow) / (inputXHigh - inputXLow)) * (outputHigh - outputLow) + outputLow)\n\n inputYLow = 0\n inputYHigh = image_height - 1\n inputYCord = int(image_height * 3 / 4)\n outputYCord = int(((inputYCord - inputYLow) / (inputYHigh - inputYLow)) * (outputHigh - outputLow) + outputLow)\n\n input_buffer = bytearray([0] * BUFFER_LENGTH)\n input_buffer[1] = CAMERA_CONTROL_CU20\n input_buffer[2] = SET_AE_ROI_MODE_CU20\n input_buffer[3] = AutoExpManual\n input_buffer[4] = outputXCord\n input_buffer[5] = outputYCord\n input_buffer[6] = win_size\n\n hid_write(hid_handle, input_buffer)\n output_buffer = hid_read(hid_handle)\n\n if output_buffer[6] == 0x00:\n print(\"\\nEnabling AutoExposure(RoI based) is failed\\n\")\n return False\n elif (\n output_buffer[0] == CAMERA_CONTROL_CU20\n and output_buffer[1] == SET_AE_ROI_MODE_CU20\n and output_buffer[6] == SUCCESS\n ):\n print(\"\\nAutoExposure(RoI based) is enabled\\n\")\n return True",
"def enable_roi_auto_exposure(xcord, ycord, image_width, image_height, hid_handle, win_size=4):\n outputLow = 0\n outputHigh = 255\n\n # Convert RoI center position to 0-255 value\n inputXLow = 0\n inputXHigh = image_width - 1\n inputXCord = xcord\n outputXCord = int(((inputXCord - inputXLow) / (inputXHigh - inputXLow)) * (outputHigh - outputLow) + outputLow)\n\n inputYLow = 0\n inputYHigh = image_height - 1\n inputYCord = ycord\n outputYCord = int(((inputYCord - inputYLow) / (inputYHigh - inputYLow)) * (outputHigh - outputLow) + outputLow)\n\n input_buffer = bytearray([0] * BUFFER_LENGTH)\n input_buffer[1] = CAMERA_CONTROL_CU20\n input_buffer[2] = SET_AE_ROI_MODE_CU20\n input_buffer[3] = AutoExpManual\n input_buffer[4] = outputXCord\n input_buffer[5] = outputYCord\n input_buffer[6] = win_size\n\n hid_write(hid_handle, input_buffer)\n output_buffer = hid_read(hid_handle)\n\n if output_buffer[6] == 0x00:\n print(\"\\nEnabling AutoExposure(RoI based) is failed\\n\")\n return False\n elif (\n output_buffer[0] == CAMERA_CONTROL_CU20\n and output_buffer[1] == SET_AE_ROI_MODE_CU20\n and output_buffer[6] == SUCCESS\n ):\n print(\"\\nAutoExposure(RoI based) is enabled\\n\")\n return True",
"def reset_camera_clipping_range(self):\n self.ResetCameraClippingRange()",
"def exposureCallback(self, config):\n rospy.loginfo('Set exposure: {}'.format(config['exposure']))",
"def autoExpose(camera,\r\n target_level=245,\r\n adjust_shutter=True,\r\n adjust_gain=True):\r\n\r\n if target_level <= 0 or target_level >= 255:\r\n raise ValueError(\"Target level must be value between in the range\"\r\n \"]0,255[ !\")\r\n\r\n # There must be something to adjust\r\n if ~adjust_shutter and ~adjust_gain:\r\n raise ValueError(\"At one of the variables must be adjustable!\")\r\n\r\n while True:\r\n # Grab frame\r\n image = camera.retrieveBuffer()\r\n image = image.convert(PyCapture2.PIXEL_FORMAT.RAW8)\r\n data = image.getData()\r\n\r\n # Grab current camera properties\r\n shutter = camera.getProperty(PyCapture2.PROPERTY_TYPE.SHUTTER).absValue\r\n gain = camera.getProperty(PyCapture2.PROPERTY_TYPE.GAIN).absValue\r\n\r\n # Exposition adjustment\r\n max_val = np.max(data)\r\n print(\"Shutter = {0:.2f}[ms], Gain = {1:.1f}[db],\"\r\n \"Max pixel value = {2:d} \".format(shutter, gain, max_val),\r\n end='\\r')\r\n\r\n if max_val == max:\r\n if gain == 0 or ~adjust_shutter:\r\n if shutter > 0.1:\r\n shutter = max(0.1, shutter * (1 + _dShutter))\r\n else:\r\n gain = max(0, gain - _dGain)\r\n\r\n elif max_val < min:\r\n if shutter < 8:\r\n shutter = min(8.1, shutter / (1 + _dShutter))\r\n else:\r\n gain += _dGain\r\n else:\r\n break\r\n\r\n # Update camera parameters\r\n if autoExpose:\r\n camera.setProperty(type=PyCapture2.PROPERTY_TYPE.SHUTTER,\r\n autoManualMode=False, absValue=shutter)\r\n camera.setProperty(type=PyCapture2.PROPERTY_TYPE.GAIN,\r\n autoManualMode=False, absValue=gain)",
"def expose(self):\n\n ## Determine type of exposure (exp, series, stack)\n exptype = str(self.exptypeComboBox.currentText())\n mode = self.modedict[exptype]\n\n ## Get exposure parameters\n if mode == \"bias\":\n exptime = 0.0\n else:\n exptime = self.exptimeSpinBox.value()\n imcount = self.imstackSpinBox.value()\n seqnum = self.imnumSpinBox.value()\n mintime = self.minexpSpinBox.value()\n maxtime = self.maxexpSpinBox.value()\n step = self.tstepSpinBox.value()\n\n ## Determine filter kwargs\n if self.filterToggleButton.isChecked():\n kwargs = {'filter_name' : str(self.filterComboBox.currentText())}\n else:\n kwargs = {'monowl' : self.monoSpinBox.value()}\n\n if self.testimCheckBox.isChecked():\n title = 'test'\n else:\n title = str(self.imtitleLineEdit.text())\n\n ## Build filepath\n filepath = os.path.join(str(self.imfilenameLineEdit.text()),title)\n \n ## Check if single exposure\n if exptype in [\"Exposure\", \"Dark\", \"Bias\"]:\n\n ## Perform exposure\n self.logger.info(\"Starting {0}s {1} image.\".format(exptime, exptype))\n self.image_start.emit(1)\n\n try:\n filename = exposure.im_acq(mode, filepath, exptime, seqnum, **kwargs)\n self.image_taken.emit(1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken\".format(mode))\n except IOError:\n self.logger.exception(\"File already exits. Image not taken.\")\n else:\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.seqnum_inc.emit(seqnum)\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename,\n '-zoom', 'to', 'fit', '-cmap', 'b'])\n\n ## Check if a stack of exposures of same type\n elif exptype in [\"Exposure Stack\", \"Dark Stack\", \"Bias Stack\"]:\n\n total = seqnum + imcount\n self.logger.info(\"Starting {0}s {1} stack.\".format(exptime, exptype))\n self.image_start.emit(imcount)\n\n try:\n for i in range(seqnum, total):\n self.logger.info(\"Starting image {0} of {1}.\".format(i+1-seqnum, imcount))\n filename = exposure.im_acq(mode, filepath, exptime, i, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1-seqnum)\n self.seqnum_inc.emit(i)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure stack finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n \n ## Check if a series of exposures of increase exposure time\n elif exptype in [\"Exposure Series\", \"Dark Series\"]:\n\n ## Parameter checks\n if mintime > maxtime:\n self.logger.warning(\"Minimum time must be less than Maximum time. Series not started.\")\n return\n elif step <= 0:\n self.logger.warning(\"Time step must be greater than 0. 
Series not started.\")\n return\n\n ## Construct array of exposure times\n t = mintime\n time_array = []\n while t <= maxtime:\n time_array.append(t)\n t += step\n \n ## Perform series\n self.logger.info(\"Starting {0} series with mintime {1}, maxtime {2}, and step {3}.\".format(exptype, mintime, maxtime, step))\n self.image_start.emit(len(time_array))\n \n try:\n for i, time in enumerate(time_array):\n self.logger.info(\"Starting {0}s {1} image.\".format(time, mode))\n filename = exposure.im_acq(mode, filepath, time, seqnum, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure series finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n self.seqnum_inc.emit(seqnum)",
"def constrain_roi(self, frame):\n raise NotImplementedError",
"def endexposureloop(self):\n self.max_exposures = self.current_exposure",
"def adjust(self, image):\n ...",
"def segment_and_find_positions(self):\n initial_image = self.data\n xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)",
"def filter_mentor_advise(image):\n HSV = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n\n # For yellow\n yellow = cv2.inRange(HSV, (20, 100, 100), (50, 255, 255))\n\n # For white\n sensitivity_1 = 68\n white = cv2.inRange(HSV, (0,0,255-sensitivity_1), (255,20,255))\n\n sensitivity_2 = 60\n HSL = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n white_2 = cv2.inRange(HSL, (0,255-sensitivity_2,0), (255,255,sensitivity_2))\n white_3 = cv2.inRange(image, (200,200,200), (255,255,255))\n\n bit_layer = yellow | white | white_2 | white_3\n\n return bit_layer",
"def mouseRange(event, x, y, flags, param):\n \n #If the left button was clicked\n if event==cv.CV_EVENT_LBUTTONDOWN:\n print \"x, y are\", x, y\n pixel_val= D.image[y,x]\n print \"the pixel's depth value is\", pixel_val\n if D.mode == \"setLeft\":\n D.dot1 = (x,y)\n D.mode = D.lastmode\n elif D.mode == \"setRight\":\n D.dot2 = (x,y)\n D.mode = D.lastmode\n elif D.mode == \"setTop\":\n D.dot3 = (x,y)\n D.mode = D.lastmode\n elif D.mode == \"setDown\":\n D.dot4 = (x,y)\n D.mode = D.lastmode",
"def setup_hsv_boundaries():\n global l_hsv_thresh, u_hsv_thresh\n cv2.destroyAllWindows()\n l_hsv_thresh, u_hsv_thresh = prompt_calibration()\n cv2.destroyAllWindows()",
"def find_components_old(image,deltaPix,lens_rad_arcsec = 5.0,lens_rad_ratio = None, gal_rad_ratio = 0.1,min_size_arcsec=0.3,thresh=0.4, show_locations=False):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n filtered = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.)\n \n# print(filtered.min(),filtered.max(),filtered.min() + thresh * np.abs(filtered.min()))\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n \n if show_locations:\n plt.figure(figsize = (8,8))\n plt.subplot(1,2,1)\n plt.imshow(image, origin='lower', norm=SymLogNorm(5))\n plt.title('Image')\n\n plt.subplot(1,2,2)\n plt.imshow(filtered, origin='lower', norm=SymLogNorm(5))\n plt.title('Filtered Image')\n plt.show()\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0)\n max_idx_2d_large = peak_local_max(filtered, min_distance=1)\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2.\n \n R = np.sqrt((x_list_large - im_center_x)**2 + (y_list_large - im_center_y)**2)\n new_center_x, new_center_y = x_list_large[R < gal_rad], y_list_large[R < gal_rad]\n \n if (len(new_center_x) > 1) and (len(x_list_large[R == R.min()]) ==1 ): \n new_center_x, new_center_y = x_list_large[R == R.min()], y_list_large[R == R.min()]\n elif (len(new_center_x) > 1) and (len(x_list_large[R == R.min()]) > 1 ): \n new_center_x, new_center_y = im_center_x, im_center_y\n elif len(new_center_x) == 0: \n new_center_x, new_center_y = im_center_x, im_center_y\n \n \n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2)\n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n # show maxima on image for debug\n if show_locations:\n fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n plt.imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n plt.scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n plt.scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n plt.gcf().gca().add_artist(draw_lens_circle)\n 
plt.gcf().gca().add_artist(draw_gal_circle)\n plt.title('Detected Components')\n plt.text(1, 1, \"detected components\", color='red')\n fig.axes[0].get_xaxis().set_visible(True); fig.axes[0].get_yaxis().set_visible(True)\n plt.show()\n return (x_sats, y_sats), (new_center_x, new_center_y)",
"def clip(self, image, x=0, y=0, w=0, h=0, oX=0, oY=0):\n if(w==0):\n w = image.get_rect()[2]\n if(h==0):\n h = image.get_rect()[3]\n needleW = w + 2*math.sqrt(oX*oX)\n needleH = h + 2*math.sqrt(oY*oY)\n imageOut = pygame.Surface((needleW, needleH))\n imageOut.fill((255,255,0))\n imageOut.set_colorkey((255,255,0))\n imageOut.blit(image, (needleW/2-w/2+oX, needleH/2-h/2+oY), pygame.Rect(x,y,w,h))\n return imageOut",
"def ring_ext(self, tissue):\n print(\"controller - ring_ext!\")\n img_cv2_mask = self.pressure_img.mask\n self.pressure_img.roi_crop(img_cv2_mask, tissue, 1)",
"def configure_exposure(cam,exposure):\n\n #print(\"*** CONFIGURING EXPOSURE ***\\n\")\n\n try:\n result = True\n\n # Turn off automatic exposure mode\n #\n # *** NOTES ***\n # Automatic exposure prevents the manual configuration of exposure\n # times and needs to be turned off for this example. Enumerations\n # representing entry nodes have been added to QuickSpin. This allows\n # for the much easier setting of enumeration nodes to new values.\n #\n # The naming convention of QuickSpin enums is the name of the\n # enumeration node followed by an underscore and the symbolic of\n # the entry node. Selecting \"Off\" on the \"ExposureAuto\" node is\n # thus named \"ExposureAuto_Off\".\n #\n # *** LATER ***\n # Exposure time can be set automatically or manually as needed. This\n # example turns automatic exposure off to set it manually and back\n # on to return the camera to its default state.\n\n \n\n # Set exposure time manually; exposure time recorded in microseconds\n #\n # *** NOTES ***\n # Notice that the node is checked for availability and writability\n # prior to the setting of the node. In QuickSpin, availability and\n # writability are ensured by checking the access mode.\n #\n # Further, it is ensured that the desired exposure time does not exceed\n # the maximum. Exposure time is counted in microseconds - this can be\n # found out either by retrieving the unit with the GetUnit() method or\n # by checking SpinView.\n\n if cam.ExposureTime.GetAccessMode() != PySpin.RW:\n print(\"Unable to set exposure time. Aborting...\")\n return False\n\n # Ensure desired exposure time does not exceed the maximum\n exposure_time_to_set = exposure\n exposure_time_to_set = min(cam.ExposureTime.GetMax(), exposure_time_to_set)\n cam.ExposureTime.SetValue(exposure_time_to_set)\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False\n\n return result",
"def changeExposure(cam=0, increment=None, value=None):\n try:\n if increment is not None:\n exposure = commands.getoutput(\"v4l2-ctl -d {} --get-ctrl exposure_absolute\".format(cam)).split()[1]\n exposure = int(exposure)\n exposure = max(0, exposure + increment)\n elif value is not None:\n exposure = max(0, value)\n else:\n raise Exception(\"increment or value must be an integer\")\n commands.getoutput(\"v4l2-ctl -d {} --set-ctrl exposure_absolute={}\".format(cam, exposure))\n print \"Exposure {}\".format(exposure)\n except Exception as e:\n print \"Failed to change exposure: {}\".format(e)",
"def calibrate(cap, location):\n\n #Poisition and size of sensor\n [x, y, h, w] = location\n\n #show square to user and wait for key\n print(\"please, step away to clear the blue square displayed on screen and press q to continue\")\n while True:\n ret, frame = cap.read()\n cv2.namedWindow('Calibrate',cv2.WINDOW_NORMAL)\n show = cv2.rectangle(frame, (x,y), (x+w,y+h), (255, 0, 0) , 5)\n cv2.imshow('Calibrate', show)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n\n #get first image, process and define window previous for iteration\n ret, frame = cap.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.GaussianBlur(frame, (7,7), 0)\n previous = frame[y:y+w,x:x+h]\n\n #set parameters for mean value of sensor, kernel of erode function,\n sampleNbMean = 50\n xi = np.empty((0, sampleNbMean))\n kernel = np.ones((5,5), np.uint8)\n\n #iterate over each frame until sample number\n for iteration in range(sampleNbMean):\n\n # Capture frame, draw the window and display to the user\n ret, frame = cap.read()\n # Image operation\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.GaussianBlur(frame, (7,7), 0)\n\n #get present window\n present = frame[y:y+w,x:x+h]\n\n #add sample for mean, add diference of window with prieviuos\n xi = np.append(xi,\n np.sum(\n cv2.erode(\n cv2.bitwise_xor(present,previous), kernel, iterations=1)))\n\n #present image becomes previous before steping into next image\n previous = present\n\n #mean\n mean = np.sum(xi)/len(xi)\n\n #sigma\n sum = 0\n for sample in xi:\n sum += np.power(sample - mean, 2)\n sigma = np.sqrt(sum/len(xi))\n\n #close window\n cv2.destroyWindow('Calibrate')\n\n return mean, sigma",
"def change_zoom(self, b):\n\n x_mid = int(self.ff[0].info['xres'] / 2)\n y_mid = int(self.ff[0].info['yres'] / 2)\n\n x = x_mid - self.x_crop_slider.value\n\n if self.y_crop.value is True:\n y = y_mid - self.y_crop_slider.value\n else:\n y = y_mid - self.x_crop_slider.value\n\n x0 = x_mid - x\n x1 = x_mid + x\n y0 = y_mid - y\n y1 = y_mid + y\n\n self.x_range = [x0, x1]\n self.y_range = [y0, y1]\n\n self.ax.set_xlim([x0, x1])\n self.ax.set_ylim([y0, y1])",
"def adjust_thresholding(self, pos_frame, which='animal'):\n\n cv2.namedWindow('Adjust Thresholding')\n if which == 'animal':\n cv2.createTrackbar('H_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_animal\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('H_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_animal\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('S_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_animal\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('S_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_animal\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('V_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_animal\"][2],\n 255,\n self.nothing)\n cv2.createTrackbar('V_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_animal\"][2],\n 255,\n self.nothing)\n elif which == 'material':\n cv2.createTrackbar('H_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_material\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('H_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_material\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('S_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_material\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('S_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_material\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('V_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_material\"][2],\n 255,\n self.nothing)\n cv2.createTrackbar('V_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_material\"][2],\n 255,\n self.nothing)\n else:\n utils.print_color_message(\"[INFO] Select 'animal' or 'material' to preview the default thresholding values\",\n \"darkgreen\")\n cv2.createTrackbar('H_Low', 'Adjust Thresholding', 0, 255, self.nothing)\n cv2.createTrackbar('H_High', 'Adjust Thresholding', 255, 255, self.nothing)\n cv2.createTrackbar('S_Low', 'Adjust Thresholding', 0, 255, self.nothing)\n cv2.createTrackbar('S_High', 'Adjust Thresholding', 255, 255, self.nothing)\n cv2.createTrackbar('V_Low', 'Adjust Thresholding', 0, 255, self.nothing)\n cv2.createTrackbar('V_High', 'Adjust Thresholding', 255, 255, self.nothing)\n\n test_frame = self._color_capture.get_frame(pos_frame)\n test_frame_cropped = test_frame[self.up_left_y:self.low_right_y, self.up_left_x:self.low_right_x]\n test_frame_cropped_hsv = cv2.cvtColor(test_frame_cropped, cv2.COLOR_BGR2HSV)\n test_frame_blurred = cv2.blur(test_frame_cropped_hsv, (5, 5))\n\n while True:\n h_l = cv2.getTrackbarPos('H_Low', 'Adjust Thresholding')\n h_h = cv2.getTrackbarPos('H_High', 'Adjust Thresholding')\n s_l = cv2.getTrackbarPos('S_Low', 'Adjust Thresholding')\n s_h = cv2.getTrackbarPos('S_High', 'Adjust Thresholding')\n v_l = cv2.getTrackbarPos('V_Low', 'Adjust Thresholding')\n v_h = cv2.getTrackbarPos('V_High', 'Adjust Thresholding')\n test_mask_mouse = cv2.inRange(test_frame_blurred, (h_l, s_l, v_l), (h_h, s_h, v_h))\n overlay = cv2.bitwise_and(test_frame_cropped_hsv, test_frame_cropped_hsv, mask=test_mask_mouse)\n cv2.imshow('Adjust Thresholding', overlay)\n key = cv2.waitKey(10) & 0xFF\n if key == ord(\"q\"):\n break\n cv2.destroyAllWindows()\n for i in range(1, 5):\n cv2.waitKey(1)",
"def trackObject(img, lower, upper):\n\thsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\tlower_col = np.array(lower)\n\tupper_col = np.array(upper)\n\tmask = cv2.inRange(hsv, lower_col, upper_col)\n\tres = cv2.bitwise_and(img, img, mask=mask)\n\treturn res",
"def process_frame(self, downsize):\n # if (not hasattr(downsize,'shape')) and (not hasattr(downsize,'len')):\n # downsize = np.array(downsize)\n\n if type(downsize) != np.ndarray:\n raise TypeError\n\n if not downsize.any():\n raise ValueError\n\n if self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.frame_history.append(downsize)\n\n # Remove no longer needed frames from memory\n self.frame_history = self.frame_history[-(self.LMC_rec_depth):]\n downsize = signal.lfilter(self.b, self.a, self.frame_history, axis=0)[-1]\n\n # Center surround antagonism kernel applied.\n\n downsize = cv2.filter2D(downsize, -1, self.CSKernel)\n\n # RTC filter.\n u_pos = deepcopy(downsize)\n u_neg = deepcopy(downsize)\n u_pos[u_pos < 0] = 0\n u_neg[u_neg > 0] = 0\n u_neg = -u_neg\n\n # On first step, instead of computing just save the images.\n if self.t == self.T0:\n self.v_pos_prev = deepcopy(u_pos)\n self.v_neg_prev = deepcopy(u_neg)\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Do everything for pos == ON.\n tau_pos = u_pos - self.u_pos_prev\n tau_pos[tau_pos >= 0] = 0.001\n tau_pos[tau_pos < 0] = 0.1\n mult_pos = self.rtc_exp(self.dt, tau_pos)\n v_pos = -(mult_pos - 1) * u_pos + mult_pos * self.v_pos_prev\n self.v_pos_prev = deepcopy(v_pos)\n\n # Do everything for neg == OFF.\n tau_neg = u_neg - self.u_neg_prev\n tau_neg[tau_neg >= 0] = 0.001\n tau_neg[tau_neg < 0] = 0.1\n mult_neg = self.rtc_exp(self.dt, tau_neg)\n v_neg = -(mult_neg - 1) * u_neg + mult_neg * self.v_neg_prev\n self.v_neg_prev = deepcopy(v_neg)\n\n # keep track of previous u.\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Subtract v from u to give the output of each channel.\n out_pos = u_pos - v_pos\n out_neg = u_neg - v_neg\n\n # Now apply yet another filter to both parts.\n out_pos = cv2.filter2D(out_pos, -1, self.H_filter)\n out_neg = cv2.filter2D(out_neg, -1, self.H_filter)\n out_pos[out_pos < 0] = 0\n out_neg[out_neg < 0] = 0\n\n if self.t == self.T0:\n self.out_neg_prev = deepcopy(out_neg)\n\n # Delay off channel.\n out_neg = signal.lfilter(self.b1, self.a1, [self.out_neg_prev, out_neg], axis=0)[-1]\n self.out_neg_prev = out_neg\n downsize = out_neg * out_pos\n\n # Show image.\n downsize *= self.gain\n downsize = np.tanh(downsize)\n\n # Threshold.\n downsize[downsize < self.threshold] = 0\n\n if not self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.t += self.dt\n\n return downsize",
"def _get_closeup(self, idx):\n img_arr = p.getCameraImage(width=self._width,\n height=self._height,\n viewMatrix=self._view_matrix,\n projectionMatrix=self._proj_matrix)\n rgb = img_arr[2]\n depth = img_arr[3]\n min = 0.97\n max=1.0\n segmentation = img_arr[4]\n depth = np.reshape(depth, (self._height, self._width,1) )\n segmentation = np.reshape(segmentation, (self._height, self._width,1) )\n\n np_img_arr = np.reshape(rgb, (self._height, self._width, 4))\n np_img_arr = np_img_arr[:, :, :3].astype(np.float64)\n\n view_mat = np.asarray(self._view_matrix).reshape(4, 4)\n proj_mat = np.asarray(self._proj_matrix).reshape(4, 4)\n # pos = np.reshape(np.asarray(list(p.getBasePositionAndOrientation(self._objectUids[0])[0])+[1]), (4, 1))\n\n AABBs = np.zeros((len(self._objectUids), 2, 3))\n cls_ls = []\n \n for i, (_uid, _cls) in enumerate(zip(self._objectUids, self._objectClasses)):\n AABBs[i] = np.asarray(p.getAABB(_uid)).reshape(2, 3)\n cls_ls.append(NAME2IDX[_cls])\n\n # np.save('/home/tony/Desktop/obj_save/view_mat_'+str(self.img_save_cnt), view_mat)\n # np.save('/home/tony/Desktop/obj_save/proj_mat_'+str(self.img_save_cnt), proj_mat)\n # np.save('/home/tony/Desktop/obj_save/img_'+str(self.img_save_cnt), np_img_arr.astype(np.int16))\n # np.save('/home/tony/Desktop/obj_save/AABB_'+str(self.img_save_cnt), AABBs)\n # np.save('/home/tony/Desktop/obj_save/class_'+str(self.img_save_cnt), np.array(cls_ls))\n\n np.save(OUTPUT_DIR + '/closeup_' + str(self.img_save_cnt - 1) + '_' + str(idx), np_img_arr.astype(np.int16))\n dets = np.zeros((AABBs.shape[0], 5))\n for i in range(AABBs.shape[0]):\n dets[i, :4] = self.get_2d_bbox(AABBs[i], view_mat, proj_mat, IM_HEIGHT, IM_WIDTH)\n dets[i, 4] = int(cls_ls[i])\n # np.save(OUTPUT_DIR + '/annotation_'+str(self.img_save_cnt), dets)\n\n test = np.concatenate([np_img_arr[:, :, 0:2], segmentation], axis=-1)\n\n return test",
"def testMatchSwarpBilinearExposure(self):\n self.compareToSwarp(\"bilinear\", useWarpExposure=True,\n useSubregion=False, useDeepCopy=True)",
"def adjust_brightness(image, delta):\r\n return _clip(image + delta * 255)"
] | [
"0.59731835",
"0.5947797",
"0.5704381",
"0.5572308",
"0.5527399",
"0.54456806",
"0.54195607",
"0.5414224",
"0.5396532",
"0.53867406",
"0.5374971",
"0.5353075",
"0.52843153",
"0.52828944",
"0.52818215",
"0.52474177",
"0.5239085",
"0.5233961",
"0.52310765",
"0.52299416",
"0.52266705",
"0.5224868",
"0.5223007",
"0.5221212",
"0.5221138",
"0.5214281",
"0.5205405",
"0.52037317",
"0.519206",
"0.5187612"
] | 0.71824807 | 0 |
Fetches prediction field from prediction byte array. After TensorRT inference, prediction data is saved in byte array and returned by object detection network. This byte array contains several pieces of data about the prediction; we call one such piece a prediction field. The prediction fields layout is described in TRT_PREDICTION_LAYOUT. This function, given prediction byte array returned by network, starting index of given prediction and field name of interest, returns prediction field data corresponding to given arguments. | def fetch_prediction_field(field_name, detection_out, pred_start_idx):
return detection_out[pred_start_idx + TRT_PREDICTION_LAYOUT[field_name]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_predict(path=MODEL_PATH, version=VERSION, namePredictor=DEFAULT_PREDICTOR):\n logging.info(\"trying to load {}\".format(path + namePredictor + version + '.npz'))\n return np.load(path + namePredictor + version + '.npz')['pred']",
"async def predict(params: predict_text):\n tweet = params.text\n prediction = tf_model.predict(tweet)\n prediction_db = PredictionModel(\n text=tweet,\n label=prediction[\"label\"],\n score=prediction[\"score\"],\n time=prediction[\"elapsed_time\"],\n )\n db.session.add(prediction_db)\n db.session.commit()\n return prediction",
"def predict_raw(data_gen, index, partition, model):\n\n if partition == 'validation':\n transcr = data_gen.texts_valid[index]\n audio_path = \"\"\n data_point=data_gen.features_valid[index].T\n elif partition == 'train':\n transcr = data_gen.texts[index]\n # audio_path = data_gen.train_audio_paths[index]\n # data_point = data_gen.normalize(data_gen.featurize(audio_path))\n audio_path=\"\"\n data_point=data_gen.features[index].T\n else:\n raise Exception('Invalid partition! Must be \"train\" or \"validation\"')\n \n prediction = model.predict(np.expand_dims(data_point, axis=0))\n return (audio_path,data_point,transcr,prediction)",
"def get_fvlm_predict_fn(serving_batch_size):\n num_classes, text_dim = load_fvlm_gin_configs()\n predict_step = create_predict_step()\n anchor_boxes, image_info = generate_anchors_info()\n\n def predict_fn(params, input_dict):\n input_dict['labels'] = {\n 'detection': {\n 'anchor_boxes': anchor_boxes,\n 'image_info': image_info,\n }\n }\n output = predict_step(params, input_dict, jax.random.PRNGKey(0))\n output = output['detection']\n output.pop('rpn_score_outputs')\n output.pop('rpn_box_outputs')\n output.pop('class_outputs')\n output.pop('box_outputs')\n return output\n\n input_signatures = {\n 'image':\n tf.TensorSpec(\n shape=(serving_batch_size, _IMAGE_SIZE.value, _IMAGE_SIZE.value,\n 3),\n dtype=tf.bfloat16,\n name='image'),\n 'text':\n tf.TensorSpec(\n shape=(serving_batch_size, num_classes, text_dim),\n dtype=tf.float32,\n name='queries'),\n }\n return predict_fn, input_signatures",
"def predict(predictor, inputs):\n predictor_type = type(predictor).__name__\n\n # Standard tensorflow predictor\n if predictor_type in [\"tf.estimator.predictor\", \"SavedModelPredictor\"]:\n return predictor(inputs)[\"top_k\"]\n\n # Python based endpoint\n elif predictor_type == \"sagemaker.tensorflow.model.TensorFlowPredictor\":\n prediction = predictor.predict(inputs)\n top_k = prediction[\"outputs\"][\"top_k\"]\n output_shape = [y[\"size\"] for y in top_k[\"tensor_shape\"][\"dim\"]]\n output_val = np.array(top_k[\"int_val\"]).reshape(*output_shape)\n return output_val\n\n # Tensorflow serving based endpoint\n elif predictor_type in [\"sagemaker.tensorflow.serving.Predictor\", \"Predictor\"]:\n prediction = predictor.predict(inputs)\n return np.array(prediction[\"predictions\"])\n else:\n print(\"Predict method failed. Supplied predictor type {} not supported.\".format(predictor_type))",
"def predict(self):\n for src_p, pair in enumerate(self.pairs):\n dst_p = pair[1].argmax()\n dst_ind = pair[0][dst_p]\n\n self.vector_field.append(np.hstack([self.frame_0[src_p], self.frame_1[dst_ind]]))\n\n self.vector_field = np.vstack(self.vector_field)\n\n return self.vector_field",
"def predict(self, testloader, field=None):\n model_name = str(field).lower()\n\n assert field == HOME or field == AWAY, 'ERROR - model predict: WRONG model name. Give \"home\" or \"away\"'\n\n preds = {}\n\n for i, model in enumerate(self.models):\n if (model_name == HOME):\n # logger.info('> Calling Home Network')\n field_net = model.model.home_network\n elif (model_name == AWAY):\n # logger.info('> Calling Away Network')\n field_net = model.model.away_network\n else:\n raise ValueError('Model - predict: Wrong model name')\n\n model_preds = []\n with torch.no_grad():\n\n for x in testloader:\n x = torch.Tensor(x).to(self.device)\n out = field_net(x)\n\n out = out.squeeze()\n\n model_preds.append(out.item())\n\n preds[i] = model_preds\n\n return preds[i]",
"def predict(self, data, version='default'):\n return self.skil.api.transformarray(\n deployment_name=self.deployment.name,\n transform_name=self.model_name,\n version_name=version,\n batch_record=data\n )",
"def predict(self, image_or_filename: Union[np.ndarray, str]) -> Tuple[str, float]:\n if isinstance(image_or_filename, str):\n image = util.load_image(image_or_filename)\n else:\n image = image_or_filename\n return self.model.predict(image, batch_size=8)\n # return self.model.predict_on_image(image)",
"def predict(self, trained_model, prediction_datetime):\n return trained_model.predict()",
"def _deserialize_single_field(\n example_data, tensor_info: feature_lib.TensorInfo\n):\n # Ragged tensor case:\n if tensor_info.sequence_rank > 1:\n example_data = _dict_to_ragged(example_data, tensor_info)\n\n # Restore shape if possible. TF Example flattened it.\n elif tensor_info.shape.count(None) < 2:\n shape = [-1 if i is None else i for i in tensor_info.shape]\n example_data = tf.reshape(example_data, shape)\n\n # Restore dtype\n if example_data.dtype != tensor_info.tf_dtype:\n example_data = tf.dtypes.cast(example_data, tensor_info.tf_dtype)\n return example_data",
"def predict(net, input, fields):\n net.eval()\n example = torch_data.Example.fromlist(input, fields)\n dataset = torch_data.Dataset([example])\n iterator = torch_data.Iterator(dataset, batch_size=1)\n net_in = next(iter(iterator))\n return predict_batch(net, net_in)",
"def predict_from_model(patch, model):\n\n prediction = model.predict(patch.reshape(1, 256, 256, 3))\n prediction = prediction[:, :, :, 1].reshape(256, 256)\n return prediction",
"def predict(self, data):\n\n prediction = None\n if self.model is not None:\n prediction = self.model.predict(data)\n return prediction",
"def decode_prediction(self, prediction):\n index = np.argmax(prediction)\n\n inv_map = {v: k for k, v in self.class_index.items()}\n label = inv_map[index]\n return label, np.amax(prediction)",
"def load_predict_byname(filename, path=MODEL_PATH):\n full_path = os.path.join(path, filename)\n logging.info(\"trying to load {}\".format(full_path))\n return np.load(os.path.join(path, filename))['pred']",
"def extract_pred_from_estimator_predictions(predictions):\n # print('predictions:', predictions)\n pred = np.array([])\n for prediction in predictions:\n pred = np.append(pred, prediction['predictions'])\n num_samples = len(pred)\n pred = pred.reshape((num_samples, ))\n return pred",
"def tta_predict(learner, im_arr):\n # Note: we are not using the TTA method built into fastai because it only\n # works on image classification problems (and this is undocumented).\n # We should consider contributing this upstream to fastai.\n probs = []\n for k in range(8):\n trans_im = dihedral(Image(im_arr), k)\n o = learner.predict(trans_im)[2]\n # https://forums.fast.ai/t/how-best-to-have-get-preds-or-tta-apply-specified-transforms/40731/9\n o = Image(o)\n if k == 5:\n o = dihedral(o, 6)\n elif k == 6:\n o = dihedral(o, 5)\n else:\n o = dihedral(o, k)\n probs.append(o.data)\n\n label_arr = torch.stack(probs).mean(0).argmax(0).numpy()\n return label_arr",
"def prediction(self, x):\n t = self.model.predict(x.reshape(1, -1))\n return t",
"def _extract_prediction_tensors(model,\n create_input_dict_fn,\n ignore_groundtruth=False):\n input_dict = create_input_dict_fn()\n prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)\n input_dict = prefetch_queue.dequeue()\n original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0)\n preprocessed_image = model.preprocess(tf.to_float(original_image))\n prediction_dict = model.predict(preprocessed_image)\n detections = model.postprocess(prediction_dict)\n\n groundtruth = None\n if not ignore_groundtruth:\n groundtruth = {\n fields.InputDataFields.groundtruth_boxes:\n input_dict[fields.InputDataFields.groundtruth_boxes],\n fields.InputDataFields.groundtruth_classes:\n input_dict[fields.InputDataFields.groundtruth_classes],\n fields.InputDataFields.groundtruth_area:\n input_dict[fields.InputDataFields.groundtruth_area],\n fields.InputDataFields.groundtruth_is_crowd:\n input_dict[fields.InputDataFields.groundtruth_is_crowd],\n fields.InputDataFields.groundtruth_difficult:\n input_dict[fields.InputDataFields.groundtruth_difficult]\n }\n if fields.InputDataFields.groundtruth_group_of in input_dict:\n groundtruth[fields.InputDataFields.groundtruth_group_of] = (\n input_dict[fields.InputDataFields.groundtruth_group_of])\n if fields.DetectionResultFields.detection_masks in detections:\n groundtruth[fields.InputDataFields.groundtruth_instance_masks] = (\n input_dict[fields.InputDataFields.groundtruth_instance_masks])\n\n return eval_util.result_dict_for_single_example(\n original_image,\n input_dict[fields.InputDataFields.source_id],\n detections,\n groundtruth,\n class_agnostic=(\n fields.DetectionResultFields.detection_classes not in detections),\n scale_to_absolute=True)",
"def predict(request):\n request_json = request.get_json()\n if request_json and 'review_body' in request_json:\n content = request_json['review_body'] # TODO add review_summary\n prediction = get_prediction(\n content, 'projects/207895552307/locations/us-central1/models/TCN5004391989450375168')\n classifications = []\n return MessageToJson(prediction)\n else:\n return f'ERROR: Missing review_body!'",
"def predict(self, compound, spacegroup, T):\n\n prediction = self.model.predict(self._transform_input(compound,\n spacegroup, T))\n return float(prediction)",
"def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)",
"def _get_prediction(self):\n raise NotImplementedError",
"def read(self, field_name):\n field = self.mem_map.get_field(field_name)\n raw_data = self.reader(field.get_offset(), field.get_size())\n if raw_data:\n deps = field.get_deps()\n decoded_deps = {dep: self.read(dep) for dep in deps}\n return field.decode(raw_data, **decoded_deps)\n return None",
"def get_field_from_dict(example_dict, field_name, height_m_agl=None):\n\n check_field_name(field_name)\n\n if field_name in ALL_SCALAR_PREDICTOR_NAMES:\n height_m_agl = None\n field_index = example_dict[SCALAR_PREDICTOR_NAMES_KEY].index(field_name)\n data_matrix = example_dict[SCALAR_PREDICTOR_VALS_KEY][..., field_index]\n elif field_name in ALL_SCALAR_TARGET_NAMES:\n height_m_agl = None\n field_index = example_dict[SCALAR_TARGET_NAMES_KEY].index(field_name)\n data_matrix = example_dict[SCALAR_TARGET_VALS_KEY][..., field_index]\n elif field_name in ALL_VECTOR_PREDICTOR_NAMES:\n field_index = example_dict[VECTOR_PREDICTOR_NAMES_KEY].index(field_name)\n data_matrix = example_dict[VECTOR_PREDICTOR_VALS_KEY][..., field_index]\n else:\n field_index = example_dict[VECTOR_TARGET_NAMES_KEY].index(field_name)\n data_matrix = example_dict[VECTOR_TARGET_VALS_KEY][..., field_index]\n\n if height_m_agl is None:\n return data_matrix\n\n height_index = match_heights(\n heights_m_agl=example_dict[HEIGHTS_KEY],\n desired_height_m_agl=height_m_agl\n )\n\n return data_matrix[..., height_index]",
"def get_aux_fields(prediction_dict, example_dict):\n\n scalar_target_matrix = prediction_dict[prediction_io.SCALAR_TARGETS_KEY]\n scalar_prediction_matrix = (\n prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY]\n )\n\n num_examples = scalar_prediction_matrix.shape[0]\n num_ensemble_members = scalar_prediction_matrix.shape[-1]\n\n aux_target_matrix = numpy.full((num_examples, 0), numpy.nan)\n aux_prediction_matrix = numpy.full(\n (num_examples, 0, num_ensemble_members), numpy.nan\n )\n aux_target_field_names = []\n aux_predicted_field_names = []\n\n shortwave_surface_down_flux_index = -1\n shortwave_toa_up_flux_index = -1\n longwave_surface_down_flux_index = -1\n longwave_toa_up_flux_index = -1\n\n scalar_target_names = example_dict[example_utils.SCALAR_TARGET_NAMES_KEY]\n these_flux_names = [\n example_utils.SHORTWAVE_SURFACE_DOWN_FLUX_NAME,\n example_utils.SHORTWAVE_TOA_UP_FLUX_NAME\n ]\n\n if all([n in scalar_target_names for n in these_flux_names]):\n shortwave_surface_down_flux_index = scalar_target_names.index(\n example_utils.SHORTWAVE_SURFACE_DOWN_FLUX_NAME\n )\n shortwave_toa_up_flux_index = scalar_target_names.index(\n example_utils.SHORTWAVE_TOA_UP_FLUX_NAME\n )\n\n aux_target_field_names.append(SHORTWAVE_NET_FLUX_NAME)\n aux_predicted_field_names.append(SHORTWAVE_NET_FLUX_NAME)\n\n this_target_matrix = (\n scalar_target_matrix[:, [shortwave_surface_down_flux_index]] -\n scalar_target_matrix[:, [shortwave_toa_up_flux_index]]\n )\n aux_target_matrix = numpy.concatenate(\n (aux_target_matrix, this_target_matrix), axis=1\n )\n\n this_prediction_matrix = (\n scalar_prediction_matrix[:, [shortwave_surface_down_flux_index], :]\n - scalar_prediction_matrix[:, [shortwave_toa_up_flux_index], :]\n )\n aux_prediction_matrix = numpy.concatenate(\n (aux_prediction_matrix, this_prediction_matrix), axis=1\n )\n\n these_flux_names = [\n example_utils.LONGWAVE_SURFACE_DOWN_FLUX_NAME,\n example_utils.LONGWAVE_TOA_UP_FLUX_NAME\n ]\n\n if all([n in scalar_target_names for n in these_flux_names]):\n longwave_surface_down_flux_index = scalar_target_names.index(\n example_utils.LONGWAVE_SURFACE_DOWN_FLUX_NAME\n )\n longwave_toa_up_flux_index = scalar_target_names.index(\n example_utils.LONGWAVE_TOA_UP_FLUX_NAME\n )\n\n aux_target_field_names.append(LONGWAVE_NET_FLUX_NAME)\n aux_predicted_field_names.append(LONGWAVE_NET_FLUX_NAME)\n\n this_target_matrix = (\n scalar_target_matrix[:, [longwave_surface_down_flux_index]] -\n scalar_target_matrix[:, [longwave_toa_up_flux_index]]\n )\n aux_target_matrix = numpy.concatenate(\n (aux_target_matrix, this_target_matrix), axis=1\n )\n\n this_prediction_matrix = (\n scalar_prediction_matrix[:, [longwave_surface_down_flux_index], :] -\n scalar_prediction_matrix[:, [longwave_toa_up_flux_index], :]\n )\n aux_prediction_matrix = numpy.concatenate(\n (aux_prediction_matrix, this_prediction_matrix), axis=1\n )\n\n return {\n AUX_TARGET_NAMES_KEY: aux_target_field_names,\n AUX_PREDICTED_NAMES_KEY: aux_predicted_field_names,\n AUX_TARGET_VALS_KEY: aux_target_matrix,\n AUX_PREDICTED_VALS_KEY: aux_prediction_matrix,\n SHORTWAVE_SURFACE_DOWN_FLUX_INDEX_KEY:\n shortwave_surface_down_flux_index,\n SHORTWAVE_TOA_UP_FLUX_INDEX_KEY: shortwave_toa_up_flux_index,\n LONGWAVE_SURFACE_DOWN_FLUX_INDEX_KEY: longwave_surface_down_flux_index,\n LONGWAVE_TOA_UP_FLUX_INDEX_KEY: longwave_toa_up_flux_index\n }",
"def make_tflite_inference(ndvi_img_array, model_interpreter):\n # Get input and output tensors.\n input_details = model_interpreter.get_input_details()\n output_details = model_interpreter.get_output_details()\n\n # Get Input shape\n input_shape = input_details[0]['shape']\n input_data = ndvi_img_array.reshape(input_shape)\n\n model_interpreter.set_tensor(input_details[0]['index'], input_data)\n model_interpreter.invoke()\n\n outputs = []\n\n for tensor in output_details:\n output_data = model_interpreter.get_tensor(tensor['index'])\n outputs.append(output_data[0][0])\n\n prediction = outputs[0]\n\n return prediction",
"def predict_single(self, data, version='default'):\n return self.skil.api.transformincrementalarray(\n deployment_name=self.deployment.name,\n transform_name=self.model_name,\n version_name=version,\n single_record=data\n )",
"def predict(self, dt=1):\n self.kf.predict()\n if self.time_since_update > 0: # there was missed detections\n self.continuing_hits = 0\n self.time_since_update += 1\n return self.kf.x[:self.dim_z].squeeze()"
] | [
"0.55769396",
"0.5293517",
"0.52770144",
"0.5273354",
"0.5251776",
"0.52444863",
"0.522911",
"0.51298296",
"0.507966",
"0.5062503",
"0.50453293",
"0.5021716",
"0.5021298",
"0.5006652",
"0.5005181",
"0.50023496",
"0.49902007",
"0.49683675",
"0.4965189",
"0.4964087",
"0.49365512",
"0.49313185",
"0.49211395",
"0.49162862",
"0.49092662",
"0.48989698",
"0.48897567",
"0.48854995",
"0.48840037",
"0.48748192"
] | 0.7810624 | 0 |
Get a dictionary with the important tags for DAGMC geometries | def get_dagmc_tags(my_core):
dagmc_tags = {}
dagmc_tags['geom_dim'] = my_core.tag_get_handle('GEOM_DIMENSION', size=1, tag_type=types.MB_TYPE_INTEGER,
storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # geometric dimension
dagmc_tags['category'] = my_core.tag_get_handle('CATEGORY', size=32, tag_type=types.MB_TYPE_OPAQUE,
storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # the category
dagmc_tags['global_id'] = my_core.tag_get_handle('GLOBAL_ID', size=1, tag_type=types.MB_TYPE_INTEGER,
storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # id
return dagmc_tags | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getTag(self, inputs, tag):\n result = {}\n for into in inputs:\n for i in into:\n if i in self.sim.agents:\n agentTags = self.sim.agents[i].access[\"tags\"]\n if tag in agentTags:\n result[i] = agentTags[tag]\n return result",
"def tags_dict(self):\n return ({'name': 'tag', 'attrs': {'k': k, 'v': v}} for k, v in self.tags.items())",
"def getFeatureDicts(self):\n return [self.data.getWordTagDict(), self.data.tags_trigrams, self.data.tags_bigrams]",
"def tags():",
"def comando_gne(self):\r\n if args.tag:\r\n\t if args.value:\r\n tags = self.alterar_gne_framework(args.tag, args.value)\r\n\t else:\r\n tags = self.ler_gne_framework(args.tag)\r\n\t return {args.tag:tags[args.tag]} # Ex: {\"nnf\":115}\r",
"def tag_dict(self):\n tag_dict = dict()\n for document in self.documents:\n for tag in document.tags:\n tag_type = tag['tag']\n tag_dict[tag_type] = tag_dict.get(tag_type, []) + [tag]\n return tag_dict",
"def get_tag_dict(self):\n return self.tag_dict",
"def get_tags(self) -> Union[Dict[str, str], None]:\n # read the original value passed by the command\n tags = self.raw_param.get(\"tags\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.tags is not None:\n tags = self.mc.tags\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return tags",
"def get_tags_gff(tagline):\n\n tags = dict()\n for t in tagline.split(';'):\n tt = t.split('=')\n tags[tt[0]] = tt[1]\n return tags",
"def feature_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationFeatureTagArgs']]]]:\n return pulumi.get(self, \"feature_tags\")",
"def feature_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationFeatureTagArgs']]]]:\n return pulumi.get(self, \"feature_tags\")",
"def get_tags(self) -> Dict:\n return self.orthanc.get_instance_tags(self.identifier)",
"def get_attached_tags(self, complexe_tags):\n attached_tags = []\n for tags in tqdm(complexe_tags):\n tokenized_tags = self.tokenize(tags)\n intersection_tags = self.list_intersection(tokenized_tags)\n attached_tags.append(intersection_tags)\n return attached_tags",
"def tag(self):\n \n tag = super(self.__class__, self).tag();\n tag = als.tag_join(tag, als.stra(self.strain));\n tag = als.tag_join(tag, als.stra(self.dtype));\n tag = als.tag_join(tag, 'w=%s' % als.stra(self.wid)); \n tag = als.tag_join(tag, 's=%s' % als.stra(self.stage));\n #tag = analysis.tag_join(tag, 'l=%s' % analysis.stra(self.label)); \n\n return tag;",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")",
"def getFeatureDicts(self):\n pass",
"def tag_with_features(self, efeats):\n if len(efeats)==3:\n print \"d\"\n\n # build array of dicts\n state_dicts = []\n for e_phi in efeats: \n state_dicts = self.viterbi1(e_phi, state_dicts)\n \n \n # trace back\n yyhat, phis = self.traceback(efeats, state_dicts)\n assert len(efeats)==len(yyhat)#len(yyhat), \n\n return (yyhat, phis)",
"def simplified_tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_, params={'simplify': True}))",
"def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanVirtualNodeGroupLaunchSpecificationTagArgs']]]]:\n return pulumi.get(self, \"tags\")",
"def feature_dist_func_dict():\n return {\"tanimoto_dissimilarity\": tanimoto_dissimilarity}",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")",
"def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")"
] | [
"0.6049964",
"0.5798417",
"0.5783547",
"0.56233895",
"0.56191564",
"0.5531968",
"0.54956996",
"0.5407354",
"0.54043907",
"0.5376861",
"0.5376861",
"0.53438914",
"0.53383356",
"0.5330779",
"0.5323087",
"0.5323087",
"0.5323087",
"0.5323087",
"0.5323087",
"0.5323087",
"0.5323087",
"0.5323087",
"0.5320604",
"0.5311345",
"0.52669716",
"0.5262341",
"0.5250732",
"0.52448016",
"0.52448016",
"0.52448016"
] | 0.6765775 | 0 |
Get a dictionary with MOAB ranges for each of the requested entity types | def get_native_ranges(my_core, meshset, entity_types):
native_ranges = {}
for entity_type in entity_types:
native_ranges[entity_type] = my_core.get_entities_by_type(
meshset, entity_type)
return native_ranges | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_entityset_ranges(my_core, meshset, geom_dim):\n\n entityset_ranges = {}\n entityset_types = ['Nodes', 'Curves', 'Surfaces', 'Volumes']\n for dimension, set_type in enumerate(entityset_types):\n entityset_ranges[set_type] = my_core.get_entities_by_type_and_tag(meshset, types.MBENTITYSET, geom_dim,\n [dimension])\n return entityset_ranges",
"def get_ranges(graph: Graph, property_to_id: Dict[str, int], entity_type_to_id: Dict[str, int]) -> Dict[int, int]:\n # dictionary pointing from object property id to an entity type id\n ranges = {}\n\n # add all range triples for which the subject is an object property and the object is an entity type\n for subject, predicate, object in graph.triples((None, RDFS.range, None)):\n if subject in property_to_id and object in entity_type_to_id:\n ranges[property_to_id[subject]] = entity_type_to_id[object]\n return ranges",
"def _parse_requantization_ranges(self):\n res = {}\n\n print_suffix = \"__print__\"\n lines = self._get_valid_log()\n temp_min = {}\n temp_max = {}\n pattern_def = r\"{};{}:\\[\\-?\\d+\\.?\\d*e?-?\\+?\\d*\\]\".format(print_suffix, self.postfix)\n for i in lines:\n if not re.search(pattern_def, i):\n continue\n\n max_line_data = i.split(print_suffix + \";\" + self.postfix)[-1]\n min_value = max_line_data.split('][')[0].split('[')[1]\n max_value = max_line_data.split('][')[1].split(']')[0]\n name = i.split(';')[1].strip()[:-len(print_suffix)]\n if name not in temp_min:\n temp_min[name] = []\n if name not in temp_max:\n temp_max[name] = []\n\n temp_min[name].append(float(min_value))\n temp_max[name].append(float(max_value))\n\n for key in temp_min:\n target_min_index = int(np.ceil(len(temp_min[key]) * (1 - self.threshold)))\n\n if key not in res:\n res[key] = []\n\n if target_min_index > len(temp_min[key]) - 1:\n target_min_index = len(temp_min[key]) - 1\n res[key].append(sorted(temp_min[key])[target_min_index])\n\n for key in temp_max:\n target_max_index = int(np.floor(len(temp_max[key]) * self.threshold)) - 1\n\n if target_max_index > len(temp_max[key]) - 1:\n target_max_index = len(temp_max[key]) - 1\n\n res[key].append(sorted(temp_max[key])[target_max_index])\n\n if self.tensor_data:\n for k, v in self.tensor_data.items():\n if k in res:\n self.logger.debug(\"Update node {} min to {}, max to {}.\".format(k, v[2], v[3]))\n res[k] = [v[2], v[3]]\n return res",
"def get_param_ranges(line_model):\n\n line_models = ['voigt', 'rosato', 'stehle', 'stehle_param', ]\n n_upper_range = [(np.nan, np.nan), (3, 7), (3, 30), (3, 9)]\n e_dens_range = [(np.nan, np.nan), (1e19, 1e22), (1e16, 1e25), (0., 1e22)]\n temp_range = [(np.nan, np.nan), (0.32, 32), (0.22, 110), (0., 1000)]\n b_field_range = [(np.nan, np.nan), (0, 5), (0, 5), (0, 5)]\n\n param_ranges = list(zip(line_models, n_upper_range, e_dens_range, temp_range, b_field_range))\n columns = ['line_model_name', 'n_upper_range', 'e_dens_range', 'temp_range', 'b_field_range']\n param_ranges = pd.DataFrame(data=param_ranges, columns=columns)\n\n n_upper_range = param_ranges['n_upper_range'][param_ranges['line_model_name'] == line_model].values[0]\n e_dens_range = param_ranges['e_dens_range'][param_ranges['line_model_name'] == line_model].values[0]\n temp_range = param_ranges['temp_range'][param_ranges['line_model_name'] == line_model].values[0]\n b_field_range = param_ranges['b_field_range'][param_ranges['line_model_name'] == line_model].values[0]\n\n return n_upper_range, e_dens_range, temp_range, b_field_range",
"def get_etype_2_minmax_funcEnum(entitytype_arr):\n etype_2_minmax_funcEnum = {}\n s = pd.Series(entitytype_arr)\n for name, group in s.groupby(s):\n etype_2_minmax_funcEnum[name] = (min(group.index), max(group.index))\n return etype_2_minmax_funcEnum",
"def get_limits(age_groups):\n\n limits = {}\n for data in age_groups:\n pattern = re.compile(r'([\\d]+)-([\\d]+)')\n match = pattern.search(data)\n age_min = int(match.group(1).strip())\n age_max = int(match.group(2).strip())\n # print(f'limits = {age_min} to {age_max}')\n limits[f'Age_{data}'] = [age_min, age_max]\n return limits",
"def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic",
"def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic",
"def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2",
"def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range",
"def test_get_meta_range(self):\n pass",
"def getRangeMM(self) -> float:\n ...",
"def get_range_info(self):\n with open(self.range_path, 'r') as _file:\n for line in _file.readlines():\n list0 = line.strip().split('-')\n range_dict = {\n 'min': int(list0[0], 16),\n 'max': int(list0[1], 16),\n 'max_offset': int(list0[1], 16) - int(list0[0], 16),\n }\n self.ranges.append(range_dict)",
"def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"product\": [\n self.from_entity(entity=\"product\", intent=[\"inform\"]),\n ],\n \"applicant_name\": [\n self.from_entity(entity=\"applicant_name\", intent=[\"inform\"]),\n ],\n \"applicant_dob\": [\n self.from_entity(entity=\"applicant_dob\", intent=[\"inform\"]),\n ],\n \"applicant_phoneno\": [\n self.from_entity(entity=\"applicant_phoneno\", intent=[\"inform\"]),\n ],\n \"applicant_address\": [\n self.from_entity(entity=\"applicant_address\", intent=[\"inform\"]),\n ]\n }",
"def map_range( self, rng ):\n rmap = {\n '2 nA': pac.Ammeter.CurrentRange.N2,\n '20 nA': pac.Ammeter.CurrentRange.N20,\n '200 nA': pac.Ammeter.CurrentRange.N200,\n '2 uA': pac.Ammeter.CurrentRange.U2,\n '20 uA': pac.Ammeter.CurrentRange.U20,\n '200 uA': pac.Ammeter.CurrentRange.U200,\n '2 mA': pac.Ammeter.CurrentRange.M2,\n '20 mA': pac.Ammeter.CurrentRange.M20\n }\n \n if rng in rmap:\n return rmap[ rng ]\n \n else:\n raise ValueError( 'Invalid range' )",
"def _part_group_cell_mapper(bd_type):\n js, iss = np.meshgrid(range(smt.cols), range(smt.rows)) # zero indexed to agree with python interpretation\n idx = bd_type.flatten() != -1\n out = dict(zip(range(1, idx.sum() + 1), list(zip(iss.flatten()[idx], js.flatten()[idx]))))\n return out",
"def get_range(self):\n classes = concrete_descendents(self.class_)\n d=dict([(name,class_) for name,class_ in classes.items()])\n if self.allow_None:\n d['None']=None\n return d",
"def compute_bounds(self, space):\n bounds = np.zeros((len(space), 2))\n\n for idx, param in enumerate(space):\n\n if TYPE[param[\"type\"]] is TYPE.FLOAT or \\\n TYPE[param[\"type\"]] is TYPE.INTEGER:\n bounds[idx] = (param[\"min\"], param[\"max\"])\n\n elif TYPE[param[\"type\"]] is TYPE.DISCRETE or \\\n TYPE[param[\"type\"]] is TYPE.DISCRETE:\n bounds[idx] = (0, len(param['values']))\n\n return bounds",
"def _get_area_incmfd_attr(max_np, max_hd, max_bins):\n\n att = []\n att.append({'name': 'src_id', 'type': 'String', 'len': 10})\n att.append({'name': 'src_name', 'type': 'String', 'len': 30})\n att.append({'name': 'tect_reg', 'type': 'String', 'len': 30})\n att.append({'name': 'upp_seismo', 'type': 'Real'})\n att.append({'name': 'low_seismo', 'type': 'Real'})\n att.append({'name': 'mag_scal_r', 'type': 'String', 'len': 15})\n att.append({'name': 'rup_asp_ra', 'type': 'Real'})\n att.append({'name': 'mfd_type', 'type': 'String', 'len': 20})\n\n att.append({'name': 'min_mag', 'type': 'Real'})\n att.append({'name': 'bin_width', 'type': 'Real'})\n att.append({'name': 'num_bins', 'type': 'Integer'})\n for i in range(1, max_bins+1):\n lab = 'or_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n att.append({'name': 'num_npd', 'type': 'Integer'})\n for i in range(1, max_np+1):\n lab = 'weight_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'strike_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'rake_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'dip_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n att.append({'name': 'num_hdd', 'type': 'Integer'})\n for i in range(1, max_hd+1):\n lab = 'hdd_d_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'hdd_w_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n return att",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"search_type\": [\n self.from_trigger_intent(\n intent=\"search_transactions\", value=\"spend\"\n ),\n self.from_trigger_intent(\n intent=\"check_earnings\", value=\"deposit\"\n ),\n ],\n \"time\": [\n self.from_entity(entity=\"time\"),\n ]\n }",
"def _get_entity_mappings(query_list: ProcessedQueryList) -> Dict:\n entity_labels = set()\n logger.info(\"Generating Entity Labels...\")\n for d, i, entities in zip(\n query_list.domains(), query_list.intents(), query_list.entities()\n ):\n if len(entities):\n for entity in entities:\n e = str(entity.entity.type)\n entity_labels.add(f\"{d}.{i}.B|{e}\")\n entity_labels.add(f\"{d}.{i}.I|{e}\")\n entity_labels.add(f\"{d}.{i}.S|{e}\")\n entity_labels.add(f\"{d}.{i}.E|{e}\")\n\n e = \"O|\"\n entity_labels.add(f\"{d}.{i}.{e}\")\n\n entity_labels = sorted(list(entity_labels))\n return dict(zip(entity_labels, range(len(entity_labels))))",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"amount_of_money\": [\n self.from_entity(entity=\"amount-of-money\"),\n self.from_entity(entity=\"number\"),\n ],\n \"confirm\": [\n self.from_intent(value=True, intent=\"affirm\"),\n self.from_intent(value=False, intent=\"deny\"),\n ],\n }",
"def range_params(self, ran, kw):\n specs = {\"range\": (SchemaNode(\"value\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")),\n \"length\": (SchemaNode(\"param\").set_attr(\"name\",\"length\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minLength\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxLength\"))}\n (exact, min_, max_) = specs[kw]\n if (len(ran) == 1 or ran[0] == ran[1]) and ran[0][0] != \"m\":\n elem = exact\n elem.text = ran[0]\n return [elem]\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"bug\":[self.from_entity(\n entity=\"bug\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"beverage\": [self.from_entity(\n entity=\"beverage\", \n intent=\"inform\"), \n self.from_text(\n intent=\"inform\")],\n \"second_person_plural\": [self.from_entity(\n entity=\"second_person_plural\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"cot_caught\": [self.from_entity(\n entity=\"cot_caught\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"rain_sun\": [self.from_entity(\n entity=\"rain_sun\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"crawfish\": [self.from_entity(\n entity=\"crawfish\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"halloween\": [self.from_entity(\n entity=\"halloween\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"sandwich\": [self.from_entity(\n entity=\"sandwich\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"side_road\": [self.from_entity(\n entity=\"side_road\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"shoes\": [self.from_entity(\n entity=\"shoes\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"highway\": [self.from_entity(\n entity=\"highway\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"yard_sale\": [self.from_entity(\n entity=\"yard_sale\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"rubbernecking\": [self.from_entity(\n entity=\"rubbernecking\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"frosting\": [self.from_entity(\n entity=\"frosting\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"lawyer\": [self.from_entity(\n entity=\"lawyer\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"kitty_corner\": [self.from_entity(\n entity=\"kitty_corner\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"firefly\": [self.from_entity(\n entity=\"firefly\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"verge\": [self.from_entity(\n entity=\"verge\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"brew_thru\": [self.from_entity(\n entity=\"brew_thru\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"water_fountain\": [self.from_entity(\n entity=\"water_fountain\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")]\n }",
"def test_get_range(self):\n pass",
"def scan_range(self, obj):\n detect_minmax = []\n for item in self._category:\n cat = item.replace(' ', '')\n has_minmax = False\n for k, v in obj.items():\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(in_v.items())[-1]\n \n if has_minmax:\n detect_minmax.append('Min ' + item)\n detect_minmax.append('Max ' + item)\n else:\n detect_minmax.append(item)\n \n self._category_aux = detect_minmax\n for c in self._category_aux:\n self._data[c] = []",
"def get_allowed_ranges(csvfile):\n from csv import DictReader\n ranges = {}\n with open(csvfile, 'r') as infile:\n # Remove spaces from field headers\n firstline = infile.readline()\n headers = [k.strip() for k in firstline.split(',')]\n if not len(headers) == 11:\n headers = [k.strip() for k in firstline.split(' ')]\n opfield = 'CSVv2;OperatingPoint'\n if not opfield in headers: opfield = 'cMVAv2;OperatingPoint'\n if not opfield in headers: opfield = 'CSV;OperatingPoint'\n\n reader = DictReader(infile, fieldnames=headers)\n for row in reader:\n key = (int(row[opfield].strip()),\n row['measurementType'].strip(),\n row['sysType'].strip(),\n int(row['jetFlavor'].strip()))\n ranges.setdefault(key, {})\n for var in ['eta', 'pt', 'discr']:\n mini = float(row['%sMin'%var].strip())\n maxi = float(row['%sMax'%var].strip())\n ranges[key]['%sMin'%var] = min(ranges[key].setdefault('%sMin'%var, mini), mini)\n ranges[key]['%sMax'%var] = max(ranges[key].setdefault('%sMax'%var, maxi), maxi)\n return ranges"
] | [
"0.65154475",
"0.600458",
"0.5998008",
"0.5853334",
"0.5807972",
"0.5797119",
"0.562755",
"0.562755",
"0.55852807",
"0.55669194",
"0.5557111",
"0.5455386",
"0.54513985",
"0.5441645",
"0.53997433",
"0.53997433",
"0.5393903",
"0.5376995",
"0.5375735",
"0.53752905",
"0.5361025",
"0.53542024",
"0.53466415",
"0.53338194",
"0.5326077",
"0.53103244",
"0.53081155",
"0.5306106",
"0.5288727",
"0.5277856"
] | 0.6155824 | 1 |
Get a dictionary with MOAB Ranges that are specific to the types.MBENTITYSET type | def get_entityset_ranges(my_core, meshset, geom_dim):
entityset_ranges = {}
entityset_types = ['Nodes', 'Curves', 'Surfaces', 'Volumes']
for dimension, set_type in enumerate(entityset_types):
entityset_ranges[set_type] = my_core.get_entities_by_type_and_tag(meshset, types.MBENTITYSET, geom_dim,
[dimension])
return entityset_ranges | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getRangeMM(self) -> float:\n ...",
"def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic",
"def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic",
"def get_native_ranges(my_core, meshset, entity_types):\n\n native_ranges = {}\n for entity_type in entity_types:\n native_ranges[entity_type] = my_core.get_entities_by_type(\n meshset, entity_type)\n return native_ranges",
"def getMassRange(brand):\n return mass_range[brand]",
"def test_get_meta_range(self):\n pass",
"def range_(self):\n return self.bset.range_",
"def get_param_ranges(line_model):\n\n line_models = ['voigt', 'rosato', 'stehle', 'stehle_param', ]\n n_upper_range = [(np.nan, np.nan), (3, 7), (3, 30), (3, 9)]\n e_dens_range = [(np.nan, np.nan), (1e19, 1e22), (1e16, 1e25), (0., 1e22)]\n temp_range = [(np.nan, np.nan), (0.32, 32), (0.22, 110), (0., 1000)]\n b_field_range = [(np.nan, np.nan), (0, 5), (0, 5), (0, 5)]\n\n param_ranges = list(zip(line_models, n_upper_range, e_dens_range, temp_range, b_field_range))\n columns = ['line_model_name', 'n_upper_range', 'e_dens_range', 'temp_range', 'b_field_range']\n param_ranges = pd.DataFrame(data=param_ranges, columns=columns)\n\n n_upper_range = param_ranges['n_upper_range'][param_ranges['line_model_name'] == line_model].values[0]\n e_dens_range = param_ranges['e_dens_range'][param_ranges['line_model_name'] == line_model].values[0]\n temp_range = param_ranges['temp_range'][param_ranges['line_model_name'] == line_model].values[0]\n b_field_range = param_ranges['b_field_range'][param_ranges['line_model_name'] == line_model].values[0]\n\n return n_upper_range, e_dens_range, temp_range, b_field_range",
"def range_params(self, ran, kw):\n specs = {\"range\": (SchemaNode(\"value\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")),\n \"length\": (SchemaNode(\"param\").set_attr(\"name\",\"length\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minLength\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxLength\"))}\n (exact, min_, max_) = specs[kw]\n if (len(ran) == 1 or ran[0] == ran[1]) and ran[0][0] != \"m\":\n elem = exact\n elem.text = ran[0]\n return [elem]\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res",
"def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range",
"def get_range_info(self):\n with open(self.range_path, 'r') as _file:\n for line in _file.readlines():\n list0 = line.strip().split('-')\n range_dict = {\n 'min': int(list0[0], 16),\n 'max': int(list0[1], 16),\n 'max_offset': int(list0[1], 16) - int(list0[0], 16),\n }\n self.ranges.append(range_dict)",
"def _parse_requantization_ranges(self):\n res = {}\n\n print_suffix = \"__print__\"\n lines = self._get_valid_log()\n temp_min = {}\n temp_max = {}\n pattern_def = r\"{};{}:\\[\\-?\\d+\\.?\\d*e?-?\\+?\\d*\\]\".format(print_suffix, self.postfix)\n for i in lines:\n if not re.search(pattern_def, i):\n continue\n\n max_line_data = i.split(print_suffix + \";\" + self.postfix)[-1]\n min_value = max_line_data.split('][')[0].split('[')[1]\n max_value = max_line_data.split('][')[1].split(']')[0]\n name = i.split(';')[1].strip()[:-len(print_suffix)]\n if name not in temp_min:\n temp_min[name] = []\n if name not in temp_max:\n temp_max[name] = []\n\n temp_min[name].append(float(min_value))\n temp_max[name].append(float(max_value))\n\n for key in temp_min:\n target_min_index = int(np.ceil(len(temp_min[key]) * (1 - self.threshold)))\n\n if key not in res:\n res[key] = []\n\n if target_min_index > len(temp_min[key]) - 1:\n target_min_index = len(temp_min[key]) - 1\n res[key].append(sorted(temp_min[key])[target_min_index])\n\n for key in temp_max:\n target_max_index = int(np.floor(len(temp_max[key]) * self.threshold)) - 1\n\n if target_max_index > len(temp_max[key]) - 1:\n target_max_index = len(temp_max[key]) - 1\n\n res[key].append(sorted(temp_max[key])[target_max_index])\n\n if self.tensor_data:\n for k, v in self.tensor_data.items():\n if k in res:\n self.logger.debug(\"Update node {} min to {}, max to {}.\".format(k, v[2], v[3]))\n res[k] = [v[2], v[3]]\n return res",
"def get_range(self):\n classes = concrete_descendents(self.class_)\n d=dict([(name,class_) for name,class_ in classes.items()])\n if self.allow_None:\n d['None']=None\n return d",
"def _part_group_cell_mapper(bd_type):\n js, iss = np.meshgrid(range(smt.cols), range(smt.rows)) # zero indexed to agree with python interpretation\n idx = bd_type.flatten() != -1\n out = dict(zip(range(1, idx.sum() + 1), list(zip(iss.flatten()[idx], js.flatten()[idx]))))\n return out",
"def get_range(self):\n # CEBHACKALERT: was written assuming it would only operate on\n # Parameterized instances. Think this is an sf.net bug/feature\n # request. Temporary fix: don't use obj.name if unavailable.\n try:\n d=dict([(obj.name,obj) for obj in self.objects])\n except AttributeError:\n d=dict([(obj,obj) for obj in self.objects])\n return d",
"def get_ranges(graph: Graph, property_to_id: Dict[str, int], entity_type_to_id: Dict[str, int]) -> Dict[int, int]:\n # dictionary pointing from object property id to an entity type id\n ranges = {}\n\n # add all range triples for which the subject is an object property and the object is an entity type\n for subject, predicate, object in graph.triples((None, RDFS.range, None)):\n if subject in property_to_id and object in entity_type_to_id:\n ranges[property_to_id[subject]] = entity_type_to_id[object]\n return ranges",
"def get_limits(age_groups):\n\n limits = {}\n for data in age_groups:\n pattern = re.compile(r'([\\d]+)-([\\d]+)')\n match = pattern.search(data)\n age_min = int(match.group(1).strip())\n age_max = int(match.group(2).strip())\n # print(f'limits = {age_min} to {age_max}')\n limits[f'Age_{data}'] = [age_min, age_max]\n return limits",
"def get_allowed_ranges(csvfile):\n from csv import DictReader\n ranges = {}\n with open(csvfile, 'r') as infile:\n # Remove spaces from field headers\n firstline = infile.readline()\n headers = [k.strip() for k in firstline.split(',')]\n if not len(headers) == 11:\n headers = [k.strip() for k in firstline.split(' ')]\n opfield = 'CSVv2;OperatingPoint'\n if not opfield in headers: opfield = 'cMVAv2;OperatingPoint'\n if not opfield in headers: opfield = 'CSV;OperatingPoint'\n\n reader = DictReader(infile, fieldnames=headers)\n for row in reader:\n key = (int(row[opfield].strip()),\n row['measurementType'].strip(),\n row['sysType'].strip(),\n int(row['jetFlavor'].strip()))\n ranges.setdefault(key, {})\n for var in ['eta', 'pt', 'discr']:\n mini = float(row['%sMin'%var].strip())\n maxi = float(row['%sMax'%var].strip())\n ranges[key]['%sMin'%var] = min(ranges[key].setdefault('%sMin'%var, mini), mini)\n ranges[key]['%sMax'%var] = max(ranges[key].setdefault('%sMax'%var, maxi), maxi)\n return ranges",
"def map_range( self, rng ):\n rmap = {\n '2 nA': pac.Ammeter.CurrentRange.N2,\n '20 nA': pac.Ammeter.CurrentRange.N20,\n '200 nA': pac.Ammeter.CurrentRange.N200,\n '2 uA': pac.Ammeter.CurrentRange.U2,\n '20 uA': pac.Ammeter.CurrentRange.U20,\n '200 uA': pac.Ammeter.CurrentRange.U200,\n '2 mA': pac.Ammeter.CurrentRange.M2,\n '20 mA': pac.Ammeter.CurrentRange.M20\n }\n \n if rng in rmap:\n return rmap[ rng ]\n \n else:\n raise ValueError( 'Invalid range' )",
"def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins",
"def ranges(self):\n return self._ranges",
"def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")",
"def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")",
"def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")",
"def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")",
"def mass_combinations(mH_min, mH_max, mH_step_size, mB_min, mB_max, mB_step_size):\n\n higgsino_masses = np.arange(mH_min, mH_max, mH_step_size)\n bino_masses = np.arange(mB_min, mB_max, mB_step_size)\n\n tuples = list(it.product(higgsino_masses, bino_masses))\n namedtuples = [MassCombination(*_tuple) for _tuple in tuples]\n return filter(lambda x: x.mH > x.mB + 126., namedtuples)",
"def getSets():",
"def range_table(self):\n range_table_base = []\n if self.block_mask != None:\n range_table_length = len(self.block_mask)\n else:\n range_table_length = self.block_num\n\n for i in range(range_table_length):\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.k_size))\n range_table_base.append(len(self.pool_type))\n\n return range_table_base",
"def getRange(self, chr, start, end, bins=2000, zoomlvl=-1, metric=\"AVG\", respType=\"DataFrame\"):\n try:\n iter = self.file.fetch(chr, start, end)\n # result = []\n # for x in iter:\n # returnBin = (x.reference_name, x.reference_start, x.reference_end, x.query_alignment_sequence, x.query_sequence)\n # result.append(returnBin)\n\n # if self.columns is None:\n # self.columns = [\"chr\", \"start\", \"end\", \"query_alignment_sequence\", \"query_sequence\"]\n\n # if respType is \"DataFrame\":\n # result = toDataFrame(result, self.columns)\n\n (result, _) = get_range_helper(self.toDF, self.get_bin,\n self.get_col_names, chr, start, end, iter, self.columns, respType)\n\n return result, None\n except ValueError as e:\n raise Exception(\"didn't find chromId with the given name\")",
"def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")"
] | [
"0.59818125",
"0.5639009",
"0.5639009",
"0.5624772",
"0.5565236",
"0.5513016",
"0.5494546",
"0.5442198",
"0.54400784",
"0.5380527",
"0.5366019",
"0.5358748",
"0.5318177",
"0.5313223",
"0.52917475",
"0.5266361",
"0.5262664",
"0.5223182",
"0.5186198",
"0.517666",
"0.5118012",
"0.51148266",
"0.51148266",
"0.51148266",
"0.51148266",
"0.5109687",
"0.5102881",
"0.50763667",
"0.50756514",
"0.5072759"
] | 0.6958238 | 0 |
Get side lengths of triangle inputs | def get_tri_side_length(my_core, tri):
side_lengths = []
s = 0
coord_list = []
verts = list(my_core.get_adjacencies(tri, 0))
for vert in verts:
coords = my_core.get_coords(vert)
coord_list.append(coords)
for side in range(3):
side_lengths.append(np.linalg.norm(coord_list[side]-coord_list[side-2]))
# The indices of coord_list includes the "-2" because this way each side will be matched up with both
# other sides of the triangle (IDs: (Side 0, Side 1), (Side 1, Side 2), (Side 2, Side 0))
return side_lengths | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_side_lengths(triangles):\n first_vec = [2, 0, 1]\n second_vec = [1, 2, 0]\n sides = triangles[:, first_vec] - triangles[:, second_vec]\n lengths = np.sqrt(np.sum(sides**2, axis=2))\n return lengths",
"def get_edge_lengths(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n edges, _ = get_edges(triangles)\n return np.linalg.norm(np.diff(points[edges], axis=1), axis=2).squeeze()",
"def square_triangle(sides: list) -> float:\n h_per = (sides[0] + sides[1] + sides[2]) / 2 #half-perimetr\n square = math.sqrt (h_per * (h_per- sides[0]) * (h_per - sides[1]) * (h_per - sides[2]))\n return square",
"def triangle(self):\n \n R = Householder.triangle_operation(self)[0] \n \n return(R.round(10))",
"def area_triangle(w, h):\n return w * h / 2",
"def triangle_area(side1: number, side2: number, side3: number) -> number:\n s = (side1+side2+side3)/2\n area = sqrt(s*(s-side1)*(s-side2)*(s-side3))\n return sqrt(s*(s-side1)*(s-side2)*(s-side3))",
"def triangleArea(a: Vec3, b: Vec3, c: Vec3) -> float:\n return cross3(b - a, c - a).length() / 2.0",
"def area_triangle_sss(side1,side2,side3):\n semi_perim=(side1+side2+side3)/2.0\n return math.sqrt(semi_perim*\n (semi_perim - side1)*\n (semi_perim - side2)*\n (semi_perim - side3)\n )",
"def area_triangle_sss(side1, side2, side3):\n \n # Use Heron's formula\n semiperim = (side1 + side2 + side3) / 2.0\n return math.sqrt(semiperim *\n (semiperim - side1) *\n (semiperim - side2) * \n (semiperim - side3))",
"def triangle(self, freq: int, /) -> None:",
"def sides(self):\n return len(self)",
"def Lengths(self):\n\n self.__do_essential_memebers_exist__()\n\n if self.element_type == \"line\":\n coords = self.points[self.elements[:,:2],:]\n lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)\n else:\n # self.GetEdges()\n # coords = self.points[self.all_edges,:]\n coords = self.points[self.elements[:,:2],:]\n lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)\n\n return lengths",
"def triangle(n):\n return n*(n+1)/2",
"def getSideLength():\n side = float(input(\"How long do you want the side length?\"))\n return side",
"def triangle(n):\n return (n * (n + 1)) / 2",
"def getlen(self):\n if self.onlydiag():\n return self.lendiag()\n else:\n return len(self)",
"def findTriangles(p):\n triangleCount = 0\n for a in range(3, p//3 + 1):\n for b in range(a+1, p//2):\n c = p - (a+b)\n if (a**2 + b**2) == c**2:\n triangleCount += 1\n return triangleCount",
"def ui_input() -> list:\n print('Enter three sides of the triangle:')\n sides = [int(input()), int(input()), int(input())]\n return sides",
"def lengths(self):\n lengths = []\n last = self._coordinates[-1]\n for c in self._coordinates:\n lengths.append(((c[0]-last[0])**2 + (c[1]-last[1])**2) ** 0.5)\n last = c\n return sorted(lengths)",
"def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1",
"def triangular_area():\n print(1*1/2, 2*2/2, 3*3/2, 4*4/2, 5*5/2, 6*6/2, 7*7/2, 8*8/2, 9*9/2,\n 10*10/2)",
"def triangle_area(base, height):\n return (base * height) / 2",
"def len_func(polygon):\n ret=[]\n N=len(polygon)\n for i in range(1,N):\n l = ((polygon[i][0]-polygon[i-1][0])**2 + (polygon[i][1]-polygon[i-1][1])**2 )**0.5\n ret.append(l)\n l = ((polygon[0][0]-polygon[N-1][0])**2 + (polygon[0][1]-polygon[N-1][1])**2 )**0.5\n ret.append(l)\n return ret",
"def area_of_a_triangle(length_1, length_2, length_3):\r\n half_perimeter = (length_1 + length_2 + length_3) / 2\r\n area = (half_perimeter * (half_perimeter-length_1) * (half_perimeter-length_2) * (half_perimeter-length_3)) ** 0.5\r\n return area",
"def triangle(self):\n [r,c] = self.D\n m = min(r,c)\n S = self\n T = zeros(r,c)\n while m > 0:\n NoLigne = 0\n while S[NoLigne, 0] == 0 and (NoLigne < m - 1):\n NoLigne += 1\n S = S.swap(NoLigne,0)\n if S[0, 0] != 0:\n pivot = S[0,0]\n for k in range(1,m):\n if S[k,0] != 0:\n S = S.comb_lignes(pivot, -S[k,0],k,0)\n #print(\"pivot = \"+str(pivot))\n #print(\"S dans for :\")\n #print(S)\n T = T.remplace_ligned(r - m,S.F)\n #print(\"Évolution de T :\")\n #print(T)\n S = S.decoupe()\n m -= 1\n return T",
"def EdgeLengths(self,which_edges='boundary'):\n\n assert self.points is not None\n assert self.element_type is not None\n\n\n lengths = None\n if which_edges == 'boundary':\n if self.edges is None:\n self.GetBoundaryEdges()\n\n edge_coords = self.points[self.edges[:,:2],:]\n lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)\n\n elif which_edges == 'all':\n if self.all_edges is None:\n self.GetEdges()\n\n edge_coords = self.points[self.all_edges[:,:2],:]\n lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)\n\n return lengths",
"def triangle(n: int) -> int:\n return int(n * (n + 1) / 2)",
"def get_sides(vertices):\n return [dist(vertices[1], vertices[2]),\n dist(vertices[2], vertices[0]),\n dist(vertices[0], vertices[1])]",
"def triangle(a, b, c):\n longest = max(a, b, c)\n \n sum_of_others = a + b + c - longest # or min(a+b, a+c, b+c)\n \n return longest < sum_of_others",
"def triangle(halfSideLength = 15, robotHeight = -90):\n# ^ \n# / \\ \n# / \\ \n# / \\ \n# /_______\\\n# \n# | a | \n# a = halfSideLength\n\n hHalf = (halfSideLength * m.sqrt(3)/2)/2\n\n posTriangle = [\n [-hHalf,halfSideLength,robotHeight,0,0,0,'mov'],\n [-hHalf,-halfSideLength,robotHeight,0,0,0,'lin'],\n [hHalf,0,robotHeight,0,0,0,'lin'],\n [-hHalf,halfSideLength,robotHeight,0,0,0,'lin'],\n [0,0,-127,0,0,0,'mov']\n ]\n\n return posTriangle"
] | [
"0.80329156",
"0.73527324",
"0.6605369",
"0.6578953",
"0.653027",
"0.6457566",
"0.6410448",
"0.6380816",
"0.6373851",
"0.634458",
"0.63425255",
"0.62799424",
"0.62611526",
"0.62526083",
"0.6235124",
"0.6221507",
"0.62141633",
"0.61444443",
"0.6128147",
"0.61095893",
"0.6104889",
"0.60801274",
"0.60788375",
"0.6071254",
"0.6066173",
"0.6061882",
"0.6059281",
"0.60473496",
"0.6004745",
"0.60017395"
] | 0.7491572 | 1 |
Cleans the line from geometrical shape characters and replaces these with space. | def clean_text_from_geometrical_shape_unicode(line):
line = re.sub(r"([\u25A0-\u25FF])", " ", line)
return line | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line",
"def clean(line):\n line = line.lower().replace(\"\\n\",\" \").replace(\"\\r\",\"\").replace(',',\"\").replace(\">\",\"> \").replace(\"<\", \" <\").replace(\"|\",\" \")\n return line",
"def prepare_text_line(line):\n\n re_sub = re.sub\n # FIXME: maintain the original character positions\n\n # strip whitespace\n line = line.strip()\n\n # strip comment markers\n # common comment characters\n line = line.strip('\\\\/*#%;')\n # un common comment line prefix in dos\n line = re_sub('^rem\\s+', ' ', line)\n line = re_sub('^\\@rem\\s+', ' ', line)\n # un common comment line prefix in autotools am/in\n line = re_sub('^dnl\\s+', ' ', line)\n # un common comment line prefix in man pages\n line = re_sub('^\\.\\\\\\\\\"', ' ', line)\n # un common pipe chars in some ascii art\n line = line.replace('|', ' ')\n\n # normalize copyright signs and spacing aournd them\n line = line.replace('(C)', ' (c) ')\n line = line.replace('(c)', ' (c) ')\n # the case of \\251 is tested by 'weirdencoding.h'\n line = line.replace(u'\\251', u' (c) ')\n line = line.replace('©', ' (c) ')\n line = line.replace('©', ' (c) ')\n line = line.replace('©', ' (c) ')\n line = line.replace(u'\\xa9', ' (c) ')\n # FIXME: what is \\xc2???\n line = line.replace(u'\\xc2', '')\n\n # TODO: add more HTML entities replacements\n # see http://www.htmlhelp.com/reference/html40/entities/special.html\n # convert html entities CR LF to space\n line = line.replace(u' ', ' ')\n line = line.replace(u' ', ' ')\n line = line.replace(u' ', ' ')\n\n # normalize (possibly repeated) quotes to unique single quote '\n # backticks ` and \"\n line = line.replace(u'`', \"'\")\n line = line.replace(u'\"', \"'\")\n line = re.sub(MULTIQUOTES_RE(), \"'\", line)\n # quotes to space? but t'so will be wrecked\n # line = line.replace(u\"'\", ' ')\n\n # some trailing garbage ')\n line = line.replace(\"')\", ' ')\n\n\n # note that we do not replace the debian tag by a space: we remove it\n line = re_sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)\n\n line = re_sub(IGNORED_PUNCTUATION_RE(), ' ', line)\n\n # tabs to spaces\n line = line.replace('\\t', ' ')\n\n # normalize spaces around commas\n line = line.replace(' , ', ', ')\n\n # remove ASCII \"line decorations\"\n # such as in --- or === or !!! or *****\n line = re_sub(ASCII_LINE_DECO_RE(), ' ', line)\n line = re_sub(ASCII_LINE_DECO2_RE(), ' ', line)\n\n # Replace escaped literal \\0 \\n \\r \\t that may exist as-is by a space\n # such as in code literals: a=\"\\\\n some text\"\n line = line.replace('\\\\r', ' ')\n line = line.replace('\\\\n', ' ')\n line = line.replace('\\\\t', ' ')\n line = line.replace('\\\\0', ' ')\n\n # TODO: Why?\n # replace contiguous spaces with only one occurrence\n # line = re.sub(WHITESPACE_RE(), ' ', text)\n\n # normalize to ascii text\n line = commoncode.text.toascii(line)\n # logger.debug(\"ascii_only_text: \" + text)\n\n # strip verbatim back slash and comment signs again at both ends of a line\n # FIXME: this is done at the start of this function already\n line = line.strip('\\\\/*#%;')\n\n # normalize to use only LF as line endings so we can split correctly\n # and keep line endings\n line = commoncode.text.unixlinesep(line)\n # why?\n line = lowercase_well_known_word(line)\n\n return line",
"def removeSingleChars(self) -> None:\n self.text = re.sub('\\s[^\\n\\s]\\s', ' ', self.text)",
"def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n return new",
"def quote( self, aLine ):\n clean= aLine\n for from_, to_ in self.quoted_chars:\n clean= clean.replace( from_, to_ )\n return clean",
"def remove_arrows(line):\n pattern = r'[\\+\\-][0-9]+'\n if type(line) != str:\n if (pd.isnull(line) == True) | (np.isnan(line) == True):\n return 'NaN'\n else:\n return line\n else:\n line = re.search(pattern, line)\n return line[0]",
"def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text",
"def clean_line(self, line):\n\n if \"#\" in line:\n temp = line.split(\"#\")\n if len(temp) < 2:\n return \"\"\n else:\n temp = temp[0] + \"\\n\"\n\n # make sure the \"#\" isn't in quotes\n if temp.count(\"\\\"\") % 2 == 0:\n line = temp\n\n line = line.replace(\"}\", \" } \").replace(\"{\", \" { \")\n while \"=\" in line:\n line = self.replace_equals(line)\n line = line.lstrip()\n return line",
"def remove_space(line):\n split_line = line.split()\n return \"\".join(split_line)",
"def clean_line(line, normNum=True, normProf=True):\n\n # Remove square brackets, ceiling characters, question marks, other\n # questionable characters, and line breaks\n line = re.sub(r'(\\[|\\])', '', line)\n line = re.sub(r'(⌈|⌉)', '', line)\n line = re.sub(r'( / )', ' ', line)\n line = re.sub(r'/', '', line)\n line = re.sub(r'\\?', '', line)\n line = re.sub(r'([<]|[>])+', '', line)\n line = re.sub(r'!', '', line)\n line = re.sub(r'\"', '', line)\n\n # Remove researcher's notes, and multiple dashes or '='s\n line = re.sub(r'(\\(.*\\))', '', line)\n line = re.sub(r'(#[.]*)', '', line)\n line = re.sub(r'[-]{2}', '', line)\n line = re.sub(r'[=]{2}', '', line)\n\n # Replace numbers with 'number'\n if normNum is True:\n line = re.sub(r'\\b(?<!-)(\\d+)(?![\\w-])', 'number', line)\n line = re.sub(r'[-+]?\\b\\d+\\b', 'number', line)\n\n #line = re.sub(r'\\b([\\-\\.0-9]+)(?![\\w-])', 'number', line)\n\n # Replace professions with 'profession'\n if normProf is True:\n line = professions.replaceProfessions(line)\n\n # Remove blank character at end of line\n linelength = len(line)\n if (linelength > 0 and line[linelength-1] == \"\"):\n del line[0:linelength-2]\n\n return line",
"def squash_crs(string):\n if isinstance(string, str):\n return re.sub('\\n[^\\n]+\\r', '\\n', string)\n else:\n return re.sub(b'\\n[^\\n]+\\r', b'\\n', string)",
"def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line",
"def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line",
"def __stripEol(self, txt):\n return txt.replace(\"\\r\", \"\").replace(\"\\n\", \"\")",
"def _chop_end_codes(line):\n return re.sub(r\"\\s\\s\\s\\s+[\\w]{4}.\\s+\\d*\\Z\", \"\", line)",
"def clean_smile(self, smi):\n smi = smi.replace('\\n', '')\n return smi",
"def cleanData(rawData):\n\trawData = re.sub(r'R-LRB- \\(', r'R-LRB- -LRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\)', r'R-RRB- -RRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\(', r'R-RRB- -LRB-', rawData)\n\trawData = re.sub(r'-LRB- \\(', r'-LRB- -LRB-', rawData)\n\trawData = re.sub(r'-RRB- \\)', r'-RRB- -RRB-', rawData)\n\trawData = re.sub(r'PU \\(', r'PU -LRB-', rawData)\n\trawData = re.sub(r'PU \\)', r'PU -RRB-', rawData)\n\trawData = re.sub(r':-\\)', r'smileyface', rawData)\n\n\treturn rawData",
"def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text",
"def unlines(line):\n\n return line.translate(str.maketrans('\\n', ' '))",
"def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text",
"def strip_other_charcter():\n pass",
"def process_line(line:str) -> str:\n s = replace_multispace(replace_newlines(line))\n return s",
"def __cleanText(self,stripNonAlphaNumeric=False, stripNumbers=False):\n if stripNonAlphaNumeric:\n txt = r1.sub(\" \",self.getRawText() )\n else:\n txt = self.getRawText()\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if stripNumbers:\n txt = r3.sub(\"\",txt)\n self.graph[\"__txt\"] = txt\n self.graph[\"__scope\"] = (0,len(txt))",
"def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)",
"def replace_spaces_with_pluses(self, sample):\r\n changed = list(sample)\r\n for i, c in enumerate(changed):\r\n if(c == ' ' or c ==' ' or c ==' ' or c=='\\n' or c=='\\n\\n'):\r\n changed[i] = '+'\r\n return ''.join(changed)",
"def _preprocess(self, sent: str) -> str:\n sent = sent.replace(\" \", \"▁\")\n return \" \".join([c for c in sent])",
"def clean_text(text):\n return text.replace('\\n', ' ').replace('\\r', ' ')",
"def cleaning(string):\n\n if type(string) == float or type(string) == int:\n return string\n res = ''\n if string != string:\n return string\n string = string.replace(\"\\\\r\", \"\")\n string = string.replace(\"\\\\n\", \"\")\n string = string.replace(\"\\\\b\", \"\")\n string = string.replace(\"\\\\t\", \"\")\n for i in string:\n if i.isalpha():\n res = res + i\n return res.lower()",
"def f_shp(i):\n return i.replace('(', '').replace(')', '').replace(', ', 'x').replace(',', '')"
] | [
"0.6750238",
"0.6354936",
"0.60908276",
"0.60867965",
"0.6052043",
"0.5917007",
"0.58838075",
"0.58618855",
"0.5852398",
"0.58450764",
"0.5843433",
"0.584121",
"0.5809004",
"0.5809004",
"0.5773769",
"0.5744268",
"0.573116",
"0.57237",
"0.56972486",
"0.5695229",
"0.5684892",
"0.5674085",
"0.56583124",
"0.5650387",
"0.559185",
"0.55685383",
"0.5549865",
"0.55386126",
"0.55377525",
"0.5536837"
] | 0.8284751 | 1 |
Cleans the line from private unicode characters and replaces these with space. | def clean_text_from_private_unicode(line):
line = re.sub(r"([\uE000-\uF8FF]|\uD83C[\uDF00-\uDFFF]|\uD83D[\uDC00-\uDDFF])", " ", line)
return line | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_text_from_geometrical_shape_unicode(line):\n line = re.sub(r\"([\\u25A0-\\u25FF])\", \" \", line)\n return line",
"def clean_text_from_geometrical_shape_unicode(line):\n line = re.sub(r\"([\\u25A0-\\u25FF])\", \" \", line)\n return line",
"def RemoveNonUtf8BadChars(line):\n return \"\".join([ch for ch in line if ch in printable])",
"def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line",
"def clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")",
"def prepare_text_line(line):\n\n re_sub = re.sub\n # FIXME: maintain the original character positions\n\n # strip whitespace\n line = line.strip()\n\n # strip comment markers\n # common comment characters\n line = line.strip('\\\\/*#%;')\n # un common comment line prefix in dos\n line = re_sub('^rem\\s+', ' ', line)\n line = re_sub('^\\@rem\\s+', ' ', line)\n # un common comment line prefix in autotools am/in\n line = re_sub('^dnl\\s+', ' ', line)\n # un common comment line prefix in man pages\n line = re_sub('^\\.\\\\\\\\\"', ' ', line)\n # un common pipe chars in some ascii art\n line = line.replace('|', ' ')\n\n # normalize copyright signs and spacing aournd them\n line = line.replace('(C)', ' (c) ')\n line = line.replace('(c)', ' (c) ')\n # the case of \\251 is tested by 'weirdencoding.h'\n line = line.replace(u'\\251', u' (c) ')\n line = line.replace('©', ' (c) ')\n line = line.replace('©', ' (c) ')\n line = line.replace('©', ' (c) ')\n line = line.replace(u'\\xa9', ' (c) ')\n # FIXME: what is \\xc2???\n line = line.replace(u'\\xc2', '')\n\n # TODO: add more HTML entities replacements\n # see http://www.htmlhelp.com/reference/html40/entities/special.html\n # convert html entities CR LF to space\n line = line.replace(u' ', ' ')\n line = line.replace(u' ', ' ')\n line = line.replace(u' ', ' ')\n\n # normalize (possibly repeated) quotes to unique single quote '\n # backticks ` and \"\n line = line.replace(u'`', \"'\")\n line = line.replace(u'\"', \"'\")\n line = re.sub(MULTIQUOTES_RE(), \"'\", line)\n # quotes to space? but t'so will be wrecked\n # line = line.replace(u\"'\", ' ')\n\n # some trailing garbage ')\n line = line.replace(\"')\", ' ')\n\n\n # note that we do not replace the debian tag by a space: we remove it\n line = re_sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)\n\n line = re_sub(IGNORED_PUNCTUATION_RE(), ' ', line)\n\n # tabs to spaces\n line = line.replace('\\t', ' ')\n\n # normalize spaces around commas\n line = line.replace(' , ', ', ')\n\n # remove ASCII \"line decorations\"\n # such as in --- or === or !!! or *****\n line = re_sub(ASCII_LINE_DECO_RE(), ' ', line)\n line = re_sub(ASCII_LINE_DECO2_RE(), ' ', line)\n\n # Replace escaped literal \\0 \\n \\r \\t that may exist as-is by a space\n # such as in code literals: a=\"\\\\n some text\"\n line = line.replace('\\\\r', ' ')\n line = line.replace('\\\\n', ' ')\n line = line.replace('\\\\t', ' ')\n line = line.replace('\\\\0', ' ')\n\n # TODO: Why?\n # replace contiguous spaces with only one occurrence\n # line = re.sub(WHITESPACE_RE(), ' ', text)\n\n # normalize to ascii text\n line = commoncode.text.toascii(line)\n # logger.debug(\"ascii_only_text: \" + text)\n\n # strip verbatim back slash and comment signs again at both ends of a line\n # FIXME: this is done at the start of this function already\n line = line.strip('\\\\/*#%;')\n\n # normalize to use only LF as line endings so we can split correctly\n # and keep line endings\n line = commoncode.text.unixlinesep(line)\n # why?\n line = lowercase_well_known_word(line)\n\n return line",
"def clean(line):\n line = line.lower().replace(\"\\n\",\" \").replace(\"\\r\",\"\").replace(',',\"\").replace(\">\",\"> \").replace(\"<\", \" <\").replace(\"|\",\" \")\n return line",
"def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")",
"def unlines(line):\n\n return line.translate(str.maketrans('\\n', ' '))",
"def removeSingleChars(self) -> None:\n self.text = re.sub('\\s[^\\n\\s]\\s', ' ', self.text)",
"def remove_unicode(text):\n regex = r\"(\\\\u....)\"\n text = re.sub(regex, ' ', text)\n return text",
"def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text",
"def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)",
"def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)",
"def strip_other_charcter():\n pass",
"def quote( self, aLine ):\n clean= aLine\n for from_, to_ in self.quoted_chars:\n clean= clean.replace( from_, to_ )\n return clean",
"def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)",
"def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text",
"def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet",
"def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()",
"def sanitize_characters(raw_input_file, clean_output_file):\n input_file = codecs.open(raw_input_file, 'r', encoding='ascii', errors='ignore')\n output_file = open(clean_output_file, 'w', encoding='ascii', errors='ignore')\n\n for line in input_file:\n # removes extra newline\n line = line.rstrip('\\n')\n output_file.write(line)",
"def clean_line(line, normNum=True, normProf=True):\n\n # Remove square brackets, ceiling characters, question marks, other\n # questionable characters, and line breaks\n line = re.sub(r'(\\[|\\])', '', line)\n line = re.sub(r'(⌈|⌉)', '', line)\n line = re.sub(r'( / )', ' ', line)\n line = re.sub(r'/', '', line)\n line = re.sub(r'\\?', '', line)\n line = re.sub(r'([<]|[>])+', '', line)\n line = re.sub(r'!', '', line)\n line = re.sub(r'\"', '', line)\n\n # Remove researcher's notes, and multiple dashes or '='s\n line = re.sub(r'(\\(.*\\))', '', line)\n line = re.sub(r'(#[.]*)', '', line)\n line = re.sub(r'[-]{2}', '', line)\n line = re.sub(r'[=]{2}', '', line)\n\n # Replace numbers with 'number'\n if normNum is True:\n line = re.sub(r'\\b(?<!-)(\\d+)(?![\\w-])', 'number', line)\n line = re.sub(r'[-+]?\\b\\d+\\b', 'number', line)\n\n #line = re.sub(r'\\b([\\-\\.0-9]+)(?![\\w-])', 'number', line)\n\n # Replace professions with 'profession'\n if normProf is True:\n line = professions.replaceProfessions(line)\n\n # Remove blank character at end of line\n linelength = len(line)\n if (linelength > 0 and line[linelength-1] == \"\"):\n del line[0:linelength-2]\n\n return line",
"def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n return new",
"def _removeWhitespaces(self, s):\n return s.translate({ord(c): None for c in string.whitespace})",
"def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt",
"def clean_txt(txt):\n r = txt.encode(\"utf-8\", errors=\"backslashreplace\").decode('utf-8').replace(\"\\\\u0144\", \"\")\n return r",
"def clean(sent):\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent"
] | [
"0.6923628",
"0.6923628",
"0.6871189",
"0.68013835",
"0.6612186",
"0.6590212",
"0.643773",
"0.6276485",
"0.6224105",
"0.6136164",
"0.60882586",
"0.6054826",
"0.6043292",
"0.60300654",
"0.6018717",
"0.6001594",
"0.5968716",
"0.59620196",
"0.59620196",
"0.5959176",
"0.593358",
"0.5914048",
"0.5872205",
"0.5867018",
"0.5842751",
"0.57787925",
"0.57743746",
"0.5762607",
"0.5756598",
"0.57400644"
] | 0.8404962 | 1 |
return a model as defined in model_search.yaml | def get_model_from_yaml(name):
filename = pkg_resources.resource_filename('empirical_lsm', 'data/model_search.yaml')
with open(filename) as f:
model_dict = yaml.load(f)[name]
return get_model_from_dict(model_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_model(model=gin.REQUIRED):\n return model",
"def get_model(*args):\n return Model()",
"def get_model(model):\n all_models = cmd.get_object_list()\n\n if len(all_models) == 0:\n logging.parser_error('No models are opened.')\n return\n\n model = model.lower()\n\n if model and (model in all_models):\n return model\n\n if len(all_models) > 1:\n logging.parser_error(\"Please specify which model you want to use. {}\".format(all_models))\n return\n\n return all_models[0]",
"def get_model(params):\r\n module_name, class_name = params.model.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)",
"def get_model(model_name):\n module_name = 'strain.models.strain_' + model_name.lower()\n model_module = importlib.import_module(module_name)\n obj = getattr(model_module, model_name)\n return obj",
"def get_model_definition(request):\n modelname = request.matchdict['modelname']\n results = db_model_definition(request.db)[modelname]\n for result in results:\n return result.value\n raise NotFound(\"Unknown model %s\" % modelname)",
"def model(self) -> Type[Model]:",
"def model() -> Model:\n return Model()",
"def get_model_by_name(cls, name):\n model_name = inflection.camelize(name) # class name of the model to use\n model = cls.models[model_name]\n return model",
"def get_model(*, name: str) -> typing.Optional[typing.Type]:\n return getattr(open_alchemy.models, name, None)",
"def model(self) -> 'outputs.ModelDefinitionResponse':\n return pulumi.get(self, \"model\")",
"def model(self):\n return MODELS.get(self._model,self._model)",
"def get_model(self):\n raise NotImplementedError(\n \"You must provide a 'get_model' method for the '%r' index.\" % self\n )",
"def _get_card_model(self, model: str) -> Any:\n return self.collection.models.byName(model)",
"def get_model():\n return UNISAL",
"def get_model(name, **model_args):\n module = importlib.import_module('.' + name, 'models')\n return module.build_model(**model_args)",
"def search_model():\n search_condition = request.stream.read()\n try:\n search_condition = json.loads(search_condition if search_condition else \"{}\")\n except Exception:\n raise ParamValueError(\"Json data parse failed.\")\n\n model_lineage_info = _get_lineage_info(\n lineage_type=\"model\",\n search_condition=search_condition\n )\n\n return jsonify(model_lineage_info)",
"def get_model(self, name):\n bundle_name, model_name = name.split(\".\")\n bundle = self.bundles[bundle_name]\n model = bundle.models[name]\n return model",
"def retrieve_model(self, model_name):\n\t\tmodel_detail = dbop.get_model(self, model_name)\n\t\t#since the 'owner' field of model_detail is only owner's username,\n\t\t#we have to change it to a User object\n\t\t#In this case, the owner of this model is the user itself\n\t\tmodel_detail['owner'] = self\n\t\tif model_detail['model_type'] == 'SPSS Predictive Model':\n\t\t\treturn model.SPSSModel(**model_detail)\n\t\telif model_detail['model_type'] == 'DashDB In-database Model':\n\t\t\treturn model.DashdbModel(**model_detail)",
"def get_model(self, key: str = None, **kwargs) -> Dict:\n raise NotImplementedError",
"def get_model(self):\n return self.model",
"def get_model(self):\n return self.model",
"def get_model(self):\n return self.model",
"def get_model(self):\n return self.model",
"def get_model(self):\n return self.model",
"def get_model(self):\n return self.model",
"def get_model(self):\n return self.model",
"def get_model(self):\n return self.model",
"def get_model(self):\n return self.model",
"def get_model(self):\n return self.model"
] | [
"0.72097623",
"0.6948634",
"0.67616165",
"0.67475533",
"0.66909075",
"0.66902417",
"0.66678",
"0.6573517",
"0.65641904",
"0.65121186",
"0.651177",
"0.6477959",
"0.6459214",
"0.6455929",
"0.64499646",
"0.6413741",
"0.6407674",
"0.64009804",
"0.6380913",
"0.6357358",
"0.63377213",
"0.63377213",
"0.63377213",
"0.63377213",
"0.63377213",
"0.63377213",
"0.63377213",
"0.63377213",
"0.63377213",
"0.63377213"
] | 0.6980467 | 1 |
Return a sklearn model pipeline from a model_dict | def get_model_from_dict(model_dict):
pipe_list = []
if 'transforms' in model_dict:
# For basic scikit-learn transforms
transforms = model_dict['transforms'].copy()
if 'scaler' in transforms:
scaler = transforms.pop('scaler')
pipe_list.append(get_scaler(scaler))
if 'pca' in transforms:
transforms.pop('pca')
pipe_list.append(get_pca())
if 'poly' in transforms:
args = transforms.pop('poly')
pipe_list.append(get_poly(args))
if len(transforms) > 0:
raise Exception("unknown transforms: %s" % repr(transforms))
if 'args' in model_dict:
model = get_model_class(model_dict['class'], model_dict['args'])
else:
model = get_model_class(model_dict['class'])
if 'clusterregression' in model_dict:
from empirical_lsm.clusterregression import ModelByCluster
clusterer = model_dict['clusterregression']['class']
cluster_args = model_dict['clusterregression']['args']
model = ModelByCluster(
get_clusterer(clusterer, cluster_args),
model)
pipe_list.append(model)
pipe = make_pipeline(*pipe_list)
if 'lag' in model_dict:
params = model_dict['lag']
pipe = get_lagger(pipe, params)
elif 'markov' in model_dict:
params = model_dict['markov']
pipe = get_markov_wrapper(pipe, params)
if 'forcing_vars' in model_dict:
pipe.forcing_vars = model_dict['forcing_vars']
else:
logger.warning("Warning: no forcing vars, using defaults (all)")
pipe.forcing_vars = get_config(['vars', 'met'])
if 'description' in model_dict:
pipe.description = model_dict['description']
return pipe | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_pipeline(model):\n\n steps = [\n (\"imp\", SimpleImputer(strategy=\"most_frequent\")),\n (\"norm\", MinMaxScaler()),\n (\"reg\", model)\n ]\n pipeline = Pipeline(steps=steps)\n\n return pipeline",
"def build_model():\n # Build ML pipeline using random forest classifier\n model = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier(\n n_estimators=100, min_samples_split=2)))\n ])\n\n return model",
"def build_model():\n # Build ML pipeline using random forest classifier\n model = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier(\n n_estimators=100, min_samples_split=2)))\n ])\n\n return model",
"def build_model():\n pipeline = Pipeline([\n ('vectorizer', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n # (), # Feature engineering (word2vec/GloVe)\n (\"clf\", MultiOutputClassifier(RandomForestClassifier(n_estimators=100), n_jobs=-1))\n ])\n\n return pipeline",
"def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe",
"def build_model():\n pipeline = Pipeline([\n ('features', FeatureUnion([\n\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n ('starting_verb', StartingVerbExtractor())\n ])),\n ('clf', DecisionTreeClassifier())\n ])\n\n parameters = [\n {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (DecisionTreeClassifier(min_samples_split=3),),\n 'clf__max_depth': (None, 4)\n }, {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (MultiOutputClassifier(LinearSVC(multi_class='ovr')),)\n }, {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (MLPClassifier(),),\n 'clf__hidden_layer_sizes': ((100, 10), (50,), (50, 10))\n }\n ]\n\n cv = GridSearchCV(pipeline, parameters, cv=3, n_jobs=4, verbose=10)\n \n return cv",
"def build_model():\n pipeline = Pipeline([('cvect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(LinearSVC(multi_class=\"crammer_singer\"), n_jobs=1))\n ])\n\n parameters = {\n 'clf__estimator__C': 1,\n 'clf__estimator__max_iter': 1000 }\n \n model = GridSearchCV(pipeline, param_grid=parameters)\n\n\n return model",
"def build_model():\n pipeline = Pipeline(\n [\n (\"vect\", CountVectorizer(tokenizer=tokenize)),\n (\"tfidf\", TfidfTransformer()),\n (\"clf\", MultiOutputClassifier(LinearSVC(dual=False))),\n ]\n )\n\n # use grid search to optimize the pipeline parameters\n parameters = {\"tfidf__use_idf\": (True, False), \"clf__estimator__C\": [1, 100]}\n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv",
"def build_model():\n nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])\n pipeline = Pipeline([\n ('features', FeatureUnion([\n \n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer())\n ])),\n \n ('strarting_verb', StartingVerbExtractor())\n \n ])),\n\n ('clf', MultiOutputClassifier(estimator = AdaBoostClassifier(random_state = 42)))\n\n ])\n \n parameters = {\"clf__estimator__learning_rate\": [0.1, 0.5, 1.0],\n \"clf__estimator__n_estimators\": [25, 50, 75]\n }\n \n from sklearn.model_selection import GridSearchCV\n cv = GridSearchCV(pipeline, param_grid = parameters) \n \n return cv",
"def build_model():\n \n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize, max_df = 0.75, max_features = 5000, ngram_range = (1, 2))),\n ('tfidf', TfidfTransformer(use_idf = True)),\n ('clf', MultiOutputClassifier(estimator=RandomForestClassifier(n_estimators = 200, min_samples_split = 2)))\n ])\n \n return pipeline",
"def build_model():\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n\n ('starting_verb', StartingVerbExtractor())\n ])),\n\n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n])\n \n # specify parameters for grid search\n parameters = {\n 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__text_pipeline__vect__max_df': (0.75, 1.0)\n }\n\n # create grid search object\n cv = GridSearchCV(pipeline, param_grid = parameters, n_jobs= 8, cv = 3, verbose = 2)\n\n return cv",
"def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])",
"def model_pipeline(catnums):\n pipe = make_pipeline(\n Imputer(strategy='most_frequent'),\n OneHotEncoder(categorical_features=catnums, sparse=False),\n PolynomialFeatures(),\n Ridge(alpha=25)\n )\n return pipe",
"def build_model():\n pipeline = Pipeline([\n ('features', FeatureUnion([\n \n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n\n ('starting_verb', StartingVerbExtractor())\n ])),\n \n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n ])\n\n # specify parameters for grid search\n parameters = {\n 'clf__estimator__n_estimators': [50],\n 'clf__estimator__learning_rate': [1]\n }\n\n # create grid search object\n cv = GridSearchCV(pipeline, param_grid=parameters)\n \n return cv",
"def build_model():\n #\n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),('tfidf', TfidfTransformer())])),\n ('starting_verb', StartingVerbExtractor())])),\n ('clf', RandomForestClassifier())\n ])\n \n # hyerparameters for grid to search within\n# parameters = [{'clf__bootstrap': [False, True],\n# 'clf__bootstrap': [False, True],\n# 'clf__n_estimators': [80,90, 100, 110, 130],\n# 'clf__max_features': [0.6, 0.65, 0.7, 0.73, 0.7500000000000001, 0.78, 0.8],\n# 'clf__min_samples_leaf': [10, 12, 14],\n# 'clf__min_samples_split': [3, 5, 7]\n# }\n# ]\n\n parameters = {\n 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__text_pipeline__vect__max_features': (None, 5000, 10000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf__n_estimators': [50, 80, 90, 100, 200],\n 'clf__min_samples_split': [2, 3, 4, 5, 7],\n 'features__transformer_weights': (\n {'text_pipeline': 1, 'starting_verb': 0.5},\n {'text_pipeline': 0.5, 'starting_verb': 1},\n {'text_pipeline': 0.8, 'starting_verb': 1},\n )\n }\n\n\n # Final model ready to be applied on dataset\n model = GridSearchCV(pipeline, param_grid=parameters)\n \n return model",
"def build_model(self):\n pipeline = Pipeline([\n ('vec', CountVectorizer(tokenizer=self.tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n self.model = pipeline\n return pipeline",
"def model_fn(model_dir):\n model_path = Path(model_dir)/\"model.joblib\"\n clf = joblib.load(model_path)\n return clf",
"def create_model(X, y, clf_info, permute):\n import numpy as np\n from sklearn.pipeline import Pipeline\n\n def to_instance(clf_info):\n mod = __import__(clf_info[0], fromlist=[clf_info[1]])\n params = {}\n if len(clf_info) > 2:\n params = clf_info[2]\n clf = getattr(mod, clf_info[1])(**params)\n if len(clf_info) == 4:\n from sklearn.model_selection import GridSearchCV\n\n clf = GridSearchCV(clf, param_grid=clf_info[3])\n return clf\n\n if isinstance(clf_info[0], list):\n # Process as a pipeline constructor\n steps = []\n for val in clf_info:\n step = to_instance(val)\n steps.append((val[1], step))\n pipe = Pipeline(steps)\n else:\n clf = to_instance(clf_info)\n from sklearn.preprocessing import StandardScaler\n\n pipe = Pipeline([(\"std\", StandardScaler()), (clf_info[1], clf)])\n\n y = y.ravel()\n if permute:\n pipe.fit(X, y[np.random.permutation(range(len(y)))])\n else:\n pipe.fit(X, y)\n predicted = pipe.predict(X)\n return (y, predicted), pipe",
"def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {'clf__estimator__n_estimators': [50, 100],\n 'clf__estimator__min_samples_split': [2, 3, 5],\n 'clf__estimator__criterion': ['entropy', 'gini']\n }\n \n cv = GridSearchCV(pipeline, param_grid=parameters)\n \n return cv",
"def build_model():\n # build pipeline with count vecotrizer, tfidf and support vector machine\n pipeline_SVC = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('multi-clf', MultiOutputClassifier(LinearSVC()))\n ])\n\n # define parameters for gridsearch\n parameters_SVC = {\n 'vect__max_df': (.6, 1),\n 'tfidf__norm': ('l1', 'l2'),\n 'multi-clf__estimator__C': (.1, 1, 100)\n }\n\n # build parameter grid and fit data\n model = GridSearchCV(pipeline_SVC, parameters_SVC)\n\n return model",
"def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize, min_df = 5)),\n ('tfidf', TfidfTransformer(use_idf = True)),\n ('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators = 10,\n min_samples_split = 10)))\n ])\n\n # Create parameters dictionary\n parameters = {'vect__min_df': [1, 5],\n 'tfidf__use_idf':[True, False],\n 'clf__estimator__n_estimators':[10, 25],\n 'clf__estimator__min_samples_split':[2, 5, 10]}\n\n # create grid search\n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv",
"def build_model(search = False):\n\n logging.info(\"run build_model\")\n\n # pipeline definition\n pipeline = Pipeline([\n ('tokenize', TokenizeTransform()), # split text into lemmatized words\n ('tfidf_emb', TfidfEmbeddingVectorizer()),\n ('clf', MultiOutputClassifier(GradientBoostingClassifier()))\n ], verbose=True)\n\n # set pipeline parameters\n pipeline.set_params(**{\n 'tfidf_emb__size':300,\n 'tfidf_emb__iter':200,\n 'tfidf_emb__min_count': 3,\n\n 'clf__estimator__max_depth': 10,\n 'clf__estimator__n_estimators':50,\n 'clf__estimator__min_samples_split':4,\n 'clf__estimator__random_state':0,\n 'clf__estimator__random_state': 0,\n })\n\n if search == True:\n parameters = {\n 'tfidf_emb__size': (200, 300),\n 'tfidf_emb__iter': (100, 200),\n 'tfidf_emb__min_count': (3, 5),\n\n 'clf__estimator__max_depth': (10,13),\n 'clf__estimator__n_estimators': (20, 30),\n 'clf__estimator__min_samples_split': (2,4),\n }\n\n pipeline = GridSearchCV(pipeline, parameters)\n\n return pipeline",
"def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(LogisticRegression(random_state=2020), n_jobs=-1))\n ])\n\n parameters = {\n 'clf__estimator__C': [1, 2, 4],\n 'clf__estimator__penalty': ['l1', 'l2']\n }\n\n cv = GridSearchCV(pipeline, param_grid=parameters, cv=5)\n\n return cv",
"def build(X, y=None):\n model = Pipeline([\n ('preprocessor',NLTKPreprocessor()),\n ('vectorizer', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', OneVsRestClassifier(LinearSVC(C=0.9)))])\n\n model.fit(X, y)\n return model",
"def build_model():\n # Pipeline of CountVextorizer, TfdifTransformer and MultiOutputClassifier\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {'clf__estimator__n_estimators': [50, 30],\n 'clf__estimator__min_samples_split': [3, 2] \n }\n \n cv = GridSearchCV(pipeline, param_grid= parameters, verbose=2, n_jobs=4)\n return cv",
"def build_model():\n \n #english trained optimized pipeline for word embedding\n nlp = spacy.load(\"en_core_web_md\") # this model will give you 300D\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ])),\n \n ('embeddings_pipeline', Pipeline([\n ('vect_trans',SpacyVectorTransformer(nlp)),\n ('reduce_dim', TruncatedSVD(50)),\n ])),\n \n ])),\n \n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__embeddings_pipeline__reduce_dim__n_components':(50,60,70,100,120,130,150)\n }\n cv = GridSearchCV(pipeline, param_grid=parameters,cv=2)\n \n return cv",
"def build_model():\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(DecisionTreeClassifier()))\n ])\n\n \n parameters = {'clf__estimator__min_samples_split':[2, 4, 6],\n 'clf__estimator__max_depth': [2, 4]}\n\n #parameters = {'clf__estimator__min_samples_split':[2]}\n cv = GridSearchCV(pipeline, parameters)\n\n return(cv)",
"def train_model(\r\n train_x: pd.DataFrame,\r\n train_y: pd.DataFrame,\r\n parameters: Dict[str, Any]\r\n) -> sklearn_Pipeline:\r\n # Build a multi-class logistic regression model\r\n model_params = parameters['model_params']\r\n model = LogisticRegression(**model_params)\r\n\r\n if parameters['model_standard_scaler']:\r\n # Prepare column transformer to do scaling\r\n col_transformer = ColumnTransformer(\r\n [\r\n (\r\n 'standard_scaler',\r\n StandardScaler(copy=False),\r\n [\r\n \"sepal_length\",\r\n \"sepal_width\",\r\n \"petal_length\",\r\n \"petal_width\",\r\n ],\r\n ),\r\n ],\r\n remainder='drop',\r\n )\r\n\r\n # Make pipeline w/ scaler\r\n model_pipeline = sklearn_Pipeline(\r\n steps=[\r\n ('col_transformer', col_transformer),\r\n ('model', model),\r\n ]\r\n )\r\n else:\r\n # Make pipeline w/o scaler\r\n model_pipeline = sklearn_Pipeline(\r\n steps=[\r\n ('model', model),\r\n ]\r\n )\r\n\r\n # Fit\r\n model_pipeline.fit(train_x, train_y)\r\n\r\n mlflow.set_experiment('iris-example')\r\n mlflow_sklearn.log_model(sk_model=model_pipeline, artifact_path=\"model\")\r\n mlflow.log_params(model_params)\r\n\r\n # Print out the model pipeline\r\n # See: http://www.xavierdupre.fr/app/mlinsights/helpsphinx/notebooks/visualize_pipeline.html\r\n dot = pipeline2dot(model_pipeline, train_x)\r\n dot_filename = 'pipeline_dot.dot'\r\n with open(dot_filename, 'w', encoding='utf-8') as f:\r\n f.write(dot)\r\n if sys.platform.startswith(\"win\") and \"Graphviz\" not in os.environ[\"PATH\"]:\r\n os.environ['PATH'] = os.environ['PATH'] + r';C:\\Program Files (x86)\\Graphviz2.38\\bin'\r\n cmd = \"dot -G=300 -Tpng {0} -o{0}.png\".format(dot_filename)\r\n run_cmd(cmd, wait=True, fLOG=print)\r\n mlflow.log_artifact('{0}.png'.format(dot_filename), 'model')\r\n\r\n return model_pipeline",
"def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n ])\n\n parameters = {\n 'vect__stop_words': ['english',None],\n 'tfidf__smooth_idf': [True, False],\n 'tfidf__norm': ['l2','l1'],\n 'clf__estimator__learning_rate': [0.5, 1, 2],\n 'clf__estimator__n_estimators': [20, 60, 100]\n }\n\n clf_grid_model = RandomizedSearchCV(pipeline,\n parameters,\n cv=3,\n refit=True,\n verbose=10,\n n_jobs=-1)\n return clf_grid_model",
"def from_dict(cls, dikt) -> 'PipelineDefinition':\n return util.deserialize_model(dikt, cls)"
] | [
"0.6688918",
"0.64640874",
"0.64640874",
"0.6416069",
"0.6381701",
"0.636211",
"0.6302781",
"0.62598276",
"0.62242967",
"0.6168712",
"0.6149896",
"0.6133813",
"0.6130566",
"0.61174124",
"0.6096542",
"0.6070486",
"0.6065497",
"0.60585046",
"0.6046623",
"0.6042597",
"0.603112",
"0.5973031",
"0.59639597",
"0.5930727",
"0.5905255",
"0.59017104",
"0.5893399",
"0.5892971",
"0.58887047",
"0.58774185"
] | 0.771603 | 0 |
Return a Lag wrapper for a pipeline. | def get_lagger(pipe, kwargs):
from .transforms import LagWrapper
return LagWrapper(pipe, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pipeline(self) -> Pipeline:\n if self._to_pipeline is None:\n raise AttributeError(\n \"pipeline not available because `to_pipeline` was not set on __init__.\"\n )\n return self._to_pipeline(self)",
"def get_pipeline(tag=None):\n\n\n data_science_pipeline = (\n # interdiction_baseline_call_pl()\n # + interdiction_baseline_parse_pl()\n #+ interdiction_community_pl()\n #+ interdiction_community_parse_pl()\n #+ dijkstra_prep_paths_pl()\n #+ dijkstra_parse_paths_pl()\n #+ dijkstra_reachable_pl()\n #+ dijkstra_shortest_paths_pl()\n + dijkstra_pypy_pickle_pl()\n + dijkstra_pypy_paths_pl()\n + dijkstra_make_adj_pl()\n #+ dijkstra_opt()\n + dijkstra_flow()\n + sds_counterfactual_pl()\n + supply_interdiction_pl()\n + post_supply_interdiction_pl()\n )\n \n if tag:\n if type(tag)==str:\n return Pipeline([n for n in data_science_pipeline.nodes if tag in n.tags])\n elif type(tag)==list:\n return Pipeline([n for n in data_science_pipeline.nodes if len(n.tags - set(tag)) < len(n.tags)])\n \n else:\n return data_science_pipeline",
"def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)",
"def pipeline(self, pipeline_id):\r\n return pipelines.Pipeline(self, pipeline_id)",
"def pipeline(self):\n # gotta avoid circular imports by deferring\n from .pipeline import Pipeline\n return Pipeline().from_source(self._collection)",
"def from_pipeline(cls, pipeline, proba=None, repeat=None):\n if proba is None:\n if repeat is None:\n new_p = cls(pipeline=pipeline)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_proba() is None:\n new_p = cls(pipeline=pipeline, repeat=repeat)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, repeat=repeat)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_repeat() is None:\n new_p = cls(pipeline=pipeline, proba=proba)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, proba=proba)\n return new_p",
"def set_pipeline(self):\n pipe_distance = make_pipeline(DistanceTransformer(), RobustScaler())\n pipe_time = make_pipeline(TimeFeaturesEncoder(time_column='pickup_datetime'), OneHotEncoder(handle_unknown='ignore'))\n dist_cols = ['pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude']\n time_cols = ['pickup_datetime']\n feat_eng_bloc = ColumnTransformer([('time', pipe_time, time_cols),\n ('distance', pipe_distance, dist_cols)]\n )\n self.pipeline = Pipeline(steps=[('feat_eng_bloc', feat_eng_bloc),\n ('regressor', RandomForestRegressor())])\n return self.pipeline",
"def pipeline():\n\n test_pipeline = (Pipeline()\n .init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline",
"def create_pipeline(self, train: LAMLDataset) -> LAMLTransformer:\n raise NotImplementedError",
"def pipelines(self):\r\n return pipelines.Pipelines(self)",
"def get_pipeline(self):\n if hasattr(self, \"pipeline\"):\n return self.pipeline\n steps = [\n # before preprocessor, comes the feature extractor\n ('extractor', TurkishFeatureExtractor()),\n # first the pre-processor\n (\"preprocessor\", TurkishPreprocessor(self.stemmer_name_to_method[self.stemmer_method])),\n (\"vectorizer\", TurkishVectorizer(self.feature_name_to_class[self.feature])),\n # use pca\n # (\"pca\", TruncatedSVD(n_components=20, n_iter=10)),\n (\"adder\", TurkishFeatureAdder(n_components=20, n_iter=10)),\n (\"model\", self.model_name_to_class[self.model])\n ]\n self.pipeline = Pipeline(steps)\n return self.pipeline",
"def process_pipeline(frame, keep_state=True):\n\n global line_lt, line_rt, processed_frames\n\n # undistort the image using coefficients found in calibration\n undistorted_img = undistort(frame, mtx, dist)\n\n # binarize the frame and highlight lane lines\n binarized_img = binarize(undistorted_img)\n\n # perspective transform to obtain bird's eye view\n birdeye_img, matrix, inversed_matrix = birdeye(binarized_img, visualise=False)\n\n # 2 order polynomial curve fit onto lane lines found\n if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:\n find_lane_by_previous_fits(birdeye_img, line_lt, line_rt, visualise=False)\n else:\n find_lane_by_sliding_windows(birdeye_img, line_lt, line_rt, n_windows=9, visualise=False)\n\n # compute offset in meter from center of the lane\n offset_meter = offset_from_lane_center(line_lt, line_rt, frame_width=frame.shape[1])\n\n # draw the surface enclosed by lane lines back onto the original frame\n blend_on_road = draw_back_onto_the_road(undistorted_img, inversed_matrix, line_lt, line_rt, keep_state)\n mean_curvature_meter = np.mean([line_lt.curvature_meter, line_rt.curvature_meter])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(blend_on_road, 'Curvature radius: {:.02f}m'.format(mean_curvature_meter), (60, 60), font, 1,\n (255, 255, 255), 2)\n cv2.putText(blend_on_road, 'Offset from center: {:.02f}m'.format(offset_meter), (60, 90), font, 1,\n (255, 255, 255), 2)\n\n processed_frames += 1\n\n return blend_on_road",
"def construct(args,\n **kwargs):\n kw = parse_args(args)\n kw.update(kwargs)\n return (build_pipeline(**kw),\n kw)",
"def make_pipeline():\r\n\r\n # Custom universe containing only desired assets (stocks with flag data)\r\n universe = StaticSids(my_stocks)\r\n\r\n return Pipeline(\r\n columns={\r\n #'flag_type': algo_data_full.flag_type.latest,\r\n #'flag_price': algo_data_full.flag_price.latest,\r\n #'end_flag_date': algo_data_full.end_flag_date.latest,\r\n #'end_flag_price': algo_data_full.end_flag_price.latest,\r\n 'up_flags': flag_counts.up.latest,\r\n 'down_flags': flag_counts.down.latest,\r\n 'up_ratio': up_ratios_2.up_ratio.latest,\r\n 'close': USEquityPricing.close.latest,\r\n },\r\n screen=universe\r\n )",
"def make_pipeline():\n \n # Base universe set to the QTradableStocksUS\n base_universe = QTradableStocksUS()#Q500US()\n base_universe = (base_universe & Q500US())\n base_universe = (base_universe & Fundamentals.market_cap.latest.top(150))\n \n # Factor of yesterday's close price.\n #yesterday_close = USEquityPricing.close.latest\n \n pipe = Pipeline(\n columns={\n #'close': yesterday_close,\n 'sector': Sector(),\n },\n screen=base_universe\n )\n return pipe",
"def pipeline(self):\n return self._pipeline",
"def pipeline(self):\n return self._pipeline",
"def create_fake_pipeline(*_args, **_kwargs):\n return Pipeline(\n [\n node(match.clean_match_data, \"fake_match_data\", \"clean_match_data\"),\n node(\n common.convert_match_rows_to_teammatch_rows,\n \"clean_match_data\",\n \"match_data_b\",\n ),\n node(match.add_out_of_state, \"match_data_b\", \"match_data_c\"),\n node(match.add_travel_distance, \"match_data_c\", \"match_data_d\"),\n node(match.add_result, \"match_data_d\", \"match_data_e\"),\n node(match.add_margin, \"match_data_e\", \"match_data_f\"),\n node(\n match.add_shifted_team_features(\n shift_columns=[\n \"score\",\n \"oppo_score\",\n \"result\",\n \"margin\",\n \"team_goals\",\n \"team_behinds\",\n ]\n ),\n \"match_data_f\",\n \"match_data_g\",\n ),\n node(match.add_cum_win_points, \"match_data_g\", \"match_data_h\"),\n node(match.add_win_streak, \"match_data_h\", \"match_data_i\"),\n ]\n )",
"def make_pipeline():\n universe = TradableStocksUS('Real Estate') | TradableStocksUS('Utilities') | \\\n TradableStocksUS('Consumer Staples') | TradableStocksUS('Technology') | \\\n TradableStocksUS('Financials') | TradableStocksUS('Energy') | \\\n TradableStocksUS('Materials') | TradableStocksUS('Health Care') | \\\n TradableStocksUS('Industrials') | TradableStocksUS('Consumer Discretionary') | \\\n TradableStocksUS('Communications')\n\n roic = shfd.slice(dimension='MRT', period_offset=0).ROIC.latest\n ebit = shfd.slice(dimension='MRQ', period_offset=0).EBIT.latest\n ev = shfd.slice(dimension='MRQ', period_offset=0).EV.latest\n volatility = AnnualizedVolatility(window_length=100)\n value = ebit / ev\n\n roic_rank = roic.rank(mask=universe)\n value_rank = value.rank(mask=universe)\n volatility_rank = volatility.rank(mask=universe, ascending=False)\n\n spy_ma100_price = SMA(inputs=[USEquityPricing.close], \n window_length=100)[algo.sid(\"FIBBG000BDTBL9\")]\n spy_price = USEquityPricing.close.latest[algo.sid(\"FIBBG000BDTBL9\")]\n\n momentum_score = MomentumScore()\n\n overall_rank = roic_rank + value_rank + volatility_rank\n\n # seven_month_returns = Returns(window_length=148, mask=universe,)\n # one_month_returns = Returns(window_length=30, mask=universe,)\n\n pipeline = Pipeline(\n columns={\n 'stock' : master.SecuritiesMaster.Symbol.latest,\n 'sid': master.SecuritiesMaster.Sid.latest,\n 'sector' : master.SecuritiesMaster.usstock_Sector.latest,\n 'average_dollar_volume': AverageDollarVolume(window_length=200),\n 'price': EquityPricing.close.latest,\n 'volume': EquityPricing.volume.latest,\n 'roic' : roic,\n 'value' : value,\n 'volatility': volatility,\n 'roic_rank' : roic_rank,\n 'value_rank' : value_rank,\n 'momentum': momentum_score,\n 'momentum_decile': momentum_score.deciles(),\n 'volatility_decile' : volatility.deciles(),\n 'overall_rank' : overall_rank,\n 'overall_rank_decile': overall_rank.deciles(),\n 'trend_filter': spy_price > spy_ma100_price,\n # 'returns' : one_month_returns - seven_month_returns\n }, \n screen = universe\n )\n\n return pipeline",
"def pipeline(\n self,\n name: str,\n description: Optional[str] = None,\n labels: Optional[Sequence[PipelineLabel]] = None,\n ) -> \"Directory\":\n _args = [\n Arg(\"name\", name),\n Arg(\"description\", description, None),\n Arg(\"labels\", labels, None),\n ]\n _ctx = self._select(\"pipeline\", _args)\n return Directory(_ctx)",
"def _create_pipeline(self) -> codepipeline.Pipeline:\n source_output = codepipeline.Artifact()\n build_output = codepipeline.Artifact()\n return codepipeline.Pipeline(\n self,\n 'Pipeline',\n stages=[\n self._create_source_stage('Source', source_output),\n # self._create_image_build_stage(\n # 'Build', source_output, build_output),\n # self._create_deploy_stage('Deploy', build_output)\n ]\n )",
"def pipelines(self):\n return PipelineManager(session=self._session)",
"def pipeline(ctx):\n asyncio.run(pipeline_impl(ctx.obj[\"config\"]))",
"def get_pipeline_driver(module_name, passed_args=None):\n _imports = __import__(module_name, fromlist=[\"get_pipeline\"])\n kwargs = convert_struct(passed_args)\n return _imports.get_pipeline(**kwargs)",
"def pipeline(\n self,\n name: str,\n description: Optional[str] = None,\n labels: Optional[Sequence[PipelineLabel]] = None,\n ) -> \"Container\":\n _args = [\n Arg(\"name\", name),\n Arg(\"description\", description, None),\n Arg(\"labels\", labels, None),\n ]\n _ctx = self._select(\"pipeline\", _args)\n return Container(_ctx)",
"def get_refinement_pipeline():\n node_scaling = PrimaryNode('scaling')\n node_logit = SecondaryNode('logit', nodes_from=[node_scaling])\n node_decompose = SecondaryNode('class_decompose', nodes_from=[node_logit, node_scaling])\n node_rfr = SecondaryNode('rfr', nodes_from=[node_decompose])\n node_xgboost = SecondaryNode('xgboost', nodes_from=[node_rfr, node_logit])\n\n pipeline = Pipeline(node_xgboost)\n return pipeline",
"def set_pipeline(self):\n dist_pipe = Pipeline([\n ('dist_trans', DistanceTransformer()),\n ('stdscaler', StandardScaler())\n ])\n\n time_pipe = Pipeline([\n ('time_enc', TimeFeaturesEncoder('pickup_datetime')),\n ('ohe', OneHotEncoder(handle_unknown='ignore'))\n ])\n\n preproc_pipe = ColumnTransformer([\n ('distance', dist_pipe, [\"pickup_latitude\", \"pickup_longitude\", 'dropoff_latitude', 'dropoff_longitude']),\n ('time', time_pipe, ['pickup_datetime'])\n ], remainder=\"drop\")\n\n pipe = Pipeline([\n ('preproc', preproc_pipe),\n ('linear_model', LinearRegression())\n ])\n return pipe",
"def make_pipeline(slam, settings):\n\n pipeline_name = \"pipeline_source[inversion]\"\n\n \"\"\"\n This pipeline is tagged according to whether:\n\n 1) Hyper-fitting settings (galaxies, sky, background noise) are used.\n 2) The lens galaxy mass model includes an `ExternalShear`.\n 3) The `Pixelization` and `Regularization` scheme of the pipeline (fitted in phases 3 & 4).\n \"\"\"\n\n path_prefix = f\"{slam.path_prefix}/{pipeline_name}/{slam.source_inversion_tag}\"\n\n \"\"\"\n Phase 1: Fit the `Pixelization` and `Regularization`, where we:\n\n 1) Fix the lens mass model to the `MassProfile`'s inferred by the previous pipeline.\n \"\"\"\n\n phase1 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[1]_mass[fixed]_source[inversion_magnification_initialization]\",\n n_live_points=30,\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=af.last.instance.galaxies.lens.mass,\n shear=af.last.instance.galaxies.lens.shear,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.lens.hyper_galaxy,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=al.pix.VoronoiMagnification,\n regularization=al.reg.Constant,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=af.last.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=af.last.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase1 = phase1.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 2: Fit the lens`s mass and source galaxy using the magnification `Inversion`, where we:\n\n 1) Fix the source `Inversion` parameters to the results of phase 1.\n 2) Set priors on the lens galaxy `MassProfile`'s using the results of the previous pipeline.\n \"\"\"\n\n phase2 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[2]_mass[total]_source[fixed]\", n_live_points=50\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=af.last[-1].model.galaxies.lens.mass,\n shear=af.last[-1].model.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=phase1.result.instance.galaxies.source.pixelization,\n regularization=phase1.result.instance.galaxies.source.regularization,\n hyper_galaxy=phase1.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase1.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase1.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase2 = phase2.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 3: fit the input pipeline `Pixelization` & `Regularization`, where we:\n\n 1) Fix the lens `MassProfile` to the result of phase 2.\n \"\"\"\n\n phase3 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[3]_mass[fixed]_source[inversion_initialization]\",\n n_live_points=30,\n evidence_tolerance=slam.setup_hyper.evidence_tolerance,\n sample=\"rstagger\",\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=phase2.result.instance.galaxies.lens.mass,\n shear=phase2.result.instance.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=slam.pipeline_source_inversion.setup_source.pixelization_prior_model,\n 
regularization=slam.pipeline_source_inversion.setup_source.regularization_prior_model,\n hyper_galaxy=phase2.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase2.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase2.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase3 = phase3.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 4: fit the lens`s mass using the input pipeline `Pixelization` & `Regularization`, where we:\n\n 1) Fix the source `Inversion` parameters to the results of phase 3.\n 2) Set priors on the lens galaxy `MassProfile`'s using the results of phase 2.\n \"\"\"\n\n mass = slam.pipeline_source_parametric.setup_mass.mass_prior_model_with_updated_priors(\n index=-1, unfix_mass_centre=True\n )\n\n phase4 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[4]_mass[total]_source[fixed]\", n_live_points=50\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=mass,\n shear=phase2.result.model.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=phase3.result.instance.galaxies.source.pixelization,\n regularization=phase3.result.instance.galaxies.source.regularization,\n hyper_galaxy=phase3.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase3.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase3.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase4 = phase4.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=True\n )\n\n return al.PipelineDataset(\n pipeline_name, path_prefix, phase1, phase2, phase3, phase4\n )",
"def create_pipeline(path):\n\n pipeline = import_file(path)\n # Perform Wigner-Seitz analysis:\n ws = WignerSeitzAnalysisModifier(\n output_displaced=False, # Output sites\n per_type_occupancies=True, # Output occupancies per atom type\n affine_mapping=ReferenceConfigurationModifier.AffineMapping.ToReference)\n pipeline.modifiers.append(ws)\n # Calculate total and elementwise occupancies\n pipeline.modifiers.append(total_occupancy_modifier)\n # Select all defect sites\n pipeline.modifiers.append(select_defects_modifier)\n # Delete all non-defect sites\n pipeline.modifiers.append(InvertSelectionModifier())\n pipeline.modifiers.append(DeleteSelectedModifier())\n # Find defect clusters\n pipeline.modifiers.append(ClusterAnalysisModifier(\n cutoff=CLUSTER_CUTOFF,\n sort_by_size=False))\n # Classify defect clusters\n pipeline.modifiers.append(classify_defect_clusters_modifier)\n\n return pipeline",
"def get_loading_pipeline(pipeline):\n loading_pipeline = []\n for transform in pipeline:\n is_loading = is_loading_function(transform)\n if is_loading is None: # MultiScaleFlipAug3D\n # extract its inner pipeline\n if isinstance(transform, dict):\n inner_pipeline = transform.get('transforms', [])\n else:\n inner_pipeline = transform.transforms.transforms\n loading_pipeline.extend(get_loading_pipeline(inner_pipeline))\n elif is_loading:\n loading_pipeline.append(transform)\n assert len(loading_pipeline) > 0, \\\n 'The data pipeline in your config file must include ' \\\n 'loading step.'\n return loading_pipeline"
] | [
"0.614468",
"0.5957362",
"0.59200114",
"0.58368886",
"0.5657546",
"0.54006594",
"0.53980225",
"0.53207415",
"0.53056926",
"0.53035724",
"0.5302075",
"0.52976626",
"0.5268746",
"0.52655923",
"0.5263156",
"0.52361935",
"0.52361935",
"0.52285284",
"0.52276915",
"0.5182775",
"0.5180908",
"0.51678705",
"0.51635337",
"0.5153052",
"0.51524246",
"0.5142166",
"0.51246774",
"0.51197654",
"0.5113149",
"0.511194"
] | 0.8226096 | 0 |
Return a Markov wrapper for a pipeline. | def get_markov_wrapper(pipe, kwargs):
from .transforms import MarkovWrapper
return MarkovWrapper(pipe, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_own_pipeline() -> Pipeline:\n clf = svm.LinearSVC(C=2, loss='hinge')\n vect = TfidfVectorizer(ngram_range=(1, 2))\n\n pipeline = None\n ##### Write code here #######\n pipeline = Pipeline([\n ('vect', vect),\n ('tfidf', TfidfTransformer()),\n ('clf', clf)\n ])\n ##### End of your work ######\n return pipeline",
"def build_own_pipeline() -> Pipeline:\n nn_pipeline = None\n\n nn_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MLPClassifier()) \n ])\n \n return nn_pipeline",
"def create_pipelines_lingspam():\n stop = ('stop', StopWordRemovalTransformer())\n lemma = ('lemma', LemmatizeTransformer())\n binz = ('binarizer', CountVectorizer())\n we = ('document embedding', DocEmbeddingVectorizer())\n sel = ('fsel', SelectKBest(score_func=mutual_info_classif, k=100))\n clf = ('cls', BernoulliNB()) # Binary features in the original paper. \n return Pipeline([binz, sel, clf]), \\\n Pipeline([stop, binz, sel, clf]), \\\n Pipeline([lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, we, sel, clf])",
"def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)",
"def create_pipeline_for_kfold(self, args):\n return ClassificationPipeline(args=args)",
"def test_generate_pipeline_code():\n pipeline = ['KNeighborsClassifier',\n ['CombineDFs',\n ['GradientBoostingClassifier',\n 'input_matrix',\n 38.0,\n 0.87],\n ['GaussianNB',\n ['ZeroCount',\n 'input_matrix']]],\n 18,\n 33]\n\n expected_code = \"\"\"make_pipeline(\n make_union(\n make_union(VotingClassifier([('branch',\n GradientBoostingClassifier(learning_rate=1.0, max_features=1.0, n_estimators=500)\n )]), FunctionTransformer(lambda X: X)),\n make_union(VotingClassifier([('branch',\n make_pipeline(\n ZeroCount(),\n GaussianNB()\n )\n )]), FunctionTransformer(lambda X: X))\n ),\n KNeighborsClassifier(n_neighbors=5, weights=\"distance\")\n)\"\"\"\n\n assert expected_code == generate_pipeline_code(pipeline)",
"def make_pipeline(model):\n\n steps = [\n (\"imp\", SimpleImputer(strategy=\"most_frequent\")),\n (\"norm\", MinMaxScaler()),\n (\"reg\", model)\n ]\n pipeline = Pipeline(steps=steps)\n\n return pipeline",
"def create_fake_pipeline(*_args, **_kwargs):\n return Pipeline(\n [\n node(match.clean_match_data, \"fake_match_data\", \"clean_match_data\"),\n node(\n common.convert_match_rows_to_teammatch_rows,\n \"clean_match_data\",\n \"match_data_b\",\n ),\n node(match.add_out_of_state, \"match_data_b\", \"match_data_c\"),\n node(match.add_travel_distance, \"match_data_c\", \"match_data_d\"),\n node(match.add_result, \"match_data_d\", \"match_data_e\"),\n node(match.add_margin, \"match_data_e\", \"match_data_f\"),\n node(\n match.add_shifted_team_features(\n shift_columns=[\n \"score\",\n \"oppo_score\",\n \"result\",\n \"margin\",\n \"team_goals\",\n \"team_behinds\",\n ]\n ),\n \"match_data_f\",\n \"match_data_g\",\n ),\n node(match.add_cum_win_points, \"match_data_g\", \"match_data_h\"),\n node(match.add_win_streak, \"match_data_h\", \"match_data_i\"),\n ]\n )",
"def pipeline(self):\n steps = [('DummyDefense', DummyDefense()),\n ('DummyClassifier', DummyClassifier())]\n return Pipeline(steps)",
"def pipe(self, func, *args, **kwargs):\n return func(self, *args, **kwargs)",
"def pipeline(self) -> Pipeline:\n if self._to_pipeline is None:\n raise AttributeError(\n \"pipeline not available because `to_pipeline` was not set on __init__.\"\n )\n return self._to_pipeline(self)",
"def pipeline(self):\n return stanza.Pipeline(dir=TEST_MODELS_DIR, processors=\"tokenize,ner\")",
"def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe",
"def pipeline():\n\n test_pipeline = (Pipeline()\n .init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline",
"def build_svm_pipeline():\n svm_pipeline = None\n\n svm_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', SGDClassifier()),\n ])\n\n return svm_pipeline",
"def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline",
"def create_pipeline(path):\n\n pipeline = import_file(path)\n # Perform Wigner-Seitz analysis:\n ws = WignerSeitzAnalysisModifier(\n output_displaced=False, # Output sites\n per_type_occupancies=True, # Output occupancies per atom type\n affine_mapping=ReferenceConfigurationModifier.AffineMapping.ToReference)\n pipeline.modifiers.append(ws)\n # Calculate total and elementwise occupancies\n pipeline.modifiers.append(total_occupancy_modifier)\n # Select all defect sites\n pipeline.modifiers.append(select_defects_modifier)\n # Delete all non-defect sites\n pipeline.modifiers.append(InvertSelectionModifier())\n pipeline.modifiers.append(DeleteSelectedModifier())\n # Find defect clusters\n pipeline.modifiers.append(ClusterAnalysisModifier(\n cutoff=CLUSTER_CUTOFF,\n sort_by_size=False))\n # Classify defect clusters\n pipeline.modifiers.append(classify_defect_clusters_modifier)\n\n return pipeline",
"def _pipeline(self, vectorizer, n_features, ngram_range, C):\n classifier = SVC(kernel=\"linear\", C=C, max_iter=1000000, shrinking=1, tol=0.0001)\n vectorizer.set_params(stop_words=None, max_features=self.max_features, ngram_range=ngram_range)\n \n checker_pipeline = Pipeline([\n ('vectorizer', vectorizer),\n ('reduce_dim', SelectKBest(chi2, k=n_features)),\n ('classify', classifier)])\n\n return checker_pipeline",
"def pipeline(self):\n return stanza.Pipeline(dir=TEST_MODELS_DIR, processors=\"tokenize,ner\", package={\"ner\": [\"ncbi_disease\", \"ontonotes\"]})",
"def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe",
"def make_pipeline():\n universe = TradableStocksUS('Real Estate') | TradableStocksUS('Utilities') | \\\n TradableStocksUS('Consumer Staples') | TradableStocksUS('Technology') | \\\n TradableStocksUS('Financials') | TradableStocksUS('Energy') | \\\n TradableStocksUS('Materials') | TradableStocksUS('Health Care') | \\\n TradableStocksUS('Industrials') | TradableStocksUS('Consumer Discretionary') | \\\n TradableStocksUS('Communications')\n\n roic = shfd.slice(dimension='MRT', period_offset=0).ROIC.latest\n ebit = shfd.slice(dimension='MRQ', period_offset=0).EBIT.latest\n ev = shfd.slice(dimension='MRQ', period_offset=0).EV.latest\n volatility = AnnualizedVolatility(window_length=100)\n value = ebit / ev\n\n roic_rank = roic.rank(mask=universe)\n value_rank = value.rank(mask=universe)\n volatility_rank = volatility.rank(mask=universe, ascending=False)\n\n spy_ma100_price = SMA(inputs=[USEquityPricing.close], \n window_length=100)[algo.sid(\"FIBBG000BDTBL9\")]\n spy_price = USEquityPricing.close.latest[algo.sid(\"FIBBG000BDTBL9\")]\n\n momentum_score = MomentumScore()\n\n overall_rank = roic_rank + value_rank + volatility_rank\n\n # seven_month_returns = Returns(window_length=148, mask=universe,)\n # one_month_returns = Returns(window_length=30, mask=universe,)\n\n pipeline = Pipeline(\n columns={\n 'stock' : master.SecuritiesMaster.Symbol.latest,\n 'sid': master.SecuritiesMaster.Sid.latest,\n 'sector' : master.SecuritiesMaster.usstock_Sector.latest,\n 'average_dollar_volume': AverageDollarVolume(window_length=200),\n 'price': EquityPricing.close.latest,\n 'volume': EquityPricing.volume.latest,\n 'roic' : roic,\n 'value' : value,\n 'volatility': volatility,\n 'roic_rank' : roic_rank,\n 'value_rank' : value_rank,\n 'momentum': momentum_score,\n 'momentum_decile': momentum_score.deciles(),\n 'volatility_decile' : volatility.deciles(),\n 'overall_rank' : overall_rank,\n 'overall_rank_decile': overall_rank.deciles(),\n 'trend_filter': spy_price > spy_ma100_price,\n # 'returns' : one_month_returns - seven_month_returns\n }, \n screen = universe\n )\n\n return pipeline",
"def build_naive_bayes():\n nb_pipeline = None\n ##### Write code here\n nb_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', ComplementNB())\n ])\n\n ##### End of your work ######\n return nb_pipeline",
"def _make_pipeline(preprocessors, classifier):\n if isinstance(preprocessors, list):\n # support only preprocessing of lenght 2\n return make_pipeline(preprocessors[0], preprocessors[1], classifier)\n if preprocessors is None:\n return make_pipeline(classifier)\n\n return make_pipeline(preprocessors, classifier)",
"def get_pipeline(self):\n if hasattr(self, \"pipeline\"):\n return self.pipeline\n steps = [\n # before preprocessor, comes the feature extractor\n ('extractor', TurkishFeatureExtractor()),\n # first the pre-processor\n (\"preprocessor\", TurkishPreprocessor(self.stemmer_name_to_method[self.stemmer_method])),\n (\"vectorizer\", TurkishVectorizer(self.feature_name_to_class[self.feature])),\n # use pca\n # (\"pca\", TruncatedSVD(n_components=20, n_iter=10)),\n (\"adder\", TurkishFeatureAdder(n_components=20, n_iter=10)),\n (\"model\", self.model_name_to_class[self.model])\n ]\n self.pipeline = Pipeline(steps)\n return self.pipeline",
"def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])",
"def pipeline(self):\n # gotta avoid circular imports by deferring\n from .pipeline import Pipeline\n return Pipeline().from_source(self._collection)",
"def _create_pipeline(self) -> TfmIterator:\n # 1. Initialise TubRecord -> x, y transformations\n def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting x from record for training\"\"\"\n out_dict = self.model.x_transform(record, self.image_processor)\n # apply the normalisation here on the fly to go from uint8 -> float\n out_dict['img_in'] = normalize_image(out_dict['img_in'])\n return out_dict\n\n def get_y(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting y from record for training \"\"\"\n y = self.model.y_transform(record)\n return y\n\n # 2. Build pipeline using the transformations\n pipeline = self.sequence.build_pipeline(x_transform=get_x,\n y_transform=get_y)\n return pipeline",
"def create_pipeline(self, train: LAMLDataset) -> LAMLTransformer:\n raise NotImplementedError",
"def get_pipeline(features, to_matrix=True):\n feature_names = []\n for feature in features:\n feature_names += feature[1].FEATS\n if to_matrix:\n return Pipeline(features + [('transform', ToMatrix(features=feature_names)), ('norm', MinMaxScaler())])\n else:\n return Pipeline(features)",
"def build_model():\n pipeline = Pipeline([\n ('vectorizer', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n # (), # Feature engineering (word2vec/GloVe)\n (\"clf\", MultiOutputClassifier(RandomForestClassifier(n_estimators=100), n_jobs=-1))\n ])\n\n return pipeline"
] | [
"0.6571192",
"0.6444065",
"0.6408322",
"0.60807306",
"0.6047931",
"0.5914984",
"0.59031147",
"0.58204484",
"0.5804149",
"0.5747107",
"0.5725643",
"0.5704614",
"0.5703797",
"0.56960964",
"0.56624687",
"0.56623226",
"0.5654284",
"0.562496",
"0.5591383",
"0.55484897",
"0.5527152",
"0.55214953",
"0.5515504",
"0.5454731",
"0.5431739",
"0.5416454",
"0.5387037",
"0.536923",
"0.5362079",
"0.5361307"
] | 0.83327645 | 0 |
Return a scikit-learn clusterer from name and args. | def get_clusterer(name, kwargs):
if name == 'KMeans':
from sklearn.cluster import KMeans
return KMeans(**kwargs)
if name == 'MiniBatchKMeans':
from sklearn.cluster import MiniBatchKMeans
return MiniBatchKMeans(**kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]",
"def create_marker_cluster(name: str):\n return MarkerCluster(name=name)",
"def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)",
"def cluster(args):\n\n # if not (args.coverage or args.index):\n # logging.error('Must specify a coverage file or contigs + reference index.')\n\n logging.info('Starting clustering process')\n perform_clustering(args)",
"def parse_clustering(key, content):\n if inspect.isclass(key):\n cl = key(**content)\n key = cl.__class__.__name__.lower()\n\n elif 'auto' in (content.get('n_clusters', ''),\n content.get('preference', '')) \\\n and key.lower() != 'hierarchical':\n # Wrapper class that automatically detects the best number of clusters\n # via 10-Fold CV\n content.pop('n_clusters', '')\n content.pop('preference', '')\n\n kwargs = {'param_grid': [], 'n_jobs': -1,\n 'scoring': silhouette_score, 'cv': 10}\n\n if key.lower() == 'kmeans':\n content.setdefault('init', 'k-means++')\n content.setdefault('n_jobs', 1)\n kwargs['estimator'] = KMeans(**content)\n elif key.lower() == 'ap':\n kwargs['estimator'] = AffinityPropagation(**content)\n kwargs['affinity'] = kwargs['estimator'].affinity\n else:\n logging.error(\"n_clusters = 'auto' specified outside kmeans or \"\n \"ap. Trying to create GridSearchCV pipeline anyway \"\n \" ...\")\n cl = GridSearchCV(**kwargs)\n elif 'auto' in (content.get('n_clusters', ''),\n content.get('preference', '')) \\\n and key.lower() == 'hierarchical':\n # TODO implement this\n # from adenine.utils.extensions import AgglomerativeClustering\n cl = AgglomerativeClustering(**content)\n else:\n if key.lower() == 'kmeans':\n content.setdefault('n_jobs', -1)\n cl = KMeans(**content)\n elif key.lower() == 'ap':\n content.setdefault('preference', 1)\n cl = AffinityPropagation(**content)\n elif key.lower() == 'ms':\n cl = MeanShift(**content)\n elif key.lower() == 'spectral':\n cl = SpectralClustering(**content)\n elif key.lower() == 'hierarchical':\n cl = AgglomerativeClustering(**content)\n else:\n cl = DummyNone()\n return (key, cl, 'clustering')",
"def get_one_cluster_by_name(ctx, cluster_name, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].get()\n pprint(cluster.data)",
"def __init__(self, conn, args, data, split_type, num_clusters):\n\n self.conn = conn\n self.args = args\n self.data = data\n self.split_type = split_type\n\n self.pca_model = None\n self.cluster_model = None\n self.algorithm = args['cluster_algorithm']\n\n # http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n affinity_propagation = cluster.AffinityPropagation()\n ms = cluster.MeanShift(bin_seeding=True)\n spectral = cluster.SpectralClustering(n_clusters=num_clusters, \n eigen_solver='arpack',\n affinity=\"nearest_neighbors\", \n random_state=self.args['seed'])\n ward = cluster.AgglomerativeClustering(n_clusters=num_clusters, \n linkage='ward')\n birch = cluster.Birch(n_clusters=num_clusters)\n two_means = cluster.MiniBatchKMeans(n_clusters=num_clusters,\n random_state=self.args['seed'])\n average_linkage = cluster.AgglomerativeClustering(linkage=\"average\", \n n_clusters=num_clusters)\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n kmeans = cluster.KMeans(n_clusters=num_clusters, random_state=self.args['seed'])\n dbscan = cluster.DBSCAN()\n \n self.clustering_algorithms = {\n 'MiniBatchKMeans': two_means,\n 'AffinityPropagation': affinity_propagation,\n 'MeanShift': ms,\n 'SpectralClustering': spectral,\n 'Ward': ward,\n 'AgglomerativeClustering': average_linkage,\n 'DBSCAN': dbscan,\n 'Birch': birch,\n 'HDBSCAN': hdbsc,\n 'KMeans': kmeans\n }",
"def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))",
"def __str__(self):\n return \"Clustering\"",
"def parse():\n intro = \"\"\"\\\n Use this script to bootstrap, join nodes within a Galera Cluster\n ----------------------------------------------------------------\n Avoid joining more than one node at once!\n \"\"\"\n parser = argparse.ArgumentParser(\n formatter_class=lambda prog:\n argparse.RawDescriptionHelpFormatter(prog, max_help_position=29),\n description=textwrap.dedent(intro),\n epilog=\"Author: Massimiliano Adamo <[email protected]>\")\n parser.add_argument(\n '-cg', '--check-galera', help='check if all nodes are healthy',\n action='store_true', dest='Cluster(None, None).checkonly()',\n required=False)\n parser.add_argument(\n '-dr', '--dry-run', help='show SQL statements to run on this cluster',\n action='store_true', dest='Cluster(None, None).show_statements()',\n required=False)\n parser.add_argument(\n '-je', '--join-existing', help='join existing Cluster',\n action='store_true',\n dest='Cluster(\"existing\", \"existing\").joincluster()', required=False)\n parser.add_argument(\n '-be', '--bootstrap-existing', help='bootstrap existing Cluster',\n action='store_true', dest='Cluster(None, \"existing\").createcluster()',\n required=False)\n parser.add_argument(\n '-jn', '--join-new', help='join new Cluster', action='store_true',\n dest='Cluster(\"new\", \"new\").joincluster()', required=False)\n parser.add_argument(\n '-bn', '--bootstrap-new', action='store_true',\n help='bootstrap new Cluster',\n dest='Cluster(None, \"new\").createcluster()', required=False)\n parser.add_argument(\n '-f', '--force', action='store_true',\n help='force bootstrap new or join new Cluster', required=False)\n\n return parser.parse_args()",
"def main():\n parser = argparse.ArgumentParser(description=\"Wrapper of the scikit-learn AgglomerativeClustering method. \", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('--config', required=False, help='Configuration file')\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')\n required_args.add_argument('--output_results_path', required=True, help='Path to the clustered dataset. Accepted formats: csv.')\n parser.add_argument('--output_plot_path', required=False, help='Path to the clustering plot. Accepted formats: png.')\n\n args = parser.parse_args()\n args.config = args.config or \"{}\"\n properties = settings.ConfReader(config=args.config).get_prop_dic()\n\n # Specific call of each building block\n agglomerative_clustering(input_dataset_path=args.input_dataset_path,\n output_results_path=args.output_results_path,\n output_plot_path=args.output_plot_path,\n properties=properties)",
"def __init__(__self__,\n resource_name: str,\n args: ClusterArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n args: ClusterArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n args: ClusterArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def is_sklearn_clusterer(obj):\n return is_sklearn_estimator(obj) and sklearn_scitype(obj) == \"clusterer\"",
"def createCluster(method, n_clust=3, min_samples=5):\n if method == 'SpectralClustering':\n clust = SpectralClustering(n_clusters=n_clust)\n clust.fit(PC)\n scat = plt.scatter(-100, -100, zorder=2)\n elif method == 'OPTICS':\n clust = OPTICS(min_samples=min_samples)\n clust.fit(PC)\n scat = plt.scatter(PC[clust.labels_ == -1, 0],\n PC[clust.labels_ == -1, 1], c='k')\n return clust, scat",
"def kozakov2015(*args, **kwargs):\n clusters = []\n for sel in args:\n cluster = Cluster(\"\", sel, pm.get_coords(sel))\n clusters.append(cluster)\n\n ensemble = Kozakov2015Ensemble(clusters)\n print(\n textwrap.dedent(\n f\"\"\"\n {ensemble}\n Class {ensemble.klass}\n S {ensemble.strength}\n S0 {ensemble.strength0}\n CD {ensemble.max_center_to_center}\n MD {ensemble.max_dist}\n \"\"\"\n )\n )",
"def __str__(self):\n return \"Cluster\"",
"def generate_cluster_stack_name(job):\n return 'cluster-%s----%s' % (job.compute_resource.id, job.id)",
"def _setup_test_cluster(self, return_cluster, name, create_args):\n stack_name = '{0}_stack'.format(name)\n templ, self.stack = self._setup_test_stack(stack_name, TEMPLATE)\n cluster_instance = cbd.CloudBigData('%s_name' % name,\n templ.resource_definitions(\n self.stack)['cbd_cluster'],\n self.stack)\n self._stubout_create(return_cluster)\n return cluster_instance",
"def get_cluster(self) -> 'AioCluster':\n return AioCluster(self)",
"def create_cluster(self, name, cluster_type, params, ssh_key, *args, **kwargs):\n raise NotImplementedError",
"def create_cluster(module, switch_list):\n global CHANGED_FLAG\n output = ''\n new_cluster = False\n\n node1 = switch_list[0]\n node2 = switch_list[1]\n\n name = node1 + '-' + node2 + '-cluster'\n\n cli = pn_cli(module)\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = run_cli(module, cli)\n\n if cluster_list is not None:\n cluster_list = cluster_list.split()\n if name not in cluster_list:\n new_cluster = True\n\n if new_cluster or cluster_list is None:\n cli = pn_cli(module)\n cli += ' switch %s cluster-create name %s ' % (node1, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n output += '%s: Created cluster %s\\n' % (node1, name)\n\n return output",
"def get_cluster_def():\n if settings.NO_OP:\n return None\n\n ensure_in_custer()\n\n cluster = os.getenv('POLYAXON_CLUSTER', None)\n try:\n return json.loads(cluster) if cluster else None\n except (ValueError, TypeError):\n print('Could get cluster definition, '\n 'please make sure this is running inside a polyaxon job.')\n return None",
"def main():\n if sys.argv[1] == \"start\":\n start_cluster(sys.argv[2], sys.argv[3], int(sys.argv[4]),\n int(sys.argv[5]), sys.argv[6], sys.argv[7],\n int(sys.argv[8]))\n elif sys.argv[1] == \"stop\":\n stop_cluster()\n else:\n print 'Unknown Option'",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ClusterArgs.__new__(ClusterArgs)\n\n __props__.__dict__[\"allocation_state\"] = None\n __props__.__dict__[\"allocation_state_transition_time\"] = None\n __props__.__dict__[\"creation_time\"] = None\n __props__.__dict__[\"current_node_count\"] = None\n __props__.__dict__[\"errors\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"node_setup\"] = None\n __props__.__dict__[\"node_state_counts\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"provisioning_state_transition_time\"] = None\n __props__.__dict__[\"scale_settings\"] = None\n __props__.__dict__[\"subnet\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"user_account_settings\"] = None\n __props__.__dict__[\"virtual_machine_configuration\"] = None\n __props__.__dict__[\"vm_priority\"] = None\n __props__.__dict__[\"vm_size\"] = None\n return Cluster(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n cluster_id: Optional[pulumi.Input[str]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n size_gb: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"cluster_id\"] = cluster_id\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"size_gb\"] = size_gb\n __props__.__dict__[\"tags\"] = tags\n return Cluster(resource_name, opts=opts, __props__=__props__)",
"def _initialize_cluster(filename):\n\tstar_cluster = cluster.Cluster(filename)\n\tprint(\"\\nYour star cluster is being created ...\")\n\tstar_cluster.populate_celestials()\n\treturn star_cluster",
"def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. Cluster information: \\n{rs.get_cluster_info()}')",
"def launch_cluster(**overrides) -> dict:\n if os.path.isfile(META_FILE):\n raise FileExistsError(\"Cluster already exists!\")\n\n config = DEFAULT_CONFIG.copy()\n config.update(**overrides)\n\n sg = make_sg()\n config[\"Instances\"].update(AdditionalMasterSecurityGroups=[sg.id])\n emr = get_emr_client()\n\n response = emr.run_job_flow(**config)\n cluster_id = response[\"JobFlowId\"]\n master_addr = wait_init(cluster_id)\n\n meta = {\n \"MasterNodeAddr\": master_addr,\n \"ClusterId\": cluster_id,\n \"SGId\": sg.id\n }\n with open(META_FILE, \"w\") as f:\n json.dump(meta, f)\n\n print(\"INFO: Cluster Launched!\")\n return meta"
] | [
"0.5950856",
"0.58060235",
"0.5799281",
"0.5795873",
"0.5752199",
"0.56631964",
"0.55473256",
"0.5533583",
"0.5423363",
"0.54131496",
"0.53921217",
"0.53843373",
"0.53843373",
"0.53843373",
"0.5361153",
"0.5359831",
"0.53507227",
"0.53416944",
"0.5282299",
"0.5266684",
"0.526383",
"0.52311033",
"0.5228705",
"0.52240753",
"0.5216106",
"0.51854485",
"0.51847214",
"0.5178425",
"0.5169556",
"0.5166051"
] | 0.7380628 | 0 |
Get a sklearn scaler from a scaler name. | def get_scaler(scaler):
if scaler == 'standard':
from sklearn.preprocessing import StandardScaler
return StandardScaler()
if scaler == 'minmax':
from sklearn.preprocessing import MinMaxScaler
return MinMaxScaler() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __create_scaler_type(self):\n\n if self.scalertype == \"standard\":\n return StandardScaler()\n if self.scalertype == \"minmax\":\n return MinMaxScaler(feature_range=self.featureRange)\n assert True, \"An error occured when creating a scaler of type '{}'\".format(self.scalertype)",
"def get_normalizer(data):\n scaler = StandardScaler().fit(data)\n return scaler",
"def _load_scaler(self, scaler_file):\n assert isinstance(scaler_file, str),\\\n \"scaler_file not entered as string.\"\n self.scaler = joblib.load(file_path(scaler_file))\n return",
"def from_name(cls: Type[AutoScalerPolicy], name: str) -> AutoScalerPolicy:\n try:\n return cls[name.upper()]\n except KeyError:\n raise RuntimeError(f'Unknown {cls.__name__} \\'{name}\\'')",
"def any_preprocessing(name):\n return hp.choice('%s' % name, [\n [pca(name + '.pca')],\n [standard_scaler(name + '.standard_scaler')],\n [min_max_scaler(name + '.min_max_scaler')],\n [normalizer(name + '.normalizer')],\n # -- not putting in one-hot because it can make vectors huge\n #[one_hot_encoder(name + '.one_hot_encoder')],\n []\n ])",
"def test_scaler_attribute_type(self, scaler, scaler_type):\n\n x = ScalingTransformer(columns=\"b\", scaler=scaler)\n\n assert (\n type(x.scaler) is scaler_type\n ), f\"unexpected scaler set in init for {scaler}\"",
"def get_norm(name):\n if name in _metrics.keys():\n return _metrics[name]\n raise ValueError(\"Name '{}' does not stand for any known norm\", name)",
"def get_clf_and_scaler(data_path, pickle_file='./data/classifier.p'):\n clf, scaler = load_model(pickle_file)\n if clf == None:\n clf, scaler = train_classifier(data_path)\n save_model(clf, scaler, pickle_file)\n\n return clf, scaler",
"def compute_scaler(args):\n workspace = args.workspace\n data_type = args.data_type\n dir_name = args.dir_name \n # Load data. \n t1 = time.time()\n hdf5_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, dir_name, \"data.h5\")\n with h5py.File(hdf5_path, 'r') as hf:\n x = hf.get('x') \n x = np.array(x) # (n_segs, n_concat, n_freq)\n \n # Compute scaler. \n (n_segs, n_concat, n_freq) = x.shape\n x2d = x.reshape((n_segs * n_concat, n_freq))\n scaler = preprocessing.StandardScaler(with_mean=True, with_std=True).fit(x2d)\n print(scaler.mean_)\n print(scaler.scale_)\n \n # Write out scaler. \n out_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, dir_name, \"scaler.p\")\n create_folder(os.path.dirname(out_path))\n pickle.dump(scaler, open(out_path, 'wb'))\n \n print(\"Save scaler to %s\" % out_path)\n print(\"Compute scaler finished! %s s\" % (time.time() - t1,))",
"def train_scaler(dset, varname=None, row_dim='time', transform=True):\n \n try: \n from dask_ml.preprocessing import StandardScaler\n except: \n from sklearn.preprocessing import StandardScaler\n \n dset = dset[varname]\n space_dims = tuple(x for x in dset.dims if x != row_dim)\n dset_stack = dset.stack(z=space_dims) \n scaler = StandardScaler()\n if transform: \n data_std = scaler.fit_transform(dset_stack.data)\n dset_stack.data = data_std\n dset = dset_stack.unstack()\n return dset, scaler\n else:\n return None, scaler",
"def compute_scaler(data_type):\n workspace = config.workspace\n\n if data_type == 'train':\n snr = config.Tr_SNR\n \n # Load data. \n t1 = time.time()\n hdf5_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, \"%ddb\" % int(snr), \"data.h5\")\n with h5py.File(hdf5_path, 'r') as hf:\n x = hf.get('x') \n x = np.array(x) # (n_segs, n_concat, n_freq)\n \n # Compute scaler. \n (n_segs, n_concat, n_freq) = x.shape\n x2d = x.reshape((n_segs * n_concat, n_freq))\n scaler = StandardScaler(with_mean=True, with_std=True).fit(x2d)\n# print(scaler.mean_)\n# print(scaler.scale_)\n \n # Write out scaler. \n out_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, \"%ddb\" % int(snr), \"scaler.p\")\n create_folder(os.path.dirname(out_path))\n pickle.dump(scaler, open(out_path, 'wb'))\n \n print(\"Save scaler to %s\" % out_path)\n print(\"Compute scaler finished! %s s\" % (time.time() - t1,))",
"def get_sklearn_model(x):\n if is_sklearn_model(x):\n return x # already a valid model\n elif type(x) is dict:\n if hasattr(x, 'model'):\n return get_sklearn_model(x['model'])\n else:\n return None\n elif type(x) is str:\n # noinspection PyBroadException\n try:\n return get_sklearn_model(eval(x))\n except:\n pass\n return None",
"def _scaling_model_from_dict(obj):\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == obj[\"__id__\"]:\n return entry_point.load().from_dict(obj)",
"def build_scale_controller(name: str, kwargs=None) -> Union[ScaleControllerBase, None]:\n if not name or name == 'none':\n return None\n controller_choices = {\n 'learn': LearnableScaleController,\n 'fix': FixedScaleController,\n 'relu': ReluScaleController,\n 'exp': ExpScaleController,\n 'softmax': SoftmaxScaleController,\n 'norm': NormalizeScaleController,\n }\n if name not in controller_choices:\n raise KeyError('Wrong scale controller name.')\n controller_type = controller_choices[name]\n return controller_type(**kwargs) if kwargs else controller_type()",
"def set_scalers(self, df):\n print_info('Setting scalers with training data...')\n\n column_definitions = self.getcolumn_definition()\n id_column = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n target_column = get_single_col_by_input_type(InputTypes.TARGET, column_definitions)\n\n # Format real scalers\n real_inputs = extract_cols_from_data_type(\n DataTypes.REAL_VALUED, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n # Initialise scaler caches\n self.real_scalers = {}\n self.target_scaler = {}\n identifiers = []\n for identifier, sliced in df.groupby(id_column):\n\n if len(sliced) >= self._time_steps:\n\n data = sliced[real_inputs].values\n targets = sliced[[target_column]].values\n self.real_scalers[identifier] = sk_preprocessing.StandardScaler().fit(data)\n self.target_scaler[identifier] = sk_preprocessing.StandardScaler().fit(targets)\n identifiers.append(identifier)\n\n # Format categorical scalers\n categorical_inputs = extract_cols_from_data_type(\n DataTypes.CATEGORICAL, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n # Set all to str so that we don't have mixed integer/string columns\n srs = df[col].apply(str)\n categorical_scalers[col] = sk_preprocessing.LabelEncoder().fit(srs.values)\n num_classes.append(srs.nunique())\n\n # Set categorical scaler outputs\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes\n\n # Extract identifiers in case required\n self.identifiers = identifiers",
"def load_X_scaler(self, out_tag='lstm_scaler'): \n\n print ('loading X scaler: models/{}_X_scaler.pkl'.format(out_tag))\n self.X_scaler = load(open('models/{}_X_scaler.pkl'.format(out_tag),'rb'))",
"def __create_scaler(self):\n \n self.scaler = {}\n for component in self.comp_list:\n self.scaler[component] = self.__create_scaler_type()",
"def scale_data(x: np.ndarray, scaler=None) -> Tuple[np.ndarray, object]:\n original_shape = x.shape\n\n # reshape data to 2d array\n x = x.reshape((x.shape[0], -1))\n\n if scaler is None:\n scaler = StandardScaler().fit(x)\n\n x = scaler.transform(x)\n\n # reshape back\n x = x.reshape(original_shape)\n return x, scaler",
"def _get_classifier_to_name(self, classifier_name: str) -> object:\n\n classifiers = all_estimators('classifier')\n clf_class = [clf[1] for clf in classifiers if clf[0] == classifier_name]\n\n if clf_class:\n return clf_class[0]()\n \n else:\n msg = f'The passed classifier name \\'{classifier_name}\\' has no corresponding classifier, please make sure that the passed name corresponds to an actual sklearn classifier.'\n raise NameError(msg)",
"def get_X_scaler(self, X_train, out_tag='lstm_scaler', save=True):\n\n X_scaler = StandardScaler()\n X_scaler.fit(X_train.values)\n self.X_scaler = X_scaler\n if save:\n print('saving X scaler: models/{}_X_scaler.pkl'.format(out_tag))\n dump(X_scaler, open('models/{}_X_scaler.pkl'.format(out_tag),'wb'))",
"def test_scaler_initialised_with_scaler_kwargs(\n self, mocker, scaler, scaler_type_str, scaler_kwargs_value\n ):\n\n mocked = mocker.patch(\n f\"sklearn.preprocessing.{scaler_type_str}.__init__\", return_value=None\n )\n\n ScalingTransformer(\n columns=\"b\", scaler=scaler, scaler_kwargs=scaler_kwargs_value\n )\n\n assert mocked.call_count == 1, \"unexpected number of calls to init\"\n\n call_args = mocked.call_args_list[0]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n assert (\n call_pos_args == ()\n ), f\"unexpected positional args in {scaler_type_str} init call\"\n\n assert (\n call_kwargs == scaler_kwargs_value\n ), f\"unexpected kwargs in {scaler_type_str} init call\"",
"def from_config(cls, config: dict):\n scaler = cls(**config['params'])\n setattr(scaler, '_config', config['config'])\n setattr(scaler, '_from_config', True)\n\n _scaler_config = config['config'].pop('scaler_to_standardize_')\n setattr(scaler, '_scaler', StandardScaler.from_config(_scaler_config))\n\n rescaler = config['config'].pop('rescaler_config_')\n if rescaler:\n setattr(scaler, 'rescaler_', MinMaxScaler.from_config(rescaler))\n else:\n setattr(scaler, 'rescaler_', None)\n\n pre_standardizer = config['config'].pop('pre_center_config_')\n if pre_standardizer:\n setattr(scaler, 'pre_centerer_', Center.from_config(pre_standardizer))\n else:\n setattr(scaler, 'pre_centerer_', None)\n\n for attr, attr_val in config['config'].items():\n setattr(scaler, attr, attr_val)\n\n if isinstance(scaler.lambdas_, float):\n scaler.lambdas_ = [scaler.lambdas_]\n return scaler",
"def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])",
"def set_scalers(self, df):\n print('Setting scalers with training data...')\n\n column_definitions = self.get_column_definition()\n id_column = utils.get_single_col_by_input_type(InputTypes.ID,\n column_definitions)\n target_column = utils.get_single_col_by_input_type(InputTypes.TARGET,\n column_definitions)\n\n # Extract identifiers in case required\n self.identifiers = list(df[id_column].unique())\n\n # Format real scalers\n real_inputs = utils.extract_cols_from_data_type(\n DataTypes.REAL_VALUED, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n data = df[real_inputs].values\n self._real_scalers = sklearn.preprocessing.StandardScaler().fit(data)\n self._target_scaler = sklearn.preprocessing.StandardScaler().fit(\n df[[target_column]].values) # used for predictions\n\n # Format categorical scalers\n categorical_inputs = utils.extract_cols_from_data_type(\n DataTypes.CATEGORICAL, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n # Set all to str so that we don't have mixed integer/string columns\n srs = df[col].apply(str)\n categorical_scalers[col] = sklearn.preprocessing.LabelEncoder().fit(\n srs.values)\n num_classes.append(srs.nunique())\n\n # Set categorical scaler outputs\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes",
"def scaled_component(self, key):\n\n if key in self.components:\n dat = self.components[key] \n # Aliases\n elif key in component_from_alias:\n comp = component_from_alias[key]\n if comp in self.components:\n dat = self.components[comp] \n else:\n # Component not present, make zeros\n return np.zeros(self.shape)\n else:\n raise ValueError(f'Component not available: {key}')\n \n # Multiply by scale factor\n factor = self.factor \n \n if factor != 1:\n return factor*dat\n else:\n return dat",
"def test_scaler_fit_call(self, mocker, scaler, scaler_type_str):\n\n df = d.create_df_3()\n\n x = ScalingTransformer(\n columns=[\"b\", \"c\"], scaler=scaler, scaler_kwargs={\"copy\": True}\n )\n\n mocked = mocker.patch(\n f\"sklearn.preprocessing.{scaler_type_str}.fit\", return_value=None\n )\n\n x.fit(df)\n\n assert mocked.call_count == 1, \"unexpected number of calls to scaler fit\"\n\n call_args = mocked.call_args_list[0]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n expected_positional_args = (df[[\"b\", \"c\"]],)\n\n h.assert_equal_dispatch(\n expected=expected_positional_args,\n actual=call_pos_args,\n msg=f\"unexpected positional args in {scaler_type_str} fit call\",\n )\n\n assert call_kwargs == {}, f\"unexpected kwargs in {scaler_type_str} fit call\"",
"def get_normalization_layer(name: str, ds: tf.data.Dataset, weighted=False):\r\n # Normalization layer for the feature\r\n normalizer = tf.keras.layers.experimental.preprocessing.Normalization(axis=None)\r\n\r\n # Dataset that only yields specified feature\r\n if weighted:\r\n feature_ds = ds.map(lambda x, y, w: x[name])\r\n else:\r\n feature_ds = ds.map(lambda x, y: x[name])\r\n\r\n # Adapt the layer to the data scale\r\n normalizer.adapt(feature_ds)\r\n\r\n return normalizer",
"def __init__(self) -> None:\n self.name = \"minmaxScaler\"\n self.min = 0\n self.max = 0",
"def test_scaler():\n raw = io.read_raw_fif(raw_fname)\n events = read_events(event_name)\n picks = pick_types(raw.info, meg=True, stim=False, ecg=False,\n eog=False, exclude='bads')\n picks = picks[1:13:3]\n\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), preload=True)\n epochs_data = epochs.get_data()\n scaler = Scaler(epochs.info)\n y = epochs.events[:, -1]\n\n X = scaler.fit_transform(epochs_data, y)\n assert_true(X.shape == epochs_data.shape)\n X2 = scaler.fit(epochs_data, y).transform(epochs_data)\n assert_array_equal(X2, X)\n # these should be across time\n assert_allclose(X.std(axis=-2), 1.)\n assert_allclose(X.mean(axis=-2), 0., atol=1e-12)\n\n # Test inverse_transform\n Xi = scaler.inverse_transform(X, y)\n assert_array_almost_equal(epochs_data, Xi)\n\n for kwargs in [{'with_mean': False}, {'with_std': False}]:\n scaler = Scaler(epochs.info, **kwargs)\n scaler.fit(epochs_data, y)\n assert_array_almost_equal(\n X, scaler.inverse_transform(scaler.transform(X)))\n # Test init exception\n assert_raises(ValueError, scaler.fit, epochs, y)\n assert_raises(ValueError, scaler.transform, epochs, y)",
"def scale_data(x, y, x_scale_f = '../saved_models/scalers/9_params_21_2_x_scaler.pkl', y_scale_f = '../saved_models/scalers/9_params_21_2_y_scaler.pkl', par_slice = range(7) + range(8,9)):\n\tx_scaler = sklearn.externals.joblib.load(x_scale_f)\n\ty_scaler = sklearn.externals.joblib.load(y_scale_f)\n\tx_scaler.transform(x)\n\ty_scaler.transform(y)\n\tx = x[:,par_slice] \n\treturn x, y, x_scaler, y_scaler"
] | [
"0.68367714",
"0.63356686",
"0.620714",
"0.61540365",
"0.58169883",
"0.57977337",
"0.57299215",
"0.57015646",
"0.56682044",
"0.5622776",
"0.5611353",
"0.55854297",
"0.5540283",
"0.5517815",
"0.5501262",
"0.54594105",
"0.5455747",
"0.5352761",
"0.5330781",
"0.5323924",
"0.53156656",
"0.5280825",
"0.5212627",
"0.52125156",
"0.51884276",
"0.5158087",
"0.5147804",
"0.5147335",
"0.51376003",
"0.5111783"
] | 0.81780565 | 0 |
get a PCA decomposition | def get_pca():
from sklearn.decomposition import PCA
return PCA() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getPCA(data):\n #covM = np.cov(data.T) #note that np.cov define row as variables, col as observations\n #corM = np.corrcoef(data.T) # we will use correlation matrix instead of cov.\n covM = np.cov(data.T)\n eigvalue,eigvector = np.linalg.eig(covM) # each col of the eigvector matrix corresponds to one eigenvalue. So, each col is the coeff of one component\n pca = np.dot(data,eigvector) # each col is one pca, each row is one obs in that pca. \n return eigvalue,eigvector,pca",
"def pca(X = Math.array([]), no_dims = 50):\n\n print \"Preprocessing the data using PCA...\"\n (n, d) = X.shape;\n X = X - Math.tile(Math.mean(X, 0), (n, 1));\n (l, M) = Math.linalg.eig(Math.dot(X.T, X));\n Y = Math.dot(X, M[:,0:no_dims]);\n return Y;",
"def pca_decomposition(data, dept, n_components=12):\n try:\n df_svd = pivot_df(data, dept)\n pca = PCA(n_components=n_components)\n df_low = pca.fit_transform(df_svd)\n df_inverse = pca.inverse_transform(df_low)\n\n # re-frame\n df_inverse = reframe_df(previous_df=df_svd, processed_data=df_inverse)\n return df_inverse\n\n except:\n # if pca fail,\n return pivot_df(data, dept)",
"def pca(self):\n return DataFramePCA(self.subset_)",
"def pca():\n pca = PCA()\n\n data = pca.fit_transform([[22,23,24],[23,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54]])\n\n print(data)",
"def pca(X, ndim):\n X_m = X - np.mean(X, axis=0)\n u, s, vh = np.linalg.svd(X_m)\n # traditional notation decomp(A) = U (sigma) VT = (u * s) @ vh\n W = vh[0:ndim].T\n # X_m = X - np.mean(X, axis=0)\n return np.matmul(X_m, W)",
"def doPCA(self):\n data = [l.points for l in self.preprocessedLandmarks]\n data.append(data[0])\n\n S = np.cov(np.transpose(data))\n\n eigenvalues, eigenvectors = np.linalg.eig(S)\n sorted_values = np.flip(eigenvalues.argsort(), 0)[:self.pcaComponents]\n\n self.eigenvalues = eigenvalues[sorted_values]\n self.eigenvectors = eigenvectors[:, sorted_values]\n # print(self.eigenvalues)\n return self",
"def pca(self, X):\n return ImgCompression.svd(self, X)",
"def get3dPCA(data):\n\n return PCA(n_components = 3).fit_transform(data)",
"def apply_PCA(data, ncomp):\n import sklearn.decomposition as dc\n \n pca = dc.PCA(n_components=ncomp, whiten=False, svd_solver='full')\n cps = pca.fit_transform(data)\n svl = pca.singular_values_\n return cps,pca,svl",
"def pca(features, components=6):\n pca = PCA(n_components=components)\n transformed = pca.fit(features).transform(features)\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(transformed)\n return scaler.transform(transformed), pca, scaler",
"def pca_2(emb) :\n pcaer = skd.PCA(n_components=2)\n pca = pcaer.fit_transform(emb)\n \n return pca",
"def PCA(X, k):\n cov = np.matmul(np.matrix.transpose(X), X)\n w, v = np.linalg.eig(cov)\n k_largest = np.argsort(w)[::-1][:k]\n v = np.matrix.transpose(v)\n U = v[k_largest]\n S = w[k_largest]\n return U, S",
"def pca(data):\n mean = data.sum(axis=0) / data.shape[0]\n # show_image(mean)\n cv_matrix = np.cov(data.T)\n e_values, e_vectors = la.eig(cv_matrix)\n return e_values, e_vectors.T, mean",
"def pca(self):\n self.pca_mean = self.X.mean(axis=1)\n X_meanC = self.X - self.pca_mean[:, None]\n (self.pca_U, self.pca_S, self.pca_V) = np.linalg.svd(X_meanC, full_matrices=False)\n self.pc_weights = np.dot(np.diag(self.pca_S), self.pca_V)\n self.pc_stdevs = np.std(self.pc_weights, axis=1)",
"def PCA(X, dims_rescaled_data=21):\n # pca = decomposition.PCA(n_components=3)\n # x_std = StandardScaler().fit_transform(X)\n # a = pca.fit_transform(x_std)\n\n R = np.cov(X, rowvar=False)\n evals, evecs = scipy.linalg.eigh(R)\n idx = np.argsort(evals)[::-1]\n evecs = evecs[:,idx]\n\n evals = evals[idx]\n evecs = evecs[:, :dims_rescaled_data]\n\n newX = np.dot(evecs.T, X.T).T\n\n return newX #, evals, evecs",
"def PCA(data, n=2):\n U, S, Vt = np.linalg.svd(data, full_matrices=False)\n s = np.diag(S)\n newdata = np.dot(U[:, :n], np.dot(s[:n, :n], Vt[:n,:]))\n return newdata",
"def do_pca(x_data, n_class):\n\n run_pca = decomposition.PCA(n_components = n_class)\n pca_fit = run_pca.fit(x_data)\n #pca_fit\n x_pca = run_pca.transform(x_data);\n #pca_cov = run_pca.get_covariance(x_pca)\n #pca_score = run_pca.score(x_data)\n pca_noise = pca_fit.noise_variance_\n pca_var_explained = pca_fit.explained_variance_ratio_\n\n return x_pca, pca_noise, pca_var_explained",
"def pca(data, components):\n\n\t_pca = PCA(n_components = components)\n\t_pca.fit(data)\n\tvar = _pca.explained_variance_ratio_\n\tcum_var = np.cumsum(np.round(var, decimals=4)*100)\n\tfig = plt.plot(cum_var)\n\trotation = pd.DataFrame(\n\t\t_pca.components_,\n\t\tcolumns = data.columns,\n\t\tindex = ['PC-1','PC-2','PC-3','PC-4','PC-5','PC-6','PC-7','PC-8','PC-9',]\n\t\t)\n\n\treturn (fig, rotation)",
"def princomp(A):\n # computing eigenvalues and eigenvectors of covariance matrix\n M = (A-np.mean(A.T,axis=1)).T # subtract the mean (along columns)\n [latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted\n score = np.dot(coeff.T,M) # projection of the data in the new space\n return coeff,score,latent",
"def pca(x):\n\t\n\tx = (x - x.mean(axis = 0)) # Subtract the mean of column i from column i, in order to center the matrix.\n\t\n\tnum_observations, num_dimensions = x.shape\n\t\n\t# Often, we have a large number of dimensions (say, 10,000) but a relatively small number of observations (say, 75). In this case, instead of directly computing the eigenvectors of x^T x (a 10,000 x 10,000 matrix), it's more efficient to compute the eigenvectors of x x^T and translate these into the eigenvectors of x^T x by using the transpose trick. \n\t# The transpose trick says that if v is an eigenvector of M^T M, then Mv is an eigenvector of MM^T.\n\t# We arbitrarily select \"100\" as the switching threshold. Another approach is to switch by comparing num_observations and num_dimensions.\n\tif num_dimensions > 100:\n\t\teigenvalues, eigenvectors = linalg.eigh(dot(x, x.T))\n\t\tv = (dot(x.T, eigenvectors).T)[::-1] # Unscaled, but the relative order is still correct.\n\t\ts = sqrt(eigenvalues)[::-1] # Unscaled, but the relative order is still correct.\n\telse:\n\t\tu, s, v = linalg.svd(x, full_matrices = False)\n\t\t\n\treturn v, s",
"def doPCA(pairs, embedding, num_components=10):\n matrix = []\n for a, b in pairs:\n center = (embedding.v(a) + embedding.v(b)) / 2\n matrix.append(embedding.v(a) - center)\n matrix.append(embedding.v(b) - center)\n matrix = np.array(matrix)\n pca = PCA(n_components=num_components)\n pca.fit(matrix)\n # bar(range(num_components), pca.explained_variance_ratio_)\n return pca",
"def _apply_pca(self, X):\n newX = np.reshape(X, (-1, X.shape[2]))\n pca = sklearnPCA(n_components=self.num_components, whiten=True)\n newX = pca.fit_transform(newX)\n newX = np.reshape(newX, (X.shape[0], X.shape[1], self.num_components))\n return newX",
"def kernelpca(X, n_comp):\n estimator = decomposition.KernelPCA(n_components = n_comp, kernel = 'rbf')\n estimator.fit(X)\n X_proj = estimator.transform(X)\n return estimator.components_, X_proj,",
"def principle_component_analysis(data_frame, dim=2):\n pca = PCA(n_components=dim)\n sc = StandardScaler()\n y = data_frame.loc[:, [\"Label\"]].values\n x = pd.DataFrame(data_frame[\"Vector\"].tolist())\n x = sc.fit_transform(x)\n principlecomponents = pca.fit_transform(x)\n principalDf = pd.DataFrame(data=principlecomponents)\n data_frame[\"Vector\"] = principalDf.values.tolist()",
"def pca_algorithm(self):\n if self.rotation_algo == 'randomized':\n return PCA(svd_solver='randomized', random_state=self.random_state)\n elif self.rotation_algo == 'pca':\n return PCA()\n else:\n raise ValueError(\"`rotation_algo` must be either \"\n \"'pca' or 'randomized'.\")",
"def pca_algorithm(self):\n if self.rotation_algo == 'randomized':\n return PCA(svd_solver='randomized', random_state=self.random_state)\n elif self.rotation_algo == 'pca':\n return PCA()\n else:\n raise ValueError(\"`rotation_algo` must be either \"\n \"'pca' or 'randomized'.\")",
"def performPCA(dataSet, numShapesInDataset, numPointsInShapes, num_components):\n\tdataMat = np.array(dataSet).reshape((numShapesInDataset, numPointsInShapes*2))\n\t\n\t\"\"\"Creating the covariance matrix\"\"\"\n\tcovarMat = np.cov(dataMat.T)\n\t\t\n\t\"\"\"Generating the eigen vectors and eigen values\"\"\"\n\teigVals, eigVecs = np.linalg.eig(covarMat)\n\n\t\"\"\"Taking the first num_components eigen vectors and values, and the center of the space.\"\"\"\n\tprincipleComponents = np.real(eigVecs[:, 0:num_components])\n\tprincipleValues = np.real(eigVals[0:num_components])\n\tmeanShape = dataMat.mean(0).reshape((numPointsInShapes * 2, 1))\n\treturn principleComponents, principleValues, meanShape",
"def getPCA(matrix):\n eVal, eVec = np.linalg.eigh(matrix)\n indices = eVal.argsort()[::-1] # arguments for sorting eVal desc\n eVal, eVec = eVal[indices], eVec[:, indices]\n eVal = np.diagflat(eVal)\n return eVal, eVec",
"def PCA (numpy_cloud ):\r\n\r\n # abort, if there are no points\r\n if (numpy_cloud.shape[0] == 0):\r\n #print (\"In normals.py, in PCA: The input array is empty. Returning a null vector and high sigma\")\r\n return np.array ((0, 0, 0)), 1.0, np.array ((0, 0, 0))\r\n\r\n # we only need three colums [X, Y, Z, I] -> [X, Y, Z]\r\n numpy_cloud = numpy_cloud[:, :3].copy () # copying takes roughly 0.000558 seconds per 1000 points\r\n cloud_size = numpy_cloud.shape[0]\r\n\r\n # get covariance matrix\r\n a_transposed_a, mass_center = build_covariance_matrix (numpy_cloud )\r\n\r\n # get normal vector and smallest eigenvalue\r\n normal_vector, smallest_eigenvalue = eigenvalue_decomposition (a_transposed_a )\r\n\r\n # the noise is based on the smallest eigenvalue and normalized by number of points in cloud\r\n noise = smallest_eigenvalue\r\n if (cloud_size <= 3 or noise < 1 * 10 ** -10):\r\n sigma = noise # no noise with 3 points\r\n else:\r\n sigma = sqrt(noise/(cloud_size - 3) )\r\n\r\n return normal_vector, sigma, mass_center"
] | [
"0.76003706",
"0.74183345",
"0.73857856",
"0.73070925",
"0.7215321",
"0.7163089",
"0.7066037",
"0.7064292",
"0.7050474",
"0.7049609",
"0.70191026",
"0.7006228",
"0.6999517",
"0.6968802",
"0.6924137",
"0.68874854",
"0.6884852",
"0.68622917",
"0.67792535",
"0.6774958",
"0.67328346",
"0.6731503",
"0.67106754",
"0.6646318",
"0.66424024",
"0.6629324",
"0.6629324",
"0.66278887",
"0.6620293",
"0.656829"
] | 0.84510195 | 0 |
get a PolynomialFeatures transform | def get_poly(kwargs):
from sklearn.preprocessing import PolynomialFeatures
return PolynomialFeatures(**kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_polynomial_features(self, X) :\n\n n,d = X.shape\n\n ### ========== TODO : START ========== ###\n # part b: modify to create matrix for simple linear model\n # part g: modify to create matrix for polynomial model\n Phi = X\n m = self.m_\n\n if m == 1:\n Phi = np.zeros((n,2))\n for i in range(n):\n Phi[i,0] = 1\n Phi[i, 1] = X[i]\n\n else:\n Phi = np.ones((n,m+1))#n*m+1 dimmension\n power_arr = np.arange(0, m+1)\n for index, row in enumerate(Phi):# get every row\n row = np.repeat(X[index],m+1)\n row = np.power(row,power_arr)\n Phi [index,] = row\n #also could use the following\n \"\"\"\n import sklearn.preprocessing as sk\n #X is a N*1 vector\n poly_mat = sk.PolynomialFeatures(3)\n poly.fit_transform(a)\n \"\"\"\n\n\n\n\n\n ### ========== TODO : END ========== ###\n\n return Phi",
"def polyfeatures(self, X, degree):\n #TODO\n \n for d in range(2,degree+1):\n X = np.append(X,X[:,[0]]**d,1)\n \n return X",
"def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly",
"def _make_features(self, x):\n\t\tx = x.unsqueeze(1)\n\t\treturn torch.cat([x ** i for i in range(1, self._degree+1)], 1)",
"def make_features(x):\n x = x.unsqueeze(1)\n # torch.cat 实现tensor拼接\n return torch.cat([x ** i for i in range(1, POLY_DEGREE + 1)], 1)",
"def build_poly_by_feature(tx, degrees):\n poly_tempt = np.ones([tx.shape[0],1])\n for idx, degree in enumerate(degrees):\n feature_poly = build_poly(tx[:,idx], int(degree))\n poly_tempt = np.c_[poly_tempt, feature_poly[:,1:]]\n return poly_tempt",
"def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p",
"def feature_extraction(self) -> None:\n # Add the hour, minute, and x column to the data\n self.df_poly[\"hour\"] = self.df_poly[\"time\"].apply(lambda y: y.hour)\n self.df_poly[\"minute\"] = self.df_poly[\"time\"].apply(lambda y: y.minute)\n self.df_poly[\"x\"] = self.df_poly[\"hour\"] * 60 + self.df_poly[\"minute\"]\n\n # Empty list to hold the feature names\n poly_feature_names = []\n\n # Add the poly columns to the df_poly\n for degree in [0, 1, 2, 3, 4, 5]:\n self.df_poly = poly(self.df_poly, degree)\n poly_feature_names.append(\"poly_\" + str(degree))\n\n # filterout + - inf, nan\n self.df_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ]\n\n # Save the poly feature name\n self.poly_feature_names = poly_feature_names\n feature_names = []\n\n #########################################################################################\n train_index_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ].index\n X_train_poly, y_train_poly = (\n self.df_poly[self.poly_feature_names].loc[train_index_poly],\n self.df_poly[\"y\"].loc[train_index_poly],\n )\n\n # Build the Polynomial Regression Model\n lin_reg = LinearRegression()\n lin_reg.fit(X_train_poly, y_train_poly)\n self.poly_model = lin_reg\n y_train_season = lin_reg.predict(X_train_poly)\n self.y_train_season_obj = y_train_season\n #########################################################################################\n\n for n in [10, 15, 20, 25, 30]:\n self.df = MOM(self.df, n)\n feature_names.append(\"MOM_\" + str(n))\n for n in [10, 15, 20, 25, 30]:\n self.df = ROC(self.df, n)\n feature_names.append(\"ROC_\" + str(n))\n for n in [1, 2, 3, 4, 5]:\n self.df = LAG(self.df, n)\n feature_names.append(\"LAG_\" + str(n))\n for n in [10, 20, 30]:\n self.df = MA(self.df, n)\n feature_names.append(\"MA_\" + str(n))\n\n self.df = self.df[\n ~self.df.isin([np.nan, np.inf, -np.inf]).any(1)\n ] # filterout + - inf, nan\n self.feature_names = feature_names",
"def polynomial_basis(X, degree):\n n_samples, n_features = X.shape\n\n # The number of monomials is (n + d) choose d\n n_monomials = int(factorial(n_features + degree)/(factorial(n_features)*factorial(degree)))\n features = np.ones((n_monomials, n_samples))\n col = 1\n x_T = X.T\n\n for deg in range(1, degree + 1):\n for combs in combinations_with_replacement(x_T, deg):\n features[col, :] = reduce(lambda x, y: x * y, combs)\n col += 1\n return features.T",
"def toyData(w,sigma,N): \n #Degree of polynomial \n degree=w.size; \n \n #generate x values \n x=np.linspace(0, 1,N);\n \n poly=preprocessing.PolynomialFeatures(degree-1,include_bias=True)\n \n PHI=poly.fit_transform(x.reshape(N,1)) \n \n y=np.dot(PHI,w);\n \n target=y+np.random.normal(0, sigma, N);\n \n Out=[x,y,PHI, target]\n\n return Out",
"def map_feature(x):\n m, n = x.shape\n out = x\n\n # Add quodratic features.\n for i in range(n):\n for j in range(i, n):\n out = hstack((out, x[:, i].reshape(m, 1) * x[:, j].reshape(m, 1)))\n\n # Add cubic features.\n for i in range(n):\n for j in range(i, n):\n for k in range(j, n):\n out = hstack(\n (out, x[:, i].reshape(m, 1) * x[:, j].reshape(m, 1) * x[:, k].reshape(m, 1)))\n return out",
"def add_polynomial_features(x, power):\n if type(power) is int and type(x) is np.ndarray:\n return np.concatenate([x**i for i in range(1, power+1)], axis=1)\n return None",
"def polytrans(features,features_test,features_oos,poly): \n \n features['FEMA_21'] = poly.fit_transform(np.nan_to_num(features.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features['FEMA_8'] = poly.fit_transform(np.nan_to_num(features.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features['FADRLo'] = poly.fit_transform(np.nan_to_num(features.FADRLo.astype(np.float32)).reshape(-1, 1))\n features['FADRHi'] = poly.fit_transform(np.nan_to_num(features.FADRHi.astype(np.float32)).reshape(-1, 1))\n features['FRVI40'] = poly.fit_transform(np.nan_to_num(features.FRVI40.astype(np.float32)).reshape(-1, 1))\n features['FRVI60'] = poly.fit_transform(np.nan_to_num(features.FRVI60.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FSMA200'] = poly.fit_transform(np.nan_to_num(features.FSMA200.astype(np.float32)).reshape(-1, 1))\n features['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features['FPP'] = poly.fit_transform(np.nan_to_num(features.FPP.astype(np.float32)).reshape(-1, 1))\n features['FS38'] = poly.fit_transform(np.nan_to_num(features.FS38.astype(np.float32)).reshape(-1, 1))\n features['FS62'] = poly.fit_transform(np.nan_to_num(features.FS62.astype(np.float32)).reshape(-1, 1))\n features['FS100'] = poly.fit_transform(np.nan_to_num(features.FS100.astype(np.float32)).reshape(-1, 1))\n features['FS138'] = poly.fit_transform(np.nan_to_num(features.FS138.astype(np.float32)).reshape(-1, 1))\n 
features['FR162'] = poly.fit_transform(np.nan_to_num(features.FS162.astype(np.float32)).reshape(-1, 1))\n features['FS200'] = poly.fit_transform(np.nan_to_num(features.FS200.astype(np.float32)).reshape(-1, 1))\n features['FR38'] = poly.fit_transform(np.nan_to_num(features.FR38.astype(np.float32)).reshape(-1, 1))\n features['FR62'] = poly.fit_transform(np.nan_to_num(features.FR62.astype(np.float32)).reshape(-1, 1))\n features['FR100'] = poly.fit_transform(np.nan_to_num(features.FR100.astype(np.float32)).reshape(-1, 1))\n features['FR138'] = poly.fit_transform(np.nan_to_num(features.FR138.astype(np.float32)).reshape(-1, 1))\n features['FR162'] = poly.fit_transform(np.nan_to_num(features.FR162.astype(np.float32)).reshape(-1, 1))\n features['FR200'] = poly.fit_transform(np.nan_to_num(features.FR200.astype(np.float32)).reshape(-1, 1))\n features['SBATR'] = poly.fit_transform(np.nan_to_num(features.SBATR.astype(np.float32)).reshape(-1, 1))\n \n features_test['FEMA_21'] = poly.fit_transform(np.nan_to_num(features_test.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features_test['FEMA_8'] = poly.fit_transform(np.nan_to_num(features_test.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features_test['FADRLo'] = poly.fit_transform(np.nan_to_num(features_test.FADRLo.astype(np.float32)).reshape(-1, 1))\n features_test['FADRHi'] = poly.fit_transform(np.nan_to_num(features_test.FADRHi.astype(np.float32)).reshape(-1, 1))\n features_test['FRVI40'] = poly.fit_transform(np.nan_to_num(features_test.FRVI40.astype(np.float32)).reshape(-1, 1))\n features_test['FRVI60'] = poly.fit_transform(np.nan_to_num(features_test.FRVI60.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features_test['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features_test.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features_test['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features_test.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features_test['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features_test.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n 
features_test['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FSMA200'] = poly.fit_transform(np.nan_to_num(features_test.FSMA200.astype(np.float32)).reshape(-1, 1))\n features_test['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features_test.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features_test['FPP'] = poly.fit_transform(np.nan_to_num(features_test.FPP.astype(np.float32)).reshape(-1, 1))\n features_test['FS38'] = poly.fit_transform(np.nan_to_num(features_test.FS38.astype(np.float32)).reshape(-1, 1))\n features_test['FS62'] = poly.fit_transform(np.nan_to_num(features_test.FS62.astype(np.float32)).reshape(-1, 1))\n features_test['FS100'] = poly.fit_transform(np.nan_to_num(features_test.FS100.astype(np.float32)).reshape(-1, 1))\n features_test['FS138'] = poly.fit_transform(np.nan_to_num(features_test.FS138.astype(np.float32)).reshape(-1, 1))\n features_test['FR162'] = poly.fit_transform(np.nan_to_num(features_test.FS162.astype(np.float32)).reshape(-1, 1))\n features_test['FS200'] = poly.fit_transform(np.nan_to_num(features_test.FS200.astype(np.float32)).reshape(-1, 1))\n features_test['FR38'] = poly.fit_transform(np.nan_to_num(features_test.FR38.astype(np.float32)).reshape(-1, 1))\n features_test['FR62'] = poly.fit_transform(np.nan_to_num(features_test.FR62.astype(np.float32)).reshape(-1, 1))\n features_test['FR100'] = poly.fit_transform(np.nan_to_num(features_test.FR100.astype(np.float32)).reshape(-1, 1))\n features_test['FR138'] = poly.fit_transform(np.nan_to_num(features_test.FR138.astype(np.float32)).reshape(-1, 1))\n features_test['FR162'] = poly.fit_transform(np.nan_to_num(features_test.FR162.astype(np.float32)).reshape(-1, 1))\n features_test['FR200'] = poly.fit_transform(np.nan_to_num(features_test.FR200.astype(np.float32)).reshape(-1, 1))\n features_test['SBATR'] = poly.fit_transform(np.nan_to_num(features_test.SBATR.astype(np.float32)).reshape(-1, 1))\n\n features_oos['FEMA_21'] = poly.fit_transform(np.nan_to_num(features_oos.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features_oos['FEMA_8'] = poly.fit_transform(np.nan_to_num(features_oos.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features_oos['FADRLo'] = poly.fit_transform(np.nan_to_num(features_oos.FADRLo.astype(np.float32)).reshape(-1, 1))\n features_oos['FADRHi'] = poly.fit_transform(np.nan_to_num(features_oos.FADRHi.astype(np.float32)).reshape(-1, 1))\n features_oos['FRVI40'] = poly.fit_transform(np.nan_to_num(features_oos.FRVI40.astype(np.float32)).reshape(-1, 1))\n features_oos['FRVI60'] = poly.fit_transform(np.nan_to_num(features_oos.FRVI60.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features_oos['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features_oos.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features_oos['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features_oos.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n 
features_oos['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features_oos.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FSMA200'] = poly.fit_transform(np.nan_to_num(features_oos.FSMA200.astype(np.float32)).reshape(-1, 1))\n features_oos['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features_oos.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features_oos['FPP'] = poly.fit_transform(np.nan_to_num(features_oos.FPP.astype(np.float32)).reshape(-1, 1))\n features_oos['FS38'] = poly.fit_transform(np.nan_to_num(features_oos.FS38.astype(np.float32)).reshape(-1, 1))\n features_oos['FS62'] = poly.fit_transform(np.nan_to_num(features_oos.FS62.astype(np.float32)).reshape(-1, 1))\n features_oos['FS100'] = poly.fit_transform(np.nan_to_num(features_oos.FS100.astype(np.float32)).reshape(-1, 1))\n features_oos['FS138'] = poly.fit_transform(np.nan_to_num(features_oos.FS138.astype(np.float32)).reshape(-1, 1))\n features_oos['FR162'] = poly.fit_transform(np.nan_to_num(features_oos.FS162.astype(np.float32)).reshape(-1, 1))\n features_oos['FS200'] = poly.fit_transform(np.nan_to_num(features_oos.FS200.astype(np.float32)).reshape(-1, 1))\n features_oos['FR38'] = poly.fit_transform(np.nan_to_num(features_oos.FR38.astype(np.float32)).reshape(-1, 1))\n features_oos['FR62'] = poly.fit_transform(np.nan_to_num(features_oos.FR62.astype(np.float32)).reshape(-1, 1))\n features_oos['FR100'] = poly.fit_transform(np.nan_to_num(features_oos.FR100.astype(np.float32)).reshape(-1, 1))\n features_oos['FR138'] = poly.fit_transform(np.nan_to_num(features_oos.FR138.astype(np.float32)).reshape(-1, 1))\n features_oos['FR162'] = poly.fit_transform(np.nan_to_num(features_oos.FR162.astype(np.float32)).reshape(-1, 1))\n features_oos['FR200'] = poly.fit_transform(np.nan_to_num(features_oos.FR200.astype(np.float32)).reshape(-1, 1))\n features_oos['SBATR'] = poly.fit_transform(np.nan_to_num(features_oos.SBATR.astype(np.float32)).reshape(-1, 1))\n\n return(features,features_test,features_oos)",
"def load_poly_features(df_train, df_test):\n logger = logging.getLogger(__name__)\n logger.debug('Loading polynomial features..')\n # Make a new dataframe for polynomial features\n poly_features = df_train[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH']]\n poly_features_test = df_test[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH']]\n\n # imputer for handling missing values\n imputer = Imputer(strategy='median')\n\n # Need to impute missing values\n poly_features = imputer.fit_transform(poly_features)\n poly_features_test = imputer.transform(poly_features_test)\n\n # Create the polynomial object with specified degree\n poly_transformer = PolynomialFeatures(degree=3)\n # Train the polynomial features\n poly_transformer.fit(poly_features)\n\n # Transform the features\n poly_features = poly_transformer.transform(poly_features)\n poly_features_test = poly_transformer.transform(poly_features_test)\n logger.debug('Polynomial Features shape: %s' % str(poly_features.shape))\n\n df_poly_features = pd.DataFrame(poly_features,\n columns=poly_transformer.get_feature_names(['EXT_SOURCE_1', 'EXT_SOURCE_2',\n 'EXT_SOURCE_3', 'DAYS_BIRTH']))\n df_poly_features_test = pd.DataFrame(poly_features_test,\n columns=poly_transformer.get_feature_names(['EXT_SOURCE_1', 'EXT_SOURCE_2',\n 'EXT_SOURCE_3', 'DAYS_BIRTH']))\n df_poly_features['SK_ID_CURR'] = df_train['SK_ID_CURR']\n df_poly_features_test['SK_ID_CURR'] = df_test['SK_ID_CURR']\n logger.info('Loaded polynomial features')\n return df_poly_features, df_poly_features_test",
"def transform_data(features):\n\n def cart2pol(my_row):\n x = my_row[0]\n y = my_row[1]\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return [rho, phi]\n\n #vfunc = np.vectorize(cart2pol)\n #transformed = vfunc(features)\n out = []\n #print(features, \"\\n\\n\")\n for row in features:\n out.append(cart2pol(row))\n out = np.array(out)\n #print(\"out is\\n\", out)\n \"\"\"\n x = out[:,0]\n y = out[:,1]\n \n plt.scatter(x,y)\n plt.show()\n \"\"\"\n return out",
"def generate_coefficients_data(poly_degree: int, performance_data: pd.DataFrame, param_columns: typing.List) -> pd.DataFrame:\n if poly_degree != 2:\n logging.warning('Not Implemented: polynomial degree of > 2. Will use degree 2 for meta-model')\n coef_names = get_coefficient_names()\n results = []\n for idx, task_id in enumerate(performance_data['task_id'].unique()):\n frame_task = performance_data.loc[performance_data['task_id'] == task_id]\n model = sklearn.linear_model.LinearRegression(fit_intercept=False)\n poly_feat = sklearn.preprocessing.PolynomialFeatures(2)\n X = poly_feat.fit_transform(frame_task[param_columns])\n y = frame_task['predictive_accuracy']\n model.fit(X, y)\n result = {\n 'task_id': task_id,\n coef_names[0]: model.coef_[0],\n coef_names[1]: model.coef_[1],\n coef_names[2]: model.coef_[2],\n coef_names[3]: model.coef_[3],\n coef_names[4]: model.coef_[4],\n coef_names[5]: model.coef_[5],\n }\n results.append(result)\n return pd.DataFrame(results).set_index('task_id')",
"def predict(self, X) :\n if self.coef_ is None :\n raise Exception(\"Model not initialized. Perform a fit first.\")\n\n X = self.generate_polynomial_features(X) # map features\n\n ### ========== TODO : START ========== ###\n # part c: predict y\n # for this we first get the single value of feature vector, then X in the transposed form and then we have to multiply by Theta\n\n y = np.dot(X, self.coef_)#coef is the coef matrix\n ### ========== TODO : END ========== ###\n\n\n return y",
"def expand_features(x, degree):\n N = x.shape[0]\n D = x.shape[1]\n \n # Matrix to be returned\n phi = np.ones((N, 1))\n # Holds X^deg\n xdeg = np.ones((N, D))\n for deg in range (1,degree+1) :\n xdeg *= x\n phi = np.c_[phi, xdeg] \n \n return phi",
"def build_poly(x, degree): \n # ***************************************************\n # COPY YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix formed\n # by applying the polynomial basis to the input data\n # ***************************************************\n raise NotImplementedError",
"def build_poly(x, degree):\n \n X = np.vander((x[:,0]).T, degree+1, increasing=True)\n \n for i in range(1,np.shape(x)[1],1):\n feat = (x[:,i]).T\n vander = np.vander(feat, degree+1, increasing=True)\n #remove the column of 1 at the beginning of each vander\n vander = np.delete(vander, 0,axis = 1)\n #concatenation\n X = np.concatenate((X, vander), axis=1)\n \n return X",
"def build_poly(x, degree):\n phi = np.ones(len(x))\n phi = np.vstack((phi, [x**(j+1) for j in range(degree)]))\n \n return phi.T",
"def polynomial_variables(self):\n return self._polynomial_variables",
"def poly_regression(self,precision=8):\n # return empty lists if input is empty\n if self.training == []:\n return [],[]\n\n latitudes = []\n longitudes = []\n for point in self.training[:-1]:\n latitudes.append(point[0])\n longitudes.append(point[1]) \n # store everything in a dataframe\n latDf = pd.DataFrame(numpy.array(latitudes), columns=['latitudes'])\n longDf = pd.DataFrame(numpy.array(longitudes), columns=['longitudes'])\n\n # learn how to do regression\n reg = linear_model.LinearRegression()\n\n # pass the order of your polynomial here \n poly = PolynomialFeatures(precision)\n\n \n # regression with latitude as domain\n vertical_predicted_path = []\n transform = poly.fit_transform(longDf)\n\n reg.fit(transform,latDf)\n predictions = reg.predict(transform)\n\n for i in range(len(predictions)):\n vertical_predicted_path.append([predictions[i][0],longDf[\"longitudes\"][i]])\n\n \n # regression with longitude domain\n horizontal_predicted_path = []\n transform = poly.fit_transform(latDf)\n\n reg.fit(transform,longDf)\n predictions = reg.predict(transform)\n\n for i in range(len(predictions)):\n horizontal_predicted_path.append([latDf[\"latitudes\"][i], predictions[i][0]])\n\n self.horizontal = sorted(horizontal_predicted_path, key=lambda k: [k[1], k[0]])\n self.vertical = sorted(vertical_predicted_path, key=lambda k: [k[0], k[1]])\n \n # return sorted horizontal and vertical prediction\n return self.horizontal, self.vertical",
"def build_poly(x, degree):\n tx = np.zeros((x.shape[0], x.shape[1]*(degree+1)))\n \n for j in range(degree+1):\n tx[:,x.shape[1]*j:x.shape[1]*(j+1)] = np.power(x,j)\n \n return tx",
"def __getPolynomial(self) -> 'func':\n return lambda x: sum(self.pts[i]*base(x)\n for i, base in enumerate(self.basis))",
"def get_conv_features(self, X):\n convfeatures = blah\n return convfeatures",
"def feature_map(self, x):\n if not self.use_resnet:\n return self.features(x)\n x = self.features.conv1(x)\n x = self.features.bn1(x)\n x = self.features.relu(x)\n x = self.features.maxpool(x)\n c2 = self.features.layer1(x)\n c3 = self.features.layer2(c2)\n c4 = self.features.layer3(c3)\n return c4",
"def yy(x):\r\n return Feature(x, \"YY\")",
"def zz(x):\r\n return Feature(x, \"ZZ\")",
"def _create_ploynomial_array(self, coeff, x):\n xarr = numpy.array(x)\n yarr = numpy.zeros(len(xarr))\n for idim in range(len(coeff)):\n ai = coeff[idim]\n yarr += ai*xarr**idim\n return yarr"
] | [
"0.7088703",
"0.69679934",
"0.66370046",
"0.6578305",
"0.6512583",
"0.6453376",
"0.63881963",
"0.6234599",
"0.6210643",
"0.6198964",
"0.61715096",
"0.61595196",
"0.6012623",
"0.60067403",
"0.595723",
"0.5934394",
"0.58879584",
"0.5873344",
"0.582992",
"0.5797094",
"0.57733417",
"0.5773121",
"0.57382864",
"0.5718979",
"0.57173944",
"0.57056445",
"0.5676183",
"0.56503546",
"0.5626869",
"0.5606366"
] | 0.77433574 | 0 |
Check GMail E.g. messages,unseen = imap.check_gmail('username.com','password') | def check_gmail(username, password):
i = imaplib.IMAP4_SSL('imap.gmail.com')
try:
i.login(username, password)
x, y = i.status('INBOX', '(MESSAGES UNSEEN)')
messages = int(re.search('MESSAGES\s+(\d+)', y[0]).group(1))
unseen = int(re.search('UNSEEN\s+(\d+)', y[0]).group(1))
return messages, unseen
except:
return False, 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkEmail():\n\tpop_conn = poplib.POP3_SSL('pop.gmail.com')\n\tpop_conn.user('')\n\tpop_conn.pass_('')\n\t#Get messages from server:\n\tmessages = [pop_conn.retr(i) for i in range(1, len(pop_conn.list()[1]) + 1)]\n\t# Concat message pieces:\n\tmessages = [\"\\n\".join(mssg[1]) for mssg in messages]\n\t#Parse message intom an email object:\n\tmessages = [parser.Parser().parsestr(mssg) for mssg in messages]\n\tflag = 0\n\tsweep = None\n\tfor message in messages:\n\t\tsubject = message['subject']\n\t\tif subject is None:\n\t\t\tcontinue\n\t\telif \"CommenceSweep:\" in subject:\n\t\t\tstart = subject.find(\":\")\n\t\t\tcommand = subject[start+1:]\n\t\t\tprint command\n\t\t\tif \"Comp\"+sys.argv[1] in command:\n\t\t\t\tstart = command.find(\"-\")\n\t\t\t\tsweep = command[start+1:]\n\t\t\t\tprint sweep\n\t\t\t\tpoplist = pop_conn.list()\n\t\t\t\tmsglist = poplist[1]\n\t\t\t\tfor msgspec in msglist:\n\t\t\t\t\tdelete = int(msgspec.split(' ')[0])\n\t\t\t\t\tpop_conn.dele(delete)\n\t\t\t\tflag = 1\n\tpop_conn.quit()\n\treturn flag, sweep",
"def check_email():\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(user, password)\n\n g = gmail.login(user, password)\n\n # Check for unread messages.\n unread = g.inbox().mail(unread=True)\n\n # Submit a job to lint each email sent to [email protected]. Record the\n # resulting job_ids somewhere (in Redis, I suppose), keyed by a hash of the\n # email.\n for u in unread:\n\n u.fetch()\n\n signature = (u.fr.decode('utf-8') +\n u.subject.decode('utf-8') +\n u.body.decode('utf-8'))\n\n hash = hashlib.sha256(signature.encode('utf-8')).hexdigest()\n\n if user_to in u.to or user_to in u.headers.get('Cc', []):\n\n job_id = conn.get(hash)\n\n if not job_id:\n # If the email hasn't been sent for processing, send it.\n r = requests.post(api_url, data={\"text\": u.body})\n conn.set(hash, r.json()[\"job_id\"])\n print(\"Email {} sent for processing.\".format(hash))\n\n else:\n # Otherwise, check whether the results are ready, and if so,\n # reply with them.\n r = requests.get(api_url, params={\"job_id\": job_id})\n\n if r.json()[\"status\"] == \"success\":\n\n reply = quoted(u.body)\n errors = r.json()['data']['errors']\n reply += \"\\r\\n\\r\\n\".join([json.dumps(e) for e in errors])\n\n msg = MIMEMultipart()\n msg[\"From\"] = \"{} <{}>\".format(name, user)\n msg[\"To\"] = u.fr\n msg[\"Subject\"] = \"Re: \" + u.subject\n\n if u.headers.get('Message-ID'):\n msg.add_header(\"In-Reply-To\", u.headers['Message-ID'])\n msg.add_header(\"References\", u.headers['Message-ID'])\n\n body = reply + \"\\r\\n\\r\\n--\\r\\n\" + tagline + \"\\r\\n\" + url\n msg.attach(MIMEText(body, \"plain\"))\n\n text = msg.as_string()\n server.sendmail(user, u.fr, text)\n\n # Mark the email as read.\n u.read()\n u.archive()\n\n print(\"Email {} has been replied to.\".format(hash))",
"def check_for_new_data():\n SCOPES = ['https://mail.google.com/']\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('creds_4.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('gmail', 'v1', credentials=creds)\n stamp = int(time.time()) - 3600\n # Call the Gmail API\n results = service.users().messages().list(userId='me',q=f\"from:[email protected] after:{stamp}\").execute()\n if results[\"resultSizeEstimate\"] > 0:\n populate_database()",
"def main():\r\n credentials = get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n service = discovery.build('gmail', 'v1', http=http)\r\n\r\n response = service.users().messages().list(userId=USER_ID, labelIds=[\"SPAM\"]).execute()\r\n messages = []\r\n if 'messages' in response:\r\n messages.extend(response['messages'])\r\n\r\n while 'nextPageToken' in response:\r\n page_token = response['nextPageToken']\r\n response = service.users().messages().list(userId=USER_ID, labelIds=[\"SPAM\"], pageToken=page_token).execute()\r\n messages.extend(response['messages'])\r\n\r\n i = 0\r\n for message in messages:\r\n msg_id = message[\"id\"]\r\n message = service.users().messages().get(userId=USER_ID, id=msg_id).execute()\r\n for prop in message[\"payload\"][\"headers\"]:\r\n if prop[\"name\"] == \"From\":\r\n print(\"ID:\", i, \"\\tFrom:\", prop[\"value\"].encode('ascii','replace'), end=\"\\t\")\r\n elif prop[\"name\"] == \"Subject\":\r\n print(\"Subject:\", prop[\"value\"].encode('ascii','replace'))\r\n i += 1\r\n\r\n to_keep = raw_input(\"Do you want to keep any emails? [N / 0,1,...] \")\r\n if \",\" in to_keep:\r\n to_keep = to_keep.split(\",\")\r\n for i in range(len(to_keep)):\r\n to_keep[i] = int(to_keep[i])\r\n elif to_keep != \"N\":\r\n to_keep = [int(to_keep)]\r\n\r\n if isinstance(to_keep, list):\r\n for i in range(len(to_keep)-1,-1,-1):\r\n msg_labels = {'removeLabelIds': [\"SPAM\"], 'addLabelIds': [\"INBOX\"]}\r\n msg_id = messages[to_keep[i]][\"id\"]\r\n message = service.users().messages().modify(userId=USER_ID, id=msg_id, body=msg_labels).execute()\r\n del messages[to_keep[i]]\r\n\r\n # ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\r\n # filter0 = service.users().settings().filters().get(userId=USER_ID, id=\"ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\").execute()\r\n # print(filter0)\r\n\r\n for message in messages:\r\n msg_id = message[\"id\"]\r\n # for prop in message[\"payload\"][\"headers\"]:\r\n # if prop[\"name\"] == \"From\":\r\n # start_email = prop[\"value\"].find(\"<\")\r\n # end_email = prop[\"value\"].find(\">\", start_email + 1)\r\n # email_address = prop[\"value\"][start_email + 1:end_email]\r\n # filter0[\"criteria\"][\"from\"] = filter0[\"criteria\"][\"from\"] + \" OR \" + email_address\r\n service.users().messages().delete(userId=USER_ID, id=msg_id).execute()\r\n\r\n # service.users().settings().filters().delete(userId=USER_ID, id=\"ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\").execute()\r\n # service.users().settings().filters().create(userId=USER_ID, body=filter0).execute()\r\n print(\"All Spam Deleted!\")",
"def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n scores = {} # scores is an empty dict already\n\n if os.path.getsize('token.pickle') > 0: \n with open('token.pickle', \"rb\") as f:\n unpickler = pickle.Unpickler(f)\n # if file is not empty scores will be equal\n # to the value unpickled\n scores = unpickler.load()\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n # Starts Gmail V1 with logged in user\n service = build('gmail', 'v1', credentials=creds)\n \n# ================================================================================== \n\n\n # MAIL CHECKHER ================================================================\n\n # get mails via gmail api\n results = service.users().messages().list(userId='me', labelIds=['INBOX']).execute()\n messages = results.get('messages', [])\n\n # mail number\n mail_nr = 0\n\n # variabel for how many mails we want to search through\n message_count = int(input(\"Hur många mails vill du söka igenom? \"))\n # if 0 mails are chosen\n if not messages:\n print('Inga mail i inkorgen')\n else:\n # looks through the email inbox for mails \"message_count\" amount of times\n for message in messages[:message_count]:\n # gets the email id's in full format so we can extraqct information via the gmail api\n msg = service.users().messages().get(userId='me', id=message['id'], format='full', metadataHeaders=None).execute()\n # gets the headers of the email in a variable\n headers = msg[\"payload\"][\"headers\"]\n # from headers gets the sender email, who it was from \n from_ = [i['value'] for i in headers if i[\"name\"]==\"From\"]\n # from headers gets the subject of the email\n subject = [i['value'] for i in headers if i[\"name\"]==\"Subject\"]\n # keeps count of the current email\n mail_nr += 1\n # if the email is from the security system email print it's information\n if from_ == ['Python Ormarna <[email protected]>'] or from_ == ['[email protected]']:\n # gets the email in raw format via gmail api\n rawmsg = service.users().messages().get(userId=\"me\", id=message[\"id\"], format=\"raw\", metadataHeaders=None).execute()\n print(\"=\"*100)\n print(\"\\nMail:\", mail_nr)\n print(\"Detta mail är från erat säkerhetssystem\")\n # variable the UNIX time of when the email was sent\n datum = int(msg['internalDate'])\n datum /= 1000\n # prints the date and time when the email was revived in local y/m/d/h/m/s\n print(\"Mottaget:\", datetime.fromtimestamp(datum).strftime('%Y-%m-%d %H:%M:%S'))\n print(\"Från:\", from_)\n print(\"Ämne:\", subject)\n # prints a snippet from the email\n print(msg['snippet'])\n print(\"\\n\")\n else:\n print(\"=\"*100)\n print(\"\\nMail:\", mail_nr)\n print(\"Detta mail är INTE från erat säkerhetssystem\\n\")\n time.sleep(1)\n print(\"Inga fler mail hittades\")",
"def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n\n user_id = 'me'\n label_id_one = 'INBOX'\n label_id_two = 'UNREAD'\n\n # Call the Gmail API\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n unread_msgs = service.users().messages().list(userId='me',labelIds=[label_id_one, label_id_two]).execute()\n mssg_list = unread_msgs['messages']\n print (\"Total unread messages in inbox: \", str(len(mssg_list)))\n final_list = [ ]\n\n for mssg in mssg_list:\n temp_dict = { }\n m_id = mssg['id'] # get id of individual message\n message = service.users().messages().get(userId=user_id, id=m_id).execute() # fetch the message using API\n payld = message['payload'] # get payload of the message \n headr = payld['headers'] # get header of the payload\n\n\n for one in headr: # getting the Subject\n if one['name'] == 'Subject':\n msg_subject = one['value']\n temp_dict['Subject'] = msg_subject\n else:\n pass\n\n\n for two in headr: # getting the date\n if two['name'] == 'Date':\n msg_date = two['value']\n date_parse = (parser.parse(msg_date))\n m_date = (date_parse.date())\n temp_dict['Date'] = str(m_date)\n else:\n pass\n\n for three in headr: # getting the Sender\n if three['name'] == 'From':\n msg_from = three['value']\n temp_dict['Sender'] = msg_from\n else:\n pass\n\n temp_dict['Snippet'] = message['snippet'] # fetching message snippet\n\n\n try:\n \n # Fetching message body\n mssg_parts = payld['parts'] # fetching the message parts\n part_one = mssg_parts[0] # fetching first element of the part \n part_body = part_one['body'] # fetching body of the message\n part_data = part_body['data'] # fetching data from the body\n clean_one = part_data.replace(\"-\",\"+\") # decoding from Base64 to UTF-8\n clean_one = clean_one.replace(\"_\",\"/\") # decoding from Base64 to UTF-8\n clean_two = base64.b64decode (bytes(clean_one, 'UTF-8')) # decoding from Base64 to UTF-8\n soup = BeautifulSoup(clean_two , \"lxml\" )\n mssg_body = soup.body()\n # mssg_body is a readible form of message body\n # depending on the end user's requirements, it can be further cleaned \n # using regex, beautiful soup, or any other method\n temp_dict['Message_body'] = mssg_body\n\n except :\n pass\n\n print (temp_dict)\n final_list.append(temp_dict) # This will create a dictonary item in the final list\n return final_list[:3]\n # This will mark the messagea as read\n #service.users().messages().list(userId=user_id, id=m_id,body={ 'removeLabelIds': ['UNREAD']}).execute() \n\n\n if not labels:\n print('No labels found.')\n else:\n print('Labels:')\n for label in labels:\n print(label['name'])",
"def main():\n \n ####GET ALL MESSAGES FROM GMAIL###\n # gmail_usr_name = raw_input(\"Enter the gmail user name: \\n\")\n # gmail_passwrd = getpass.getpass(\"Enter the Gmail password: \\n\")\n print(\"Please wait while message IDs for Gmail are populated...\")\n gmail_accumulator = Accumulator.Accumulator(GMAIL_PATH, \"usr_name\", \"passwrd\",\n IMAP_PORT, GMAIL_FOLDER)\n gmail_msg_ids = gmail_accumulator.get_ids()\n pprint.pprint(gmail_msg_ids)\n \n ####GET ALL MESSAGES FROM IMAP###\n #IMAP2_usr_name = raw_input(\"Enter the IMAP2 user name: \\n\")\n #IMAP2_passwrd = getpass.getpass(\"Enter the IMAP2 password: \\n\")\n print(\"Please wait while message IDs for IMAP are populated\")\n \n IMAP2_accumulator = Accumulator.Accumulator(\"imap2.lbl.gov\", \"usr_name\", \"passwrd\",\n IMAP_PORT, IMAP2_FOLDER)\n IMAP2_msg_ids = IMAP2_accumulator.get_ids()\n pprint.pprint(IMAP2_msg_ids)\n \n gmail_unique_ids = gmail_accumulator.get_unique_ids()\n ###FIND THE DIFFERENCES BETWEEN IMAP AND GMAIL.####\n compare_ids = Comparator.Comparator(IMAP2_msg_ids, gmail_unique_ids)\n diff_ids = compare_ids.compare()\n \n ###FIND THE DUPLICATE IDs FROM IMAP2.###\n \n dups = IMAP2_accumulator.get_duplicate_ids()\n dup_headers = header_info(dups, IMAP2_accumulator)\n print(\"{num_msgs} messages in IMAP2/{fldr}\\n\".format(num_msgs = IMAP2_accumulator.count_ids(), fldr = IMAP2_accumulator.folder))\n print(\"{num_msgs} messages in GMAIL/{fldr}\\n\".format(num_msgs = gmail_accumulator.count_ids(), fldr = gmail_accumulator.folder))\n \n print(\"-------------------------------------------------------------------------------------\")\n print(\"There are {num} messages in IMAP2/{fldr1} which are not in Gmail/{fldr2}\\n\".format(num = len(diff_ids),\n fldr1 = IMAP2_accumulator.folder,\n fldr2 = gmail_accumulator.folder))\n print(\"--------------------------------------------------------------------------------------\")\n pprint.pprint(diff_ids)\n\n print(\"Here is a list of the headers of each message ID which is not in Gmail:\\n\")\n headers = header_info(diff_ids, IMAP2_accumulator)\n\n ###print a table of the info of the missing messages.###\n table = prettytable.PrettyTable([\"TO\", \"FROM\", \"SUBJECT\"])\n table.align[\"TO\"] = \"l\"\n table.padding_width = 1\n for hdr in headers:\n table.add_row(hdr)\n print(table)\n\n\n ###write the output to OUTPUT_FILE.###\n\n output_file = open(OUTPUT_FILE, 'w')\n output_file.write(\"\\n\")\n output_file.write(\"{num_msgs} messages in IMAP2/{fldr}\\n\".format(num_msgs = IMAP2_accumulator.count_ids(), fldr = IMAP2_accumulator.folder))\n output_file.write(\"{num_msgs} messages in GMAIL/{fldr}\\n\".format(num_msgs = gmail_accumulator.count_ids(), fldr = gmail_accumulator.folder))\n output_file.write(\"There are {num} messages in IMAP2/{fldr1} which are not in Gmail/{fldr2} \\n\".format(num = len(diff_ids),\n fldr1 = IMAP2_accumulator.folder,\n fldr2 = gmail_accumulator.folder))\n output_file.write(\"Here is a list of the headers of each message ID which is not in Gmail:\\n\")\n for ids in diff_ids:\n output_file.write(str(ids))\n output_file.write(\"\\n\")\n output_file.write(\"\\n\")\n\n ###OUUTPUT THE TABLE###\n\n output_file.write(str(table)) \n output_file.write(LINE_SEPARATOR)\n\n output_file.close()\n\n ucb.interact()",
"def gmail(screen):\n\n\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n\n\n if not labels:\n print('No labels found.')\n else:\n if PRINT_CATEGORY: print('Labels:')\n for label in labels:\n if PRINT_CATEGORY: print(label['name'])\n if label['name']=='UNREAD':\n listMessages = ListMessagesWithLabels(service, 'me', label['name'])\n nbMessages = len(listMessages)\n nbMess = 0\n\n printTerminal('ENZO! Tu as ['+str(nbMessages)+'] messages non lus.',True)\n say('ENZO! Tu as: '+str(nbMessages)+' messages non lus.')\n\n for message in listMessages:\n #print(GetMessage(service, 'me', message['id'], False))\n nbMess+=1\n ggMessage = GetMessage(service, 'me', message['id'], False)\n #print(ggMessage)\n\n #msg_str = base64.urlsafe_b64decode(ggMessage['raw'].encode('ASCII'))\n #print(msg_str)\n\n for header in ggMessage['payload']['headers']:\n #print(header)\n if header['name']=='Subject':\n #unicode(text,'utf-8')\n #screen.addstr(0,1,\"\")\n if screen:\n screen.addstr(str(nbMess)+'] '+header['value'])\n say(header['value'])\n screen.refresh()\n else:\n print(str(nbMess)+'] '+header['value'])\n say(header['value'])\n #TTS(header['value'],'french', 50 ,2 )\n #status=subprocess.call([\"espeak\",\"-s 100 -v fr \",header['value']], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n #for part in ggMessage['payload']['parts']:\n # msg = base64.urlsafe_b64decode(part['body']['data'].encode('ASCII'))\n # print(removehtml(msg))\n #print(part['body']['data'])\n #say(part['body']['data'])\n if len(sys.argv) > 1:\n if sys.argv[1]=='-t':\n TTS(ggMessage,'french', 50 ,2 )\n #for toto in label:\n # print(toto)",
"def verify_email(nickname, quiet):\n\n try:\n account = Account.query.filter_by(nickname=nickname).one()\n except NoResultFound:\n print(f\"Account {nickname} not found\")\n return\n gmail = GmSync.from_account(account, load_config(not quiet))\n gmail.verify()",
"def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n\n # Call the Gmail API\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n if not labels:\n print('No labels found.')\n else:\n print('Labels:')\n for label in labels:\n print(label['name'])\n path = \"./ham\"\n try:\n os.mkdir(path)\n except OSError:\n print (\"Creation of the directory %s failed\" % path)\n else:\n print (\"Successfully created the directory %s \" % path)\n\n messages = []\n messages = ListMessagesMatchingQuery(service, 'me', 'in:inbox')\n idx = 0\n for message in messages:\n GetMimeMessage(service, 'me', message['id'], idx)\n idx+=1",
"def connect():\n\n mailBox = IMAP4_SSL('imap.gmail.com')\n\n if TESTING:\n mailBox.login(\"sapphirephoenix\", getpass.getpass())\n else:\n mailBox.login(raw_input(\"\\nUsername: \"), getpass.getpass())\n\n result, data = mailBox.select('INBOX', True) # INBOX [Gmail]/All Mail\n\n if result == \"OK\":\n print \"\\n* Connected to mailbox! *\\n\"\n else:\n print \"\\nERROR: Could not connect to mailbox\\n\"\n print \"\\n* Exiting... *\\n\"\n sys.exit(1)\n\n return mailBox",
"def check_for_subscribers(mail, login_info):\n ADDRESS, PASSWORD = login_info\n\n try:\n mail.select('inbox')\n data = mail.search(None, 'ALL') \n except:\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\n mail.login(ADDRESS, PASSWORD)\n mail.select('inbox')\n data = mail.search(None, 'ALL')\n \n mail_ids = data[1]\n id_list = mail_ids[0].split() \n\n if not id_list:\n return []\n\n first_email_id = int(id_list[0])\n latest_email_id = int(id_list[-1])\n\n subscribers = []\n\n for i in range(latest_email_id, first_email_id-1, -1):\n data = mail.fetch(str(i), '(RFC822)')\n for response_part in data:\n arr = response_part[0]\n if isinstance(arr, tuple):\n msg = email.message_from_string(str(arr[1],'utf-8'))\n email_from = msg['from']\n subscribers.append(email_from)\n\n return subscribers",
"def main(): \r\n # Creating a storage.JSON file with authentication details\r\n # we are using modify and not readonly, as we will be marking the messages \r\n # as Read \r\n SCOPES = 'https://www.googleapis.com/auth/gmail.modify' \r\n store = file.Storage('storage.json') \r\n creds = store.get()\r\n if not creds or creds.invalid:\r\n flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\r\n creds = tools.run_flow(flow, store)\r\n GMAIL = discovery.build('gmail', 'v1', http=creds.authorize(Http()))\r\n \r\n messages_retrieved=0\r\n num_images=0\r\n save_dir,search_term = read_prefs() \r\n \r\n # Getting all the unread messages from Inbox\r\n unread_msgs = GMAIL.users().messages().list(userId='me',\r\n labelIds=['INBOX', 'UNREAD']).execute()\r\n \r\n # We get a dictonary. Now reading values for the key 'messages'\r\n try:\r\n mssg_list = unread_msgs['messages']\r\n print (\"Total unread messages in inbox: \", str(len(mssg_list)))\r\n except KeyError: #handle the keyerror on no new messages by exiting\r\n print ('No new messages - exiting.')\r\n return 0 \r\n \r\n #loop through the new messages list\r\n for i,mssg in enumerate(mssg_list):\r\n temp_dict = { }\r\n print(\"processing message {} of {}\".format(i+1,len(mssg_list)))\r\n m_id = mssg['id'] # get id of individual message\r\n # fetch the message using API\r\n message = GMAIL.users().messages().get(userId='me', id=m_id).execute() \r\n payld = message['payload'] # get payload of the message \r\n header = payld['headers'] # get header of the payload\r\n \r\n for field in header: # getting the Subject\r\n if field['name'] == 'Subject':\r\n msg_subject = field['value']\r\n temp_dict['Subject'] = msg_subject\r\n if field['name'] == 'Date':\r\n msg_date = field['value']\r\n date_parse = (parser.parse(msg_date))\r\n m_date = (date_parse.date())\r\n temp_dict['Date'] = str(m_date)\r\n else:\r\n pass\r\n \r\n try:\r\n\r\n # Fetching message body\r\n try: #if there is html/data only\r\n part_data = payld['body']['data'] # fetching data\r\n except: #if there are multiple parts get the html part\r\n part_data = payld['parts'][0]['body']['data'] # fetching data from the body\r\n # decoding from Base64 to UTF-8\r\n clean_one = part_data.replace(\"-\",\"+\").replace(\"_\",\"/\")\r\n clean_two = base64.b64decode (bytes(clean_one, 'UTF-8')) \r\n \r\n if search_term in temp_dict['Subject']:\r\n img_list= soup_process_email(clean_two.decode(\"utf8\"))\r\n print ('{} images found.'.format(len(img_list)))\r\n \r\n for i in (img_list):\r\n print (\"downloading: \" +i.split('/')[-1])\r\n #adding the email date to filepath\r\n write_dir=save_dir+temp_dict['Date']+'/'\r\n #checking if path exists (and making it if not)\r\n ensure_dir(write_dir) \r\n if \".jpg\" in i:\r\n # adding filename to write path\r\n write_dir=write_dir+\"/\"+i.split('/')[-1] \r\n else:\r\n # adding 'mp4' extension to movies and removing leading '?'\r\n filename=str(i.split('/')[-1])\r\n write_dir=write_dir+\"/\"+filename[1:]+\".mp4\" \r\n # check if file exists\r\n if not does_file_exist(write_dir):\r\n time.sleep(1) #rate limiting\r\n urllib.request.urlretrieve(i, write_dir) #downloading\r\n # num_images+=len(img_list)\r\n num_images+=1\r\n else:\r\n print ('file already downloaded')\r\n \r\n else:\r\n pass \r\n messages_retrieved+=1\r\n except Exception as e:\r\n print (\"Unexpected error:\", sys.exc_info()[0])\r\n print (\"Unexpected error:\", sys.exc_info()[1])\r\n print (\"Unexpected error:\", sys.exc_info()[2])\r\n except:\r\n pass \r\n \r\n #### This will mark the 
messages as read. when testing is complete\r\n GMAIL.users().messages().modify(userId='me', \r\n id=m_id,body={ 'removeLabelIds': ['UNREAD']}).execute() \r\n \r\n \r\n print (\"Total messages retrieved: \", messages_retrieved)\r\n print (\"Total images retrieved: \", num_images)",
"def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n\n # Calls the Gmail API to get Emails\n threads = listMessages(service, 'me', 'Jay Patel,')\n\n if not threads:\n print('No TUalerts found.')\n else:\n getCrimeLocation(service, 'me', threads)\n\n # Prints the TUlalerts (Mostly for testing purposes)\n printAlerts()",
"def checkMsgNum(m):\n #mboxes = m.list()[1] Show all boxes\n m.select(\"INBOX\")#Select mailbox\n #data = m.search(None, \"(FROM \\\"[email protected]\\\")\") Search specific email addy\n #Change \"[email protected]\" in above to a specific email address to enable search from particular user\n items = m.search(None, \"(UNSEEN)\")\n msgNum = str(items[1]).rsplit(None)[-1].strip('[\\']')\n\n return msgNum",
"def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n messageIds = []\n i = 0\n nextPageToken = None\n while (i <= 15):\n try:\n response = service.users().messages().list(userId='me', q='after:2016/09/01', maxResults=10000, pageToken=nextPageToken).execute()\n messages = response.get('messages')\n nextPageToken = response['nextPageToken']\n\n for m in messages:\n messageIds.append(m['id'])\n\n i+=1 \n except KeyError:\n break\n\n senders = []\n counter = 0\n for i in messageIds:\n data = service.users().messages().get(userId='me', id=i).execute()\n for d in data['payload']['headers']:\n if d['name'] == 'Received':\n print(d['value'][d['value'].find('; ')+1:d['value'].find('(PST)')])\n if d['name'] == 'From' and 'bounce' not in d['value']:\n senders.append(d['value'])\n print(counter, ' ', d['value'])\n counter += 1\n break\n\n emails = []\n with open('out.csv', 'wb') as f:\n writer = csv.writer(f, delimiter=',')\n for person in set(senders):\n cleaned = clean_data(person)\n name = cleaned[0]\n email = cleaned[1]\n if email not in emails:\n emails.append(email)\n if name != None and email != None:\n writer.writerow([name, email])",
"def run_mailcheck (self):\n\t\t# TODO: add function in backend to check if all needed things are set\n\t\t# like server/pass/user/... - if not, show error\n\t\t# if it is not currently refreshing\n\t\tif not self.__mailbackend.refreshing:\n\t\t\tself.__status = mail.MailCheckStatus.REFRESH \n\t\t\tself.redraw_canvas()\n\t\t\tself.__mailbackend.start()\n\t\treturn False\t# in case we are run as a timeout",
"def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n\n service = build('gmail', 'v1', credentials=creds)\n\n labels = ListLabels(service, 'me')\n\n messages = ListMessagesWithLabels(service, 'me', label_ids=[\"CATEGORY_FORUMS\"])",
"def email_startup():\n imap = imaplib.IMAP4_SSL('imap.gmail.com')\n # authenticate\n imap.login(email_credentials.email_user, email_credentials.email_pass)\n return imap",
"def get_unread_email_ids(gmail_client):\n response = gmail_client.users().messages().list(userId='me',q='is:unread').execute()\n\n if 'messages' in response: # messages key only exists if there are unread messages\n return [message['id'] for message in response['messages']]\n else:\n print(\"No unread messages...\")\n return [] # still return a list since that's what caller expects",
"def checkUserEmail(email, number):\n\n if number == 1:\n flag = 0\n atflag = False\n for i in email:\n if i == \"@\":\n atflag=True\n flag += 1\n if i == \".\" and atflag:\n flag += 1\n if flag != 2:\n return False\n return True\n if number == 2:\n c.execute(\"select email from users where email = ?\", (email,))\n if c.fetchall():\n return True\n return False",
"def reachable(self):\n service = build('gmail', 'v1', http=Http(timeout=1.0))\n url = urlparse.urlparse(service._baseUrl)\n host = url.hostname\n port = url.port\n try:\n socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)\n except (socket.herror, socket.gaierror, URLError, OSError):\n return False\n return True",
"def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n\n lists,nextPageToken = ListMessages(service,user_id = 'me',q='subject:tradingview')\n # print (lists)\n mes,mes_str = GetMimeMessage(service,user_id = 'me',msg_id = lists[0]['id'])\n print (mes)\n\n\n j = 0\n for part in mes.walk(): \n j = j + 1 \n fileName = part.get_filename() \n contentType = part.get_content_type() \n mycode=part.get_content_charset(); \n # 保存附件 \n if fileName:\n print ('hhhhhhhhhhhhh')\n elif contentType == 'text/plain' or contentType == 'text/html': \n #保存正文 \n data = part.get_payload(decode=True) \n content=str(data); \n # if mycode=='gb2312': \n # content= mbs_to_utf8(content) \n #end if \n # nPos = content.find('降息') \n # print(\"nPos is %d\"%(nPos)) \n # print >> f, data \n # 正则替换掉所有非 <a></a>的标签 <[^>|a]+>\n # reg = re.compile('<[^>|a]+>')\n contentTxt = re.compile('<[^>|a]+>').sub('',content)\n print (reg.sub('',content))\n #end if \n\n\n \n # help(mes)\n # for i in mes.values():\n # print (i)\n # # print (mes[i]);\n # print (\"----------\")\n # print (mes['from'])\n # print (type (mes))\n # # print \n # parsed = Parser().parsestr(mes)\n # print (parsed)\n # print (mes)\n # for i in mes:\n # print (i)\n # for item in lists:\n # mes = GetMimeMessage(service,user_id = 'me',msg_id = item['id'])\n # # print (mes)\n # parsed = Parser().parsestr(mes)\n # print (parsed)",
"def check_mail(self, update=False):\r\n return self.check_mail_dir(update=update)",
"def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n user_id = 'me'\n\n ## get_labels ##\n #print_all_labels(service,user_id)\n #fetch_and_store(service,user_id)\n #apply_rules()",
"def filter_unread(check_what, criteria, return_what):\n imap = imaplib.IMAP4_SSL(config[\"email\"][\"server\"])\n imap.login(config[\"email\"][\"user\"], config[\"email\"][\"pass\"])\n status, messages = imap.select(\"INBOX\")\n \n status, response = imap.search(None, '(UNSEEN)')\n unread_msg_nums = response[0].split()\n\n ret = [] \n for i in unread_msg_nums:\n parse_return = parse(imap, i, check_what, criteria, return_what)\n if parse_return is not None:\n ret.append(parse_return)\n set_unseen(imap, i)\n imap.close()\n imap.logout()\n\n return ret",
"def go():\n # Authenticate\n print('****************** Authenticate ******************')\n\n creds = None\n\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n print('****************** Load Token ******************')\n\n creds = pickle.load(token)\n\n if not creds or not creds.valid:\n print('****************** Credentials ******************')\n\n if creds and creds.expired and creds.refresh_token:\n print('****************** Refresh Credentials ******************')\n\n creds.refresh(Request())\n else:\n print('****************** Load Credentials ******************')\n\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n\n with open('token.pickle', 'wb') as token:\n print('****************** Dump Token ******************')\n\n pickle.dump(creds, token)\n\n print('****************** Load Service ******************')\n\n service = build('gmail', 'v1', credentials=creds)\n\n # Set Date Range\n print('****************** Set Date Range ******************')\n \n start_datetime = datetime.today() - timedelta(days=2)\n end_datetime = datetime.today() + timedelta(days=2)\n\n start_date = start_datetime.strftime(\"%Y/%m/%d\")\n end_date = end_datetime.strftime(\"%Y/%m/%d\")\n\n print(start_date)\n print(end_date)\n\n # Set Query\n print('****************** Set Query ******************')\n\n user_id = 'me'\n full = 'full'\n query = 'after:' + start_date + ' before:' + end_date + ' subject:Your Single Transaction Alert from Chase'\n\n print(query)\n\n # List Messages (All Pages)\n print('****************** Run Query ******************')\n\n response = service.users().messages().list(userId=user_id, q=query).execute()\n\n messages_all_pages = []\n\n if 'messages' in response:\n messages_all_pages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()\n messages_all_pages.extend(response['messages'])\n\n messages = messages_all_pages\n\n # Find Transactions in Message List\n if not messages:\n print('No messages found...')\n else:\n for message in messages:\n queue_id = message['id']\n\n # Get Message\n this_message = service.users().messages().get(userId=user_id, id=queue_id, format=full).execute()\n\n # Set Message\n message_body = this_message['payload']['body']['data']\n message_html = base64.urlsafe_b64decode(message_body)\n message_text = message_html.decode('utf-8').replace('($USD) ', '')\n\n # Set Transaction Date\n date_message = int(this_message['internalDate'])\n date_object = (date_message / 1000)\n transaction_date = datetime.fromtimestamp(date_object).strftime(\"%Y-%m-%d\")\n\n # Set Amount\n amount = re.search('A charge of (.+?) at', message_text).group(1)\n\n # Set Description\n description = re.search('at (.+?) has', message_text).group(1)\n\n # Build Transaction\n transaction = {\n 'QueueID': queue_id,\n 'TransactionTypeID': 2,\n 'TransactionDT': transaction_date,\n 'Description': description,\n 'Amount': amount,\n 'BudgetCategoryID': '103',\n 'TransactionNumber': '',\n 'Note': 'CC'\n }\n\n print('****************** Transaction Found ******************')\n print(transaction)\n\n # Send to Queue\n response_data = requests.post(url=BUDGET_API, data=transaction)\n\n result = response_data.text\n\n if result == '1':\n print('****************** Transaction Queued ******************')",
"def get_unread_count(username, password):\n obj = imaplib.IMAP4_SSL('imap.gmail.com', '993')\n obj.login(username, password)\n obj.select('Inbox')\n message_ids = obj.search(None, \"UNSEEN\")[1]\n list_of_split_strings = str(message_ids).split(\" \")\n unread = len(list_of_split_strings)\n # speak(str(unread))\n return unread",
"def connect(self):\n\n mail = Account(self.email, oauth2_token=self.access_token)\n trash_folder = mail.trash_mailbox()\n if pygmail.errors.is_error(trash_folder):\n return False\n else:\n self.mail = mail\n self.trash_folder = trash_folder\n self.inbox = mail.all_mailbox()\n return True",
"def main():\n token = 'C:/Users/asif.rouf/PycharmProjects/pythonProject/AX_Admin_portal/Test/utils/google-api-token.json'\n credential = 'C:/Users/asif.rouf/PycharmProjects/pythonProject/AX_Admin_portal/Test/utils/google-api-credentials.json'\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(token):\n creds = Credentials.from_authorized_user_file(token, SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n credential, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n # with open('token.json', 'w') as token:\n # token.write(creds.to_json())\n\n service = build('gmail', 'v1', credentials=creds)\n\n # # Call the Gmail API\n # results = service.users().labels().list(userId='me').execute()\n # labels = results.get('labels', [])\n #\n # if not labels:\n # print('No labels found.')\n # else:\n # print('Labels:')\n # for label in labels:\n # print(label['name'])\n\n # Call the Gmail API to fetch INBOX\n results = service.users().messages().list(userId='me', labelIds=['INBOX']).execute()\n messages = results.get('messages', [])\n # message1 = messages[0]\n # print(message1)\n message1 = {'id': '17a5ca5f5f4bd0aa', 'threadId': '17a5b1bb861b3bc2'}\n message1 = {'id': '17a5cbc54c546465', 'threadId': '17a5b1bb861b3bc2'}\n\n # message1 = {'id': '17a5b852afe04a52', 'threadId': '17a50c997c059e68'}\n print(messages)\n print(message1)\n\n if not messages:\n print(\"No messages found.\")\n else:\n print(\"Message snippets:\")\n # for message in messages:\n # msg = service.users().messages().get(userId='me', id=message['id']).execute()\n # print(messages)\n # print(msg['snippet'])\n\n # msg = service.users().messages().get(userId='me', id=message1['id']).execute()\n # print(msg['snippet'])\n ###############################\n msg = service.users().messages().get(userId='me', id=message1['id'], format='raw').execute()\n msg_str = base64.urlsafe_b64decode(msg['raw'].encode('ASCII'))\n mime_msg = email.message_from_bytes(msg_str)\n print(msg['snippet'])\n print(mime_msg)\n print(mime_msg['Date'])\n print(mime_msg['From'])\n print(mime_msg['To'])\n print(mime_msg['Subject'])\n #\n # print(datetime.utcnow())\n\n ######################################################\n # msg = service.users().messages().get(userId='me', id=message1['id'], format='full').execute()\n # # parts can be the message body, or attachments\n # payload = msg['payload']\n # headers = payload.get(\"headers\")\n # parts = payload.get(\"parts\")\n # # print(payload)\n # # print(parts)\n # # print(headers)\n # for header in headers:\n # print(header['name'])\n # print(header['value'])\n #\n ######################################################\n msg = service.users().messages().get(userId='me', id=message1['id']).execute()\n\n # Use try-except to avoid any Errors\n try:\n # Get value of 'payload' from dictionary 'txt'\n payload = msg['payload']\n headers = payload['headers']\n subject = ''\n sender = ''\n\n # Look for Subject and Sender Email in the headers\n for d in headers:\n if d['name'] == 'Subject':\n subject = d['value']\n if d['name'] == 'From':\n sender = d['value']\n # The Body of the message is in Encrypted format. 
So, we have to decode it.\n # Get the data and decode it with base 64 decoder.\n parts = payload.get('parts')[0]\n data = parts['body']['data']\n data = data.replace(\"-\", \"+\").replace(\"_\", \"/\")\n decoded_data = base64.b64decode(data)\n\n # Now, the data obtained is in lxml. So, we will parse\n # it with BeautifulSoup library\n soup = BeautifulSoup(decoded_data, \"lxml\")\n body = soup.body()\n\n # Printing the subject, sender's email and message\n print(\"Subject: \", subject)\n print(\"From: \", sender)\n print(\"Message: \", body)\n # for link in soup.find_all('a', href=True):\n # print(link['href'])\n link = soup.find('a', href=True)\n print(link['href'])\n except:\n pass"
] | [
"0.70329154",
"0.6592309",
"0.64842856",
"0.636004",
"0.6350439",
"0.62164766",
"0.6210396",
"0.6174336",
"0.60854125",
"0.6038846",
"0.6019747",
"0.5920456",
"0.58898234",
"0.5836752",
"0.5829357",
"0.57975626",
"0.57911247",
"0.57865626",
"0.5757906",
"0.57237977",
"0.56982785",
"0.5670466",
"0.56280106",
"0.5623399",
"0.56136477",
"0.5565839",
"0.55417943",
"0.553826",
"0.54999584",
"0.5481485"
] | 0.8364727 | 0 |
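Note: several of the Gmail entries above decode message bodies by manually replacing "-" with "+" and "_" with "/" before base64-decoding. A minimal sketch of the same step using the standard library's URL-safe decoder; the helper name and the padding handling are illustrative assumptions, not part of any entry above:

import base64

def decode_gmail_body(data):
    # The Gmail API returns body data in URL-safe base64 (assumption based on the
    # entries above); pad to a multiple of 4 and decode without manual replacement.
    data += "=" * (-len(data) % 4)
    return base64.urlsafe_b64decode(data.encode("ascii")).decode("utf-8", errors="replace")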
Converts a raw packet to a dpkt packet regardless of link type. | def iplayer_from_raw(raw, linktype=1):
if linktype == 1: # ethernet
pkt = dpkt.ethernet.Ethernet(raw)
ip = pkt.data
elif linktype == 101: # raw
ip = dpkt.ip.IP(raw)
else:
raise Exception("unknown PCAP linktype")
return ip | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_packet(linktype, packet):\n link_layer = parse_Ethernet(packet) if linktype == pcapy.DLT_EN10MB else parse_Cooked(packet)\n if link_layer['payload_type'] in ['IPv4', 'IPv6']:\n network_layer = parse_IPv4(link_layer['payload']) if link_layer['payload_type'] == 'IPv4' else parse_IPv6(link_layer['payload'])\n if network_layer['payload_type'] in ['UDP', 'TCP']:\n transport_layer = parse_UDP(network_layer['payload']) if network_layer['payload_type'] == 'UDP' else parse_TCP(network_layer['payload'])\n return (link_layer, network_layer, transport_layer)",
"def packetize(cls, source, raw_data):\n pkt = cls(source, raw_data)\n\n if pkt.type not in DGTL.descriptors.keys():\n raise Warning('Unsupported packet type! (%s)' % pkt.type)\n\n pkt.set_decoder(DGTL.descriptors[pkt.type][2])\n\n return pkt",
"def flowtuple_from_raw(raw, linktype=1):\n ip = iplayer_from_raw(raw, linktype)\n\n if isinstance(ip, dpkt.ip.IP):\n sip, dip = socket.inet_ntoa(ip.src), socket.inet_ntoa(ip.dst)\n proto = ip.p\n sport, dport = 0, 0\n l3 = ip.data\n # try to get the layer 3 source and destination port, but its ok if this fails,\n # which will happen when we get IP fragments or packets with invalid checksums\n try:\n sport, dport = l3.sport, l3.dport\n except AttributeError:\n pass\n\n else:\n sip, dip, proto = 0, 0, -1\n sport, dport = 0, 0\n\n flowtuple = (sip, dip, sport, dport, proto)\n return flowtuple",
"def payload_from_raw(raw, linktype=1):\n ip = iplayer_from_raw(raw, linktype)\n try: return ip.data.data\n except:\n return \"\"",
"def to_network_layer(packet):\r\n print(f\"[to_network_layer] packet:{packet}\")",
"def decode(cls, data):\n status = struct.unpack('B', data[1])[0]\n # Power ACK is bit 2\n power_ack = (status & 0x04) >> 2\n # Datarate ACK is bit 1\n datarate_ack = (status & 0x02) >> 1\n # Channelmask ACK is bit 0\n channelmask_ack = status & 0x01\n return LinkADRAns(power_ack, datarate_ack, channelmask_ack)",
"def make_packet(self, type, data): \n return (\"{}\\x00{}\\x00{}\".format(type, data, self.ID)).encode()",
"def __str__(self):\n return '\\n%(source)s > %(type)s (0x%(type_d).2x)\\n%(data)s' % \\\n {'type': DGTL.pkt_type_str[self.type], 'type_d': self.type,\n 'data': str(self.decoded) if self.decoded else 'Unknown raw data.',\n 'source': self.source}",
"def decode(cls, raw: bytes) -> \"EthernetHeader\":\n # unsigned char dmac[6];\n # unsigned char smac[6];\n # uint16_t ethertype;\n # unsigned char payload[];\n dmac = raw[:6]\n smac = raw[6:12]\n typ = socket.htons(struct.unpack(\"H\", raw[12:14])[0])\n payload = raw[14:]\n return EthernetHeader(dmac=dmac, smac=smac, typ=typ, payload=payload)",
"def decode(self, eth):\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_ARP:\n\t\t\t# print 'arp'\n\t\t\treturn ARP(eth.data).get()\n\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP6:\n\t\t\tip = eth.data\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# multicast is just like IPv4\n\t\t\t\tif udp.dport == 5353:\n\t\t\t\t\t# print udp\n\t\t\t\t\tans = mDNS(udp).get()\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\t# pp.pprint(ans)\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\treturn ans\n\n\t\t\t\t# print 'IPv6 UDP','port:',udp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# TCP not useful\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\tpass\n\t\t\t\t# tcp = ip.data\n\t\t\t\t# print 'IPv6 TCP','port:',tcp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# ICMP error msg not useful for mapping\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t\t# print 'IPv6 icmp6:',ip.data.data\n\t\t\t\tpass\n\n\t\t\t# other stuff I haven't decoded\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t# print 'IPv6',ip.p,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t\tip = eth.data\n\n\t\t\t# roku interface port: 1900 dst: 239.255.255.250 1900\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# these aren't useful\n\t\t\t\tif udp.dport == 53: # DNS\n\t\t\t\t\t# return DNS(udp.data)\n\t\t\t\t\treturn {}\n\n\t\t\t\telif udp.dport == 5353: # mDNS\n\t\t\t\t\t# print 'mDNS'\n\t\t\t\t\t# print udp\n\t\t\t\t\treturn mDNS(udp).get()\n\n\t\t\t\telif self.getip(ip.dst) == '239.255.255.250':\n\t\t\t\t\treturn {}\n\n\t\t\t\telse:\n\t\t\t\t\t# don't print standard ports\n\t\t\t\t\t# 17500 dropbox\n\t\t\t\t\t# if not ip.data.dport in [17500]:\n\t\t\t\t\t# \tprint 'other udp','port:',udp.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst),': '\n\t\t\t\t\treturn {}\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\t# src = self.getip(ip.src)\n\t\t\t\t# if netaddr.IPAddress(src) not in netaddr.IPNetwork(\"192.168.1.0/24\"):\n\t\t\t\t# \twho = ''\n\t\t\t\t# \tif src not in self.ipMap:\n\t\t\t\t# \t\twho = WhoIs(src).record['NetName']\n\t\t\t\t# \t\tself.ipMap[src] = who\n\t\t\t\t# \telse:\n\t\t\t\t# \t\twho = self.ipMap[src]\n\t\t\t\t# \tif who in ['GOOGLE','AKAMAI','APPLE-WWNET','AMAZO-ZIAD1','DROPBOX']:\n\t\t\t\t# \t\treturn {}\n\t\t\t\t# \telse:\n\t\t\t\t# \t\tprint src,who\n\t\t\t\t# don't print standard ports\n\t\t\t\t# port 58969 - XSANS Apple, why do i see that?\n\t\t\t\t# 22 ssh\n\t\t\t\t# 25 smtp\n\t\t\t\t# 80 http\n\t\t\t\t# 123 time server\n\t\t\t\t# 143 imap\n\t\t\t\t# 443 https\n\t\t\t\t# 445 smb\n\t\t\t\t# 548 afp over tcp\n\t\t\t\t# 5009 airport admin utility\n\t\t\t\t# 5222 ichat\n\t\t\t\t# 17500 dropbox\n\t\t\t\t# if not ip.data.dport in [22,25,80,123,143,443,445,548,5009,5222,17500]:\n\t\t\t\t\t# print 'other tcp','port:',ip.data.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}\n\t\t\t# elif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t# \tprint '?????? other icmp6','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telif ip.p == 2:\n\t\t\t\tpass\n\t\t\t\t# print 'IGMP','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telse:\n\t\t\t\t# print 'other ip packet','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}",
"def as_packet(self, ptype):\n return ctypes.cast(self.ptr, ctypes.POINTER(ptype))[0]",
"def decode(cls, data):\n if len(data) == 0:\n return None\n cid = struct.unpack('B', data[0])[0]\n if cid == LINKCHECKREQ:\n return LinkCheckReq.decode(data)\n elif cid == LINKADRANS:\n return LinkADRAns.decode(data)\n # TODO\n #elif cid == DUTYCYCLEANS:\n # return DutyCycleReq.decode(data)\n #elif cid == RXPARAMSETUPANS:\n # return RxParamSetupReq.decode(data)\n #elif cid == DEVSTATUSANS:\n # return DevStatusReq.decode(data)\n #elif cid == NEWCHANNELANS:\n # return NewChannelReq.decode(data)\n #elif cid == RXTIMINGSETUPANS:\n # return RxTimingSetupReq.decode(data)\n else:\n return None",
"def getRecord(self, rr):\n\t\tif rr.type == 1: return {'type': 'a', 'ipv4': socket.inet_ntoa(rr.rdata), 'hostname': rr.name}\n\t\telif rr.type == 28: return {'type': 'aaaa', 'ipv6': socket.inet_ntop(socket.AF_INET6, rr.rdata), 'hostname': rr.name}\n\t\telif rr.type == 5: return {'type': 'cname', 'hostname': rr.name, 'cname': rr.cname}\n\t\telif rr.type == 13: return {'type': 'hostinfo', 'hostname': rr.name, 'info': rr.rdata}\n\t\telif rr.type == 33: return {'type': 'srv', 'hostname': rr.srvname, 'port': rr.port, 'srv': rr.name.split('.')[-3], 'proto': rr.name.split('.')[-2]}\n\t\telif rr.type == 12: return {'type': 'ptr'}\n\t\telif rr.type == 16: return {'type': 'txt'}\n\t\telif rr.type == 10: return {'type': 'wtf'}",
"def gen_broadlink_from_raw(data, repeat=0):\n yield from b'\\x26' # IR\n yield from repeat.to_bytes(1, byteorder='big') # Repeat\n\n # all broadlink ir captures will end with\n # 0x00 0x0d 0x05, which is just a long\n # trailing silence in the command set.\n # On generation we just need to ensure\n # our data ends with silence.\n trailing_silience = -101502.0\n\n def encode_one(x):\n # v = abs(int(i / 32.84))\n v = abs(round(x * 269 / 8192))\n if v > 255:\n yield from b'\\x00'\n yield from v.to_bytes(2, byteorder='big')\n else:\n yield from v.to_bytes(1, byteorder='big')\n\n def encode_list(x):\n for i in raw.paired(raw.simplify(x), trailing_silience):\n yield from encode_one(i)\n\n c = bytearray(encode_list(data))\n count = len(c)\n yield from count.to_bytes(2, byteorder='little')\n yield from c\n\n # calculate total length for padding\n count += 4 # header+len+trailer\n count += 4 # rm.send_data() 4 byte header (not seen here)\n if count % 16:\n yield from bytearray(16 - (count % 16))",
"def unwrap(self, packet_raw):\n (self.ip_ihl_ver, self.ip_tos_ecn, self.ip_tot_len, self.ip_id, self.ip_flag_frag, \\\n self.ip_ttl, self.ip_proto) = unpack('!BBHHHBB', packet_raw[0:10])\n (self.ip_check) = unpack('H', packet_raw[10:12])\n (src_addr, dest_addr) = unpack('!4s4s', packet_raw[12:20])\n\n self.ip_ihl = self.ip_ihl_ver & 0x0f\n self.ip_ver = (self.ip_ihl_ver & 0xf0) >> 4\n self.ip_tos = (self.ip_tos_ecn & 0xfc) >> 2\n self.ip_ecn = self.ip_tos_ecn & 0x03\n self.ip_flag_df = (self.ip_flag_frag & 0x40) >> 14\n self.ip_flag_mf = (self.ip_flag_frag & 0x20) >> 13\n self.ip_frag_off = self.ip_flag_frag & 0x1f\n\n self.ip_saddr = socket.inet_ntoa(src_addr)\n self.ip_daddr = socket.inet_ntoa(dest_addr)\n self.data = packet_raw[self.ip_ihl*4:self.ip_tot_len]\n\n pesudo_ip_header = packet_raw[0:10] + pack('H', 0) + packet_raw[12:20]\n new_chksum = network_chksum(pesudo_ip_header)\n if self.ip_check != new_chksum:\n raise ValueError",
"def from_network_layer(buffer):\r\n packet = buffer.get_packet()\r\n # print(f'buffer.message:{buffer.message}')\r\n # if packet == None:\r\n # print(f\"[from_network_layer] packet:NULL\")\r\n print(f\"[from_network_layer] packet:{packet}\")\r\n return packet",
"def packet_to_str(packet: PacketDescription, simple_diagrams=False, force_show_frames='', show_timestamp=False) \\\n -> PacketDiagramDescription:\n protocol = packet.protocols_str\n note_color = ''\n packet_str = ''\n if 'NGAP' in protocol:\n if nas_req_regex.search(packet.msg_description) is not None:\n note_color = ' {0}'.format(color_nas_req)\n protocol = 'NAS req.'\n else:\n note_color = ' {0}'.format(color_nas_rsp)\n protocol = 'NGAP msg. or NAS rsp.'\n\n # Search NGAP messages\n ngap_matches = ngap_message_type_regex.finditer(packet.msg_description)\n ngap_message_types = [ngap_match.group(1) for ngap_match in ngap_matches if ngap_match is not None]\n if len(ngap_message_types) > 0:\n ngap_seen = set()\n ngap_seen_add = ngap_seen.add\n ngap_message_types = ['NGAP {0}'.format(x) for x in ngap_message_types if\n not (x in ngap_seen or ngap_seen_add(x))]\n\n # Search NAS messages\n nas_matches = nas_message_type_regex.finditer(packet.msg_description)\n nas_message_types = [nas_match.group(1) for nas_match in nas_matches if nas_match is not None]\n if len(nas_message_types) > 0:\n # Remove duplicates: https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-whilst-preserving-order\n nas_seen = set()\n nas_seen_add = nas_seen.add\n nas_message_types = ['NAS {0}'.format(x) for x in nas_message_types if\n not (x in nas_seen or nas_seen_add(x))]\n\n # Print msg. type\n joint_ngap_nas_msg_types = ngap_message_types + nas_message_types\n if len(joint_ngap_nas_msg_types) > 0:\n protocol = '{0}'.format(',\\\\n'.join(joint_ngap_nas_msg_types))\n\n elif 'HTTP' in protocol:\n # Some customized filtering based on what we have seen\n rsp_match = http_rsp_regex.search(packet.msg_description)\n req_match = http_url_regex.search(packet.msg_description)\n if ('404 page not found' in packet.msg_description) or (rsp_match is not None):\n note_color = ' {0}'.format(color_http2_rsp)\n if rsp_match is not None:\n protocol = '{0} {1} rsp.'.format(protocol, rsp_match.group(1))\n else:\n protocol = protocol + ' 404 rsp.'\n elif req_match is not None:\n note_color = ' {0}'.format(color_http2_req)\n protocol = protocol + ' req.'\n else:\n note_color = ' {0}'.format(color_http2_req)\n protocol = protocol + ' req. or rsp. (no HTTP/2 headers)'\n\n match = list(http_url_regex.finditer(packet.msg_description))\n if len(match) > 0:\n method = ''\n method_match_all = http_method_regex.finditer(packet.msg_description)\n protocols = []\n for idx, method_match in enumerate(method_match_all):\n method = '{0} '.format(method_match.group(1))\n url_split = match[idx].group(1).split('?')\n protocols.append('{0} {1}'.format(method, url_split[0]))\n protocol = '{0}\\\\n'.format(protocol) + '\\\\n'.join(protocols)\n\n elif 'PFCP' in protocol:\n if pfcp_req_regex.search(packet.msg_description) is not None:\n note_color = ' {0}'.format(color_pfcp_req)\n protocol = protocol + ' req.'\n else:\n note_color = ' {0}'.format(color_pfcp_rsp)\n protocol = protocol + ' rsp.'\n\n match = pfcp_message_type_regex.search(packet.msg_description)\n if match is not None:\n protocol = '{0}\\\\n{1}'.format(protocol, match.group(1))\n\n elif 'GTPv2' in protocol:\n if gtpv2_req_regex.search(packet.msg_description) is not None:\n note_color = ' {0}'.format(color_gtpv2_req)\n protocol = protocol + ' req.'\n else:\n note_color = ' {0}'.format(color_gtpv2_rsp)\n protocol = protocol + ' req., rsp. 
or notification'\n\n match = gtpv2_message_type_regex.search(packet.msg_description)\n if match is not None:\n protocol = '{0}\\\\n{1}'.format(protocol, match.group(1))\n\n elif 'Diameter' in protocol or 'RADIUS' in protocol or \"GTP'\" in protocol:\n note_color = ' {0}'.format(color_diameter_radius_gtpprime)\n protocol = get_diam_description(packet)\n\n if show_timestamp:\n try:\n dt_object = datetime.fromtimestamp(packet.timestamp)\n if dt_object.tzinfo is None:\n tz_str = ''\n else:\n tz_str = ' {0}'.format(dt_object.tzinfo)\n timestamp_hour = ' ({0}:{1}:{2}.{3}{4})'.format(dt_object.hour, dt_object.minute, dt_object.second,\n dt_object.microsecond / 1000, tz_str)\n except:\n timestamp_hour = ''\n protocol = '{0}\\\\n+{1:.3f}s{2}'.format(protocol, packet.timestamp_offsett, timestamp_hour)\n\n frame_number = packet[2]\n packet_str = packet_str + '\"{0}\" -> \"{1}\": {2}, {3}\\n'.format(packet.ip_src, packet.ip_dst, frame_number, protocol)\n packet_str = packet_str + '\\nnote right{0}\\n'.format(note_color)\n\n force_show_frames = [e.strip() for e in force_show_frames.split(',')]\n if simple_diagrams and frame_number not in force_show_frames:\n packet_payload = ''\n else:\n packet_payload = packet.msg_description\n\n if packet_payload != '':\n packet_str = packet_str + '**{0} to {1}**\\n{2}\\n'.format(packet.ip_src, packet.ip_dst, packet_payload)\n else:\n packet_str = packet_str + '**{0} to {1}**\\n'.format(packet.ip_src, packet.ip_dst)\n packet_str = packet_str + 'end note\\n'\n packet_str = packet_str + '\\n'\n return PacketDiagramDescription(packet_str, packet.ip_src, packet.ip_dst, protocol)",
"def _parse_packet(packet: StreamMessageResponse) -> Packet:\n if packet is None:\n raise TypeError(\"Packet cannot be None!\")\n\n packet = MessageToDict(packet)\n\n # Decoding Header\n ingress_port_base64 = packet['packet']['metadata'][0]['value'].encode()\n ingress_port = base64.decodebytes(ingress_port_base64) # retrieving ingress_port; not used, yet\n\n # Decoding Payload\n packet = _scapy_parse(packet)\n\n return packet",
"def read_packet(self):\n\n\t\t#self.debug(\"READ BUFFER SIZE: %d\" % len(self.buff))\n\t\tbackup = self.buff[:]\n\t\tpacket = Packet()\n\t\ttry:\n\t\t\tpacket.direction = self.node\n\t\t\tpacket.ident = self.unpack('ubyte')\n\t\t\t\n\t\t\t#Defined structs from huge dict\n\t\t\tfor datatype, name in self.get_struct(packet):\n\t\t\t\t# this populates packet.data with {name: value}\n\t\t\t\tpacket.data[name] = self.unpack(datatype)\n\n\t\t\t# I believe the following are packet-type specific fixes for variable-length packets.\n\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tpacket.data['x2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['y2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['z2'] = self.unpack('short')\n\t\t\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data'] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = self.unpack_array_fast('short', packet.data['data_size'])\n#\t\t\t\tbtype = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tmetadata = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tpacket.data[\"blocks\"] = []\n#\t\t\t\tfor i in zip(coords, btype, metadata):\n#\t\t\t\t\tblock = {}\n#\t\t\t\t\tblock[\"x\"] =\t\ti[0] >> 12\n#\t\t\t\t\tblock[\"z\"] = 0x0F & i[0] >> 8\n#\t\t\t\t\tblock[\"y\"] = 0xFF & i[0]\n#\t\t\t\t\tblock[\"type\"] = i[1]\n#\t\t\t\t\tblock[\"metadata\"] = i[2]\n#\t\t\t\t\tpacket.data[\"blocks\"].append(block)\n#\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\trecords = self.unpack_array_fast('byte', packet.data['data_size']*3)\n\t\t\t\ti = 0\n\t\t\t\tpacket.data[\"blocks\"] = []\n\t\t\t\twhile i < packet.data['data_size']*3:\n\t\t\t\t\tpacket.data[\"blocks\"].append(dict(zip(('x','y','z'), records[i:i+3])))\n\t\t\t\t\ti+=3\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data[\"slots_data\"] = self.unpack_array('slot', packet.data[\"data_size\"])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\t#0x82:\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tpacket.data[\"text\"] = []\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"text\"].append(packet.data[\"line_%s\" % (i+1)])\n\t\t\t\t\t\n\t\t\t#0x83\n\t\t\tif packet.ident == 0x83:\n\t\t\t\tpacket.data[\"data\"] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\n\t\t\t# Sets packet.original to the byte string that the packet was decoded from.\n\t\t\tpacket.original = backup[:len(backup) - len(self.buff)]\n\n\t\t\treturn packet\n\n\t\texcept IncompleteData:\n\t\t\tself.buff = backup\n\t\t\treturn None\n\t\texcept Exception, ex:\n\t\t\tself.buff = backup\n\t\t\tex.args += (self.buff[20:],)\n\t\t\traise",
"def read_raw_packet(self):\n\n size = 0\n\n # Read our two-byte header from the debugger...\n while not size:\n size = (self._get_next_byte() << 16) | self._get_next_byte()\n\n # ... and read our packet.\n packet = bytearray([self._get_next_byte() for _ in range(size)])\n\n # Return our packet.\n # TODO: extract and provide status flags\n # TODO: generate a timestamp on-device\n return packet, datetime.now(), None",
"def next_connection_packets(piter, linktype=1):\n first_ft = None\n\n for ts, raw in piter:\n ft = flowtuple_from_raw(raw, linktype)\n if not first_ft: first_ft = ft\n\n sip, dip, sport, dport, proto = ft\n if not (first_ft == ft or first_ft == (dip, sip, dport, sport, proto)):\n break\n\n yield {\n \"src\": sip, \"dst\": dip, \"sport\": sport, \"dport\": dport, \"proto\": proto,\n \"raw\": payload_from_raw(raw, linktype).encode(\"base64\"), \"direction\": first_ft == ft,\n }",
"def get_packet_type(pkt: packet.Packet) -> dict:\n\n pkt_metadata = {}\n pkt_metadata[\"type\"] = \"unsupported\"\n\n for index, protocol in enumerate(pkt.protocols, start=0):\n if type(protocol) == ipv4.ipv4:\n pkt_metadata[\"ipv4\"] = index\n pkt_metadata[\"ipv4_src\"] = protocol.src\n pkt_metadata[\"ipv4_dst\"] = protocol.dst\n elif type(protocol) == tcp.tcp:\n pkt_metadata[\"type\"] = \"tcp\"\n pkt_metadata[\"tcp\"] = index\n pkt_metadata[\"transport_layer\"] = index # Works for both TCP and UDP\n pkt_metadata[\"src_port\"] = protocol.src_port\n pkt_metadata[\"dst_port\"] = protocol.dst_port\n elif type(protocol) == udp.udp:\n pkt_metadata[\"type\"] = \"udp\"\n pkt_metadata[\"udp\"] = index\n pkt_metadata[\"transport_layer\"] = index # Works for both TCP and UDP\n pkt_metadata[\"src_port\"] = protocol.src_port\n pkt_metadata[\"dst_port\"] = protocol.dst_port\n elif type(protocol) == icmp.icmp:\n pkt_metadata[\"type\"] = \"icmp\"\n pkt_metadata[\"icmp\"] = index\n pkt_metadata[\"icmp_type\"] = protocol.type\n pkt_metadata[\"icmp_code\"] = protocol.code\n\n return pkt_metadata",
"def decode_packet(self, bytes):\n b64 = False\n if not isinstance(bytes, six.binary_type):\n bytes = bytes.encode('utf-8')\n\n packet_type = six.byte2int(bytes[0:1])\n if packet_type == ord('b'):\n binary = True\n bytes = bytes[1:]\n packet_type = int(chr(six.byte2int(bytes[0:1])))\n b64 = True\n elif packet_type >= ord('0'):\n packet_type = int(chr(packet_type))\n binary = False\n else:\n binary = True\n\n packet_data = None\n if len(bytes) > 1:\n if binary:\n if b64:\n packet_data = base64.b64decode(bytes[1:])\n else:\n packet_data = bytes[1:]\n else:\n packet_data = bytes[1:].decode('utf-8')\n\n return Packet(packet_type, packet_data, binary)",
"def create_packet_definition(packet_to_send):\n source_mac = \"00:00:00:00:00:01\"\n destination_mac = \"00:00:00:00:00:02\"\n source_ip = \"10.10.10.1\"\n destination_ip = \"10.10.10.2\"\n source_ip6 = 'fe80::214:f2ff:fe07:af0'\n destination_ip6 = 'ff02::1'\n sport = 1\n dport = 2\n tos = 4\n if packet_to_send[\"type\"] == \"ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {}})\n elif packet_to_send[\"type\"] == \"tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"vlan\"],\n \"prio\": packet_to_send[\"priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"tcp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"double_tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"outer_vlan\"], \"type\": 0x8100,\n \"prio\": packet_to_send[\"outer_priority\"]}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"inner_vlan\"], \"type\": 0x0800,\n \"prio\": packet_to_send[\"inner_priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"arp\":\n packet_definition = (\n {\"Ether\": {\"src\": source_mac, \"dst\": 'FF:FF:FF:FF:FF:FF', \"type\": 0x0806}},\n {\"ARP\": {\"op\": 1, \"hwsrc\": source_mac,\n \"psrc\": source_ip, \"pdst\": destination_ip}},)\n elif packet_to_send[\"type\"] == \"arp_reply_tagged\":\n packet_definition = ({\"Ether\": {\"src\": source_mac, \"dst\": destination_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": 2}},\n {\"ARP\": {\"op\": 2, \"hwsrc\": source_mac, \"hwdst\": destination_mac,\n \"pdst\": destination_ip, \"psrc\": source_ip}}, )\n elif packet_to_send[\"type\"] == \"icmp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"proto\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n elif packet_to_send[\"type\"] == \"ipv6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"plen\": 64, \"tc\": 225}})\n elif packet_to_send[\"type\"] == \"tcp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 6}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, 
\"nh\": 17}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"icmp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n return packet_definition",
"def _scapy_parse(packet: dict) -> Packet:\n try:\n payload_base64 = packet['packet']['payload'].encode()\n\n # assuming it has a Ethernet layer. Scapy will handle the rest.\n packet = Ether(base64.decodebytes(payload_base64))\n\n if IP in packet:\n return packet\n\n return None # actually not interested in packet not having IP layer\n except Exception as e: # FIXME\n logging.debug(e)",
"def pkt_type(self):\n return uint16_packer.unpack(self[32:34])[0]",
"def packet_decoder(packet_type,string):\n dct = json.loads(string)\n if packet_type == HS_Version:\n return HS_Version(dct['version'])\n if packet_type == HS_Options:\n return HS_Options(minport=dct['minport'], maxport=dct['maxport'],\n portusage=dct['portusage'], protocol=dct['protocol'],\n timeout=dct['timeout'], payload=dct['payload'],\n key=dct['key'])\n if packet_type == Data:\n return Data(data=dct['data'], terminate=int(dct['terminate']))\n if packet_type == Management:\n return Management(dct['command'],location=dct['location'])\n if packet_type == Switching:\n return Switching(dct['status'])\n if packet_type == Error:\n return Error()",
"def spoof_packet(packet):",
"def arp_parse(data):\n\t# Iteratize pkt\n\tpkt = packet.Packet(data)\n\ti = iter(pkt)\n\teth_pkt = next(i)\n\t\t# Ensure it's an ethernet frame.\n\tassert isinstance(eth_pkt, ethernet.ethernet)\n\n\tarp_pkt = next(i)\n\tif not isinstance(arp_pkt, arp.arp):\n\t\traise ARPPacket.ARPUnknownFormat()\n\n\tif arp_pkt.opcode not in (ARP_REQUEST, ARP_REPLY):\n\t\traise ARPPacket.ARPUnknownFormat(\n\t\t\tmsg='unsupported opcode %d' % arp_pkt.opcode)\n\n\tif arp_pkt.proto != ETH_TYPE_IP:\n\t\traise ARPPacket.ARPUnknownFormat(\n\t\t\tmsg='unsupported arp ethtype 0x%04x' % arp_pkt.proto)\n\n\treturn arp_pkt",
"def gen_raw_from_broadlink(data):\n v = iter(data)\n code = next(v)\n next(v) # repeat\n\n assert code == 0x26 # IR\n\n length = int.from_bytes(islice(v, 2), byteorder='little')\n assert length >= 3 # a At least trailer\n \n def decode_one(x):\n return round(x * 8192 / 269, 0)\n\n def decode_iter(x):\n sign = 1\n while True:\n try:\n d = next(x)\n except StopIteration:\n return\n if d == 0:\n d = int.from_bytes(islice(x, 2), byteorder='big')\n \n yield sign * decode_one(d)\n sign = sign * -1\n \n yield from decode_iter(islice(v, length))\n\n rem = list(v)\n if any(rem):\n LOG.warning(\"Ignored extra data: %s\", rem)"
] | [
"0.68164754",
"0.62622094",
"0.5889911",
"0.5856772",
"0.5697665",
"0.52689415",
"0.5258935",
"0.52217716",
"0.5157317",
"0.51023954",
"0.5042225",
"0.5041668",
"0.49423012",
"0.49230462",
"0.4902196",
"0.4897916",
"0.4865398",
"0.48552364",
"0.4845798",
"0.48386303",
"0.48383638",
"0.48315713",
"0.48302725",
"0.48204654",
"0.47920337",
"0.4756438",
"0.47318614",
"0.47158575",
"0.46967518",
"0.4692864"
] | 0.63520455 | 1 |
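The iplayer_from_raw document above returns only the IP layer of a single captured frame. A minimal usage sketch, assuming dpkt is installed and iplayer_from_raw is defined as in that row; the capture file name is a placeholder:

import socket
import dpkt

with open("capture.pcap", "rb") as f:  # placeholder file name
    pcap = dpkt.pcap.Reader(f)
    linktype = pcap.datalink()  # 1 = Ethernet, 101 = raw IP, matching the function above
    for ts, raw in pcap:
        ip = iplayer_from_raw(raw, linktype)
        if isinstance(ip, dpkt.ip.IP):
            print(ts, socket.inet_ntoa(ip.src), "->", socket.inet_ntoa(ip.dst))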
Extract all packets belonging to the same flow from a pcap packet iterator | def next_connection_packets(piter, linktype=1):
first_ft = None
for ts, raw in piter:
ft = flowtuple_from_raw(raw, linktype)
if not first_ft: first_ft = ft
sip, dip, sport, dport, proto = ft
if not (first_ft == ft or first_ft == (dip, sip, dport, sport, proto)):
break
yield {
"src": sip, "dst": dip, "sport": sport, "dport": dport, "proto": proto,
"raw": payload_from_raw(raw, linktype).encode("base64"), "direction": first_ft == ft,
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter(self):\n # outfile = open(self.newpcap, 'wb')\n # writer = dpkt.pcap.Writer(outfile)\n f = open(self.pcapfile, 'rb')\n packets = dpkt.pcap.Reader(f)\n\n for timestamp, buf in packets:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP): # 确保以太网数据包含一个IP数据包, Non IP Packet type not supported\n continue # 过滤空IP包\n ip = eth.data # 获取以太网帧(IP数据包)\n if not isinstance(ip.data, dpkt.tcp.TCP): # 在传输层中检查TCP\n continue\n tcp = ip.data # 获取tcp数据\n # print('-->TCP Data: ', repr(tcp))\n\n \"\"\" 过滤三次握手后的首包\"\"\"\n seq = self.seq_pattern.findall(repr(tcp))\n ack = self.ack_pattern.findall(repr(tcp))\n if not (seq or ack): # seq、ack必须有一个, 一真即真\n continue\n if ack:\n ack = ack[0]\n if seq:\n seq = seq[0]\n\n if not ack and seq: # 一次握手请求\n self.hash_table[seq] = {}\n self.stream_table[seq] = [buf]\n if ack and seq: # 二次、三次、交流包\n if str(int(ack) - 1) in self.hash_table.keys(): # 有一次握手记录\n number = str(int(ack) - 1)\n if 'second' not in self.hash_table[number].keys(): # 新增二次握手\n self.hash_table[number]['second'] = {'seq': seq, 'ack': ack}\n self.stream_table[number].append(buf) # 将二次握手添加到buf\n self.resp_relation[seq] = ack # 新增关系表\n\n # 存在二次握手记录, 看hash表有无第三次握手记录, 有就保存stream流\n # 基本就是traffic响应包了\n elif 'three' in self.hash_table[number].keys():\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n\n # ack-1没有对应的hash表, 可能是三次握手或traffic请求包\n elif str(int(seq) - 1) in self.hash_table.keys():\n number = str(int(seq) - 1)\n if 'second' not in self.hash_table[number]:\n pass\n elif 'three' not in self.hash_table[number]: # 三次包\n self.hash_table[number]['three'] = {'seq': seq, 'ack': ack}\n self.stream_table[number].append(buf)\n # 否则就是traffic包了\n else:\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n # traffic响应包\n elif str(int(seq) - 1) in self.resp_relation.keys():\n number = str(int(seq) - 1)\n second_ack = self.resp_relation[number]\n number = str(int(second_ack) - 1)\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n else:\n continue # seq不存在\n\n # outfile.close()\n f.close()",
"def iter_packets(iterable):\n prev = None\n\n for i in sorted(iterable, key=attrgetter('seq')):\n if prev is None or prev.seq != i.seq:\n prev = i\n yield i",
"def udp_iterator(pc):\n\tfor time,pkt in pc:\n\t\teth = dpkt.ethernet.Ethernet(pkt)\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t ip = eth.data\n\t\t # if the IP protocol is UDP, process it further\n\t\t if ip.p == dpkt.ip.IP_PROTO_UDP :\n\t\t\tudp = ip.data\n\t\t\tyield( ip.src, udp.sport, ip.dst, udp.dport, udp.data )",
"def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info",
"def packets_for_stream(fobj, offset):\n pcap = dpkt.pcap.Reader(fobj)\n pcapiter = iter(pcap)\n ts, raw = pcapiter.next()\n\n fobj.seek(offset)\n for p in next_connection_packets(pcapiter, linktype=pcap.datalink()):\n yield p",
"def print_packets(pcap):\n\n # For each packet in the pcap process the contents\n for timestamp, buf, hdr_len in pcap:\n \n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n\n # Make sure the Ethernet data contains an IP packet\n if not isinstance(eth.data, dpkt.ip.IP):\n # print('Non IP Packet type not supported %s\\n' % eth.data.__class__.__name__)\n continue\n\n # Now unpack the data within the Ethernet frame (the IP packet)\n # Pulling out src, dst, length, fragment info, TTL, and Protocol\n ip = eth.data\n\n # Pull out fragment information (flags and offset all packed into off field, so use bitmasks)\n # do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)\n # more_fragments = bool(ip.off & dpkt.ip.IP_MF)\n # fragment_offset = ip.off & dpkt.ip.IP_OFFMASK\n\n # Print out the info\n # print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\\n' % \\\n # (inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)) \n\n pkt = Packet(timestamp, buf, hdr_len)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP or ip.p == dpkt.ip.IP_PROTO_UDP: \n # all flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in all_flows:\n all_flows[flow] = [pkt]\n else:\n x = len(all_flows[flow]) - 1\n if x < 0:\n all_flows[flow].append(pkt)\n else:\n if time_diff(all_flows[flow][x].timestamp, timestamp) <= 5400: #90mins\n all_flows[flow].append(pkt)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP: \n # TCP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in tcp_flows:\n tcp_flows[flow] = [pkt]\n else:\n x = len(tcp_flows[flow]) - 1\n if x < 0:\n tcp_flows[flow].append(pkt)\n else:\n if time_diff(tcp_flows[flow][x].timestamp, timestamp) <= 5400:\n tcp_flows[flow].append(pkt)\n all_host_pairs(pkt, ip)\n elif ip.p == dpkt.ip.IP_PROTO_UDP:\n # UDP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in udp_flows:\n udp_flows[flow] = [pkt]\n else:\n x = len(udp_flows[flow]) - 1\n if x < 0:\n udp_flows[flow].append(pkt)\n else:\n if time_diff(udp_flows[flow][x].timestamp, timestamp) <= 5400:\n udp_flows[flow].append(pkt)\n else:\n continue\n\n print(\"Number of All flows: %d | Number of TCP flows: %d | Number of UDP flows: %d\" % (len(all_flows), len(tcp_flows), len(udp_flows)))\n\n # -- Flow Duration\n for f in all_flows:\n size = len(all_flows[f])\n if size >= 2:\n all_flow_dur.append(time_diff(all_flows[f][0].timestamp, all_flows[f][size-1].timestamp))\n \n for f in tcp_flows:\n size = len(tcp_flows[f])\n if size >= 2:\n tcp_flow_dur.append(time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp))\n \n for f in udp_flows:\n size = len(udp_flows[f])\n if size >= 2:\n udp_flow_dur.append(time_diff(udp_flows[f][0].timestamp, udp_flows[f][size-1].timestamp))\n\n print \"lens: \", len(all_flow_dur), len(tcp_flow_dur), len(udp_flow_dur)\n\n # -- Flow Size\n for f in all_flows:\n f_bytes = 0\n size = len(all_flows[f])\n all_flow_size_pkt.append(size)\n for p in all_flows[f]:\n f_bytes += p.length\n all_flow_size_byte.append(f_bytes)\n \n for f in tcp_flows:\n f_bytes = 0\n f_overhead = 0\n size = len(tcp_flows[f])\n tcp_flow_size_pkt.append(size)\n for p in tcp_flows[f]:\n f_bytes += p.length\n f_overhead += 18 + 20 #+ tcp_hdr\n tcp_flow_size_byte.append(f_bytes)\n if f_bytes == 0:\n f_bytes = 9999\n tcp_flow_size_overhead.append(f_overhead/float(f_bytes))\n \n for f in 
udp_flows:\n f_bytes = 0\n size = len(udp_flows[f])\n udp_flow_size_pkt.append(size)\n for p in udp_flows[f]:\n f_bytes += p.length\n udp_flow_size_byte.append(f_bytes)\n\n # -- Inter-packet Arrival time\n for f in all_flows:\n for i in range(len(all_flows[f])-1):\n all_flow_time.append(time_diff(all_flows[f][i].timestamp, all_flows[f][i+1].timestamp))\n\n for f in tcp_flows:\n for i in range(len(tcp_flows[f])-1):\n tcp_flow_time.append(time_diff(tcp_flows[f][i].timestamp, tcp_flows[f][i+1].timestamp))\n\n for f in udp_flows:\n for i in range(len(udp_flows[f])-1):\n udp_flow_time.append(time_diff(udp_flows[f][i].timestamp, udp_flows[f][i+1].timestamp))\n\n # -- TCP State\n for f in tcp_flows:\n size = len(tcp_flows[f])\n last_pkt = tcp_flows[f][size-1]\n tcp = dpkt.ethernet.Ethernet(last_pkt.buf).data.data\n \n if (tcp.flags & dpkt.tcp.TH_SYN) != 0:\n f.state = 'Request'\n elif (tcp.flags & dpkt.tcp.TH_RST) != 0:\n f.state = 'Reset'\n elif (tcp.flags & dpkt.tcp.TH_FIN) != 0 and (tcp.flags & dpkt.tcp.TH_ACK) != 0:\n f.state = 'Finished'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) <= 300:\n f.state = 'Ongoing'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) > 300 \\\n and (tcp.flags & dpkt.tcp.TH_RST) == 0 and (tcp.flags & dpkt.tcp.TH_FIN) == 0:\n f.state = 'Failed'\n\n show_cdf_graphs()",
"def process_pkts(self, pkts: list):\n pkt_count = 0\n for ts, buf in pkts:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP):\n continue\n ip = eth.data\n if ((inet_to_str(ip.src) == self.sip and inet_to_str(ip.dst) == self.dip) or\n (inet_to_str(ip.src) == self.dip and inet_to_str(ip.dst) == self.sip)):\n if isinstance(ip.data, dpkt.tcp.TCP):\n tcp = ip.data\n if ((tcp.sport == self.sp and tcp.dport == self.dp) or\n (tcp.dport == self.sp and tcp.sport == self.dp)):\n pkt_count += 1\n self._process(buf, ts, pkt_count)\n if self._c_state == self._s_state and self._c_state == TCPState.CLOSED:\n logger.info(\"Session finished.\")\n logger.info(\"Number of packets in the session id: {} is {}\".format(\n self.session_count, len(self.sessions[self.session_count])))\n self.__reset_state__()",
"def read_pkt_seq(self):\n pkt = self.read_pkt_line()\n while pkt:\n yield pkt\n pkt = self.read_pkt_line()",
"def accumulate_packets():\n l = []\n packets = sniff(count=NUMBER_OF_SNIFFING_ROUNDS, lfilter=fltr, prn=printing)\n print(\"Processing packets!\")\n for packet in packets:\n l.append({\"ip\": get_ip(packet),\n \"country\": get_country(packet),\n \"entering\": is_entering(packet),\n \"port\": get_partner_port(packet),\n \"size\": packet[IP].len, #the len of the ip layer is the len of the entire packet\n \"program\": get_program(packet)})\n return l",
"def __iter__(self) -> Iterator[packets.Packet]:\n for packet in self._packets:\n yield packet\n for pointer in self._packet_pointers:\n yield pointer.get()",
"def pull(self):\n\n # For each packet in the pcap process the contents\n for item in self.input_stream:\n\n # Print out the timestamp in UTC\n print('%s -' % item['timestamp'], end='')\n\n # Transport info\n if item['transport']:\n print(item['transport']['type'], end='')\n\n # Print out the Packet info\n packet_type = item['packet']['type']\n print(packet_type, end='')\n packet = item['packet']\n if packet_type in ['IP', 'IP6']:\n # Is there domain info?\n if 'src_domain' in packet:\n print('%s(%s) --> %s(%s)' % (net_utils.inet_to_str(packet['src']), packet['src_domain'],\n net_utils.inet_to_str(packet['dst']), packet['dst_domain']), end='')\n else:\n print('%s --> %s' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst'])), end='')\n else:\n print(str(packet))\n\n # Only include application if we have it\n if item['application']:\n print('Application: %s' % item['application']['type'], end='')\n print(str(item['application']), end='')\n\n # Just for newline\n print()",
"def get_pcap_traffic_series(self):\n parsed_pcap_data = {}\n\n if (self.mac_address_binary is not None):\n parsed_pcap_data[self.mac_address_binary] = []\n\n with open(self.pcap_file_path, 'rb') as pcap_file:\n try:\n pcap = dpkt.pcap.Reader(pcap_file)\n for ts, buf in pcap:\n # Skip non ethernet frames\n try:\n eth = dpkt.ethernet.Ethernet(buf)\n except:\n continue\n\n # Skip non-IP packets\n if eth.type != 2048:\n continue\n \n # Apply eth filter\n if (self.mac_address_binary is not None):\n self.append_data(parsed_pcap_data, self.mac_address_binary, eth, ts)\n else:\n if (eth.src not in parsed_pcap_data):\n parsed_pcap_data[eth.src] = []\n if (eth.dst not in parsed_pcap_data):\n parsed_pcap_data[eth.dst] = []\n\n self.append_data(parsed_pcap_data, eth.src, eth, ts)\n self.append_data(parsed_pcap_data, eth.dst, eth, ts)\n except:\n print \"Error parsing file: %s\" % pcap_file\n \n # Remove mac addresses that didn't send data\n receivers_only = []\n for mac_addr in parsed_pcap_data:\n data_sent = False\n for data in parsed_pcap_data[mac_addr]:\n if (data[1] > 0):\n data_sent = True\n break\n if (not data_sent):\n receivers_only.append(mac_addr)\n\n for mac_addr in receivers_only:\n parsed_pcap_data.pop(mac_addr, None)\n\n # Sort the data \n for mac_addr in parsed_pcap_data:\n series = sorted(parsed_pcap_data[mac_addr], key=operator.itemgetter(0))\n parsed_pcap_data[mac_addr] = series\n\n return parsed_pcap_data",
"def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]",
"def _parse(self):\n \n global _tcp_buf\n _tcp_buf = {}\n number = 1\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n self.pcap_packets.append(pcap_packet)\n pcap_packet.pcap_num = number\n number += 1\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n \n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip.packet[pcap_packet.ip.header_len: ])\n \n #skip the packets that is not http packet\n if (pcap_packet.tcp.src_port != 80 and pcap_packet.tcp.dst_port != 80):\n continue\n \n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, pcap_packet.pcap_num)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]",
"def parse_pkt_list(self, pkt_list):\n flow_pkts = {}\n for (t, pkt) in pkt_list:\n flowID = self.extract_flowID(pkt)\n if flowID not in flow_pkts.keys():\n flow_pkts[flowID] = [(t, pkt)]\n else:\n flow_pkts[flowID].append((t,pkt))\n return flow_pkts",
"def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow 
ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = 
parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)",
"def cleanse(packets):\n pkts = []\n retran = False\n lost = False\n for pkt in packets:\n if len(pkt['data']) > 0:\n # If first packet just add and move on\n if len(pkts) == 0:\n pkts.append(pkt)\n next_seq = pkt['tcp']['seq_num'] + len(pkt['data'])\n # If next seq num is = to this one add this pkt\n elif pkt['tcp']['seq_num'] == next_seq:\n pkts.append(pkt)\n next_seq = pkt['tcp']['seq_num'] + len(pkt['data'])\n # If next seq num is > than this one there is a \n # Retransmission\n elif pkt['tcp']['seq_num'] < next_seq:\n retran = True\n elif pkt['tcp']['seq_num'] > next_seq:\n lost = True\n else:\n pass\n\n return pkts, retran, lost",
"def tcp_traceflow(packet, *, count=NotImplemented):\n if 'TCP' in packet:\n ip = packet['IP'] if 'IP' in packet else packet['IPv6']\n tcp = packet['TCP']\n data = dict(\n protocol=LINKTYPE.get(packet.name.upper()), # data link type from global header\n index=count, # frame number\n frame=packet2dict(packet), # extracted packet\n syn=bool(tcp.flags.S), # TCP synchronise (SYN) flag\n fin=bool(tcp.flags.F), # TCP finish (FIN) flag\n src=ipaddress.ip_address(ip.src), # source IP\n dst=ipaddress.ip_address(ip.dst), # destination IP\n srcport=tcp.sport, # TCP source port\n dstport=tcp.dport, # TCP destination port\n timestamp=time.time(), # timestamp\n )\n return True, data\n return False, None",
"def read_packets(serial_input):\n while 1:\n header = scan_to_headerword(serial_input)\n yield header.read_packet(serial_input)",
"def next_batch(self):\n\n while self.cap.isOpened():\n flag, frame = self.cap.read()\n yield frame",
"def parse_packet(packet, traffic_type, pkt_type, exp_dst, step):\n packet_count = 0\n if(traffic_type == \"encap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect encapsulation'\n print(\"Correct encapsulation\")\n\n elif(traffic_type == \"decap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect decapsulation'\n print(\"Correct decapsulation\")",
"def parse_pkt_list(self, log_pkt_list):\n flow_pkts = {}\n for pkt in log_pkt_list:\n flowID = pkt.flowID\n if flowID not in flow_pkts.keys():\n flow_pkts[flowID] = [(pkt.time, pkt)]\n else:\n flow_pkts[flowID].append((pkt.time, pkt))\n return flow_pkts",
"def group_packets(self, packets):\n sessions = packets.sessions() # groups connections from X to Y as a Scapy PacketList in a dict\n # example: dict['TCP 172.217.17.102:443 > 10.7.2.60:38386'] = PacketList\n\n session_keys = list(sessions.keys()) # force copy so we can alter the dictionary at runtime\n for key in session_keys:\n reversed_key = self.reverse_dict_key(key)\n if(reversed_key != key and sessions.__contains__(reversed_key)):\n sessions[key] += sessions.pop(reversed_key)\n session_keys.remove(reversed_key)\n\n return self.sort_grouped_packets(list(sessions.values()))",
"def pkt_gen(self, flow_id):\n i = 0\n fin_time = 0\n while i < self.num_pkts:\n #j = 0\n burst_len = 0\n #pyld = ''.join(choice(ascii_uppercase) for k in range(randint(6, 1460)))\n pyld = ''.join(choice(ascii_uppercase) for k in range(202))\n # create the test packets\n pkt = Ether()/IP()/TCP()/Raw(load=pyld)\n fin_time = round((len(pkt)/self.quantum)/self.weight)\n pkt_id = (flow_id, i)\n tuser = Tuser(len(pkt), fin_time, pkt_id)\n burst_len += len(pkt)\n print ('@ {:.2f} - Send: {} || {}'.format(self.env.now, pkt.summary(), tuser))\n\n # write the pkt and metadata into storage\n self.pkt_out_pipe.put((pkt, tuser))\n\n #j += 1\n i += 1\n if i == self.num_pkts:\n break\n \n # wait a number of clock cycles equivalent to the transmission time of the burst of packets\n #for j in range(PREAMBLE + len(pkt) + IFG):\n #yield self.wait_line_clks(j*self.PREAMBLE + burst_len + j*self.IFG)\n #print (\"f: {} - pkt end: {}\".format(self.flow_id, self.env.now))\n pkt_time = self.PREAMBLE + burst_len + self.IFG\n yield self.wait_line_clks(pkt_time)\n # Insert gap to maintain bit rate\n idle_time = round(pkt_time * self.idle_frac/self.actv_frac)\n #yield self.wait_line_clks(idle_time) # average gap is 64 bytes\n print (\"pkt_time: {} idle_time: {}\".format(pkt_time, idle_time))",
"def sniff_continuously(self, packet_count=None):\n \n self.lcapture_tshark = (self.lcapture_tshark or \n self.eventloop.run_until_complete(self._get_tshark_process()))\n\n self._running_processes.add(self.lcapture_tshark)\n\n # Retained for backwards compatibility and to add documentation.\n return self._packets_from_tshark_sync(packet_count=packet_count, \n tshark_process=self.lcapture_tshark)",
"def process_pcap(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n self.process_pkts(pkts)",
"def sniff_traffic(hs, count, timeout, traffic_type, pkt_type, exp_dst, step):\n iface = hs.ports['eth1']\n step('Scapy capture started')\n if (traffic_type == \"encap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)\n elif (traffic_type == \"decap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)",
"def _packets_from_tshark_sync(self, tshark_process, packet_count=None, timeout:float=3.0,\n max_data_length:int=10000):\n # NOTE: This has code duplication with the async version, think about how to solve this\n\n psml_structure, data = self.eventloop.run_until_complete(self._get_psml_struct(tshark_process.stdout))\n packets_captured = 0\n\n data = b\"\"\n try:\n while self.is_open.value:\n try:\n packet, data = self.eventloop.run_until_complete(\n self._get_packet_from_stream(tshark_process.stdout, \n data,\n psml_structure=psml_structure,\n got_first_packet=packets_captured > 0, \n timeout=timeout))\n except EOFError:\n echo(\"Caught EOF\", file=Interceptor.stdout)\n self._log.debug(\"EOF reached (sync)\")\n break\n\n if(packet is False): continue\n\n if packet:\n packets_captured += 1\n yield packet\n if packet_count and packets_captured >= packet_count:\n break\n if len(data) > max_data_length:\n data = b''\n finally:\n if tshark_process in self._running_processes:\n self.eventloop.run_until_complete(self._cleanup_subprocess(tshark_process))",
"def process(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n if self.sip and self.dip and self.sp and self.dp:\n self.process_pkts(pkts)",
"def _convert_packets_into_batch(self, packets):\n def filter_non_bootstrap_nodes():\n for candidate, packet in packets:\n cid = packet[2:22]\n\n if not cid in self._communities and False: # candidate.sock_addr[0] in self._non_autoload:\n if __debug__:\n logger.warn(\"drop a %d byte packet (received from non-autoload node) from %s\", len(packet), candidate)\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:from bootstrap node for unloaded community\")\n continue\n\n yield candidate, packet\n\n packets = list(filter_non_bootstrap_nodes())\n if packets:\n return super(TrackerDispersy, self)._convert_packets_into_batch(packets)\n\n else:\n return []"
] | [
"0.6305733",
"0.6296941",
"0.6157458",
"0.5966342",
"0.58530146",
"0.58158106",
"0.5761478",
"0.5597372",
"0.5591383",
"0.5550107",
"0.5509687",
"0.5483427",
"0.5468516",
"0.54531425",
"0.53616995",
"0.52512836",
"0.5180603",
"0.51701915",
"0.5169873",
"0.5168804",
"0.51653785",
"0.5126963",
"0.51233846",
"0.5106145",
"0.50977445",
"0.5079675",
"0.5016412",
"0.50007594",
"0.49968618",
"0.49965763"
] | 0.67187 | 0 |
Open a PCAP, seek to a packet offset, then get all packets belonging to the same connection | def packets_for_stream(fobj, offset):
pcap = dpkt.pcap.Reader(fobj)
pcapiter = iter(pcap)
ts, raw = pcapiter.next()
fobj.seek(offset)
for p in next_connection_packets(pcapiter, linktype=pcap.datalink()):
yield p | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_packets(pcap):\n\n # For each packet in the pcap process the contents\n for timestamp, buf, hdr_len in pcap:\n \n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n\n # Make sure the Ethernet data contains an IP packet\n if not isinstance(eth.data, dpkt.ip.IP):\n # print('Non IP Packet type not supported %s\\n' % eth.data.__class__.__name__)\n continue\n\n # Now unpack the data within the Ethernet frame (the IP packet)\n # Pulling out src, dst, length, fragment info, TTL, and Protocol\n ip = eth.data\n\n # Pull out fragment information (flags and offset all packed into off field, so use bitmasks)\n # do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)\n # more_fragments = bool(ip.off & dpkt.ip.IP_MF)\n # fragment_offset = ip.off & dpkt.ip.IP_OFFMASK\n\n # Print out the info\n # print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\\n' % \\\n # (inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)) \n\n pkt = Packet(timestamp, buf, hdr_len)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP or ip.p == dpkt.ip.IP_PROTO_UDP: \n # all flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in all_flows:\n all_flows[flow] = [pkt]\n else:\n x = len(all_flows[flow]) - 1\n if x < 0:\n all_flows[flow].append(pkt)\n else:\n if time_diff(all_flows[flow][x].timestamp, timestamp) <= 5400: #90mins\n all_flows[flow].append(pkt)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP: \n # TCP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in tcp_flows:\n tcp_flows[flow] = [pkt]\n else:\n x = len(tcp_flows[flow]) - 1\n if x < 0:\n tcp_flows[flow].append(pkt)\n else:\n if time_diff(tcp_flows[flow][x].timestamp, timestamp) <= 5400:\n tcp_flows[flow].append(pkt)\n all_host_pairs(pkt, ip)\n elif ip.p == dpkt.ip.IP_PROTO_UDP:\n # UDP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in udp_flows:\n udp_flows[flow] = [pkt]\n else:\n x = len(udp_flows[flow]) - 1\n if x < 0:\n udp_flows[flow].append(pkt)\n else:\n if time_diff(udp_flows[flow][x].timestamp, timestamp) <= 5400:\n udp_flows[flow].append(pkt)\n else:\n continue\n\n print(\"Number of All flows: %d | Number of TCP flows: %d | Number of UDP flows: %d\" % (len(all_flows), len(tcp_flows), len(udp_flows)))\n\n # -- Flow Duration\n for f in all_flows:\n size = len(all_flows[f])\n if size >= 2:\n all_flow_dur.append(time_diff(all_flows[f][0].timestamp, all_flows[f][size-1].timestamp))\n \n for f in tcp_flows:\n size = len(tcp_flows[f])\n if size >= 2:\n tcp_flow_dur.append(time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp))\n \n for f in udp_flows:\n size = len(udp_flows[f])\n if size >= 2:\n udp_flow_dur.append(time_diff(udp_flows[f][0].timestamp, udp_flows[f][size-1].timestamp))\n\n print \"lens: \", len(all_flow_dur), len(tcp_flow_dur), len(udp_flow_dur)\n\n # -- Flow Size\n for f in all_flows:\n f_bytes = 0\n size = len(all_flows[f])\n all_flow_size_pkt.append(size)\n for p in all_flows[f]:\n f_bytes += p.length\n all_flow_size_byte.append(f_bytes)\n \n for f in tcp_flows:\n f_bytes = 0\n f_overhead = 0\n size = len(tcp_flows[f])\n tcp_flow_size_pkt.append(size)\n for p in tcp_flows[f]:\n f_bytes += p.length\n f_overhead += 18 + 20 #+ tcp_hdr\n tcp_flow_size_byte.append(f_bytes)\n if f_bytes == 0:\n f_bytes = 9999\n tcp_flow_size_overhead.append(f_overhead/float(f_bytes))\n \n for f in 
udp_flows:\n f_bytes = 0\n size = len(udp_flows[f])\n udp_flow_size_pkt.append(size)\n for p in udp_flows[f]:\n f_bytes += p.length\n udp_flow_size_byte.append(f_bytes)\n\n # -- Inter-packet Arrival time\n for f in all_flows:\n for i in range(len(all_flows[f])-1):\n all_flow_time.append(time_diff(all_flows[f][i].timestamp, all_flows[f][i+1].timestamp))\n\n for f in tcp_flows:\n for i in range(len(tcp_flows[f])-1):\n tcp_flow_time.append(time_diff(tcp_flows[f][i].timestamp, tcp_flows[f][i+1].timestamp))\n\n for f in udp_flows:\n for i in range(len(udp_flows[f])-1):\n udp_flow_time.append(time_diff(udp_flows[f][i].timestamp, udp_flows[f][i+1].timestamp))\n\n # -- TCP State\n for f in tcp_flows:\n size = len(tcp_flows[f])\n last_pkt = tcp_flows[f][size-1]\n tcp = dpkt.ethernet.Ethernet(last_pkt.buf).data.data\n \n if (tcp.flags & dpkt.tcp.TH_SYN) != 0:\n f.state = 'Request'\n elif (tcp.flags & dpkt.tcp.TH_RST) != 0:\n f.state = 'Reset'\n elif (tcp.flags & dpkt.tcp.TH_FIN) != 0 and (tcp.flags & dpkt.tcp.TH_ACK) != 0:\n f.state = 'Finished'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) <= 300:\n f.state = 'Ongoing'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) > 300 \\\n and (tcp.flags & dpkt.tcp.TH_RST) == 0 and (tcp.flags & dpkt.tcp.TH_FIN) == 0:\n f.state = 'Failed'\n\n show_cdf_graphs()",
"def pcap(self, fname):\n\t\tcap = pcapy.open_offline(fname)\n\n\t\tself.map = []\n\t\tself.p = PacketDecoder()\n\t\tcap.loop(0, self.process)\n\n\t\treturn self.map",
"def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]",
"def test():\n with open('univ1_pt8.pcap', 'rb') as f: #univ1_trace/univ1_pt8\n pcap = Reader(f)\n print_packets(pcap)\n # top_flows()\n host_pairs()",
"def find_dac():\n\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ts.bind((\"0.0.0.0\", 7654))\n\n\twhile True:\n\t\tdata, addr = s.recvfrom(1024)\n\t\tbp = BroadcastPacket(data)\n\t\t\n\t\tprint \"Packet from %s: \" % (addr, )\n\t\tbp.dump()",
"def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info",
"def pull(self):\n\n # For each packet in the pcap process the contents\n for item in self.input_stream:\n\n # Print out the timestamp in UTC\n print('%s -' % item['timestamp'], end='')\n\n # Transport info\n if item['transport']:\n print(item['transport']['type'], end='')\n\n # Print out the Packet info\n packet_type = item['packet']['type']\n print(packet_type, end='')\n packet = item['packet']\n if packet_type in ['IP', 'IP6']:\n # Is there domain info?\n if 'src_domain' in packet:\n print('%s(%s) --> %s(%s)' % (net_utils.inet_to_str(packet['src']), packet['src_domain'],\n net_utils.inet_to_str(packet['dst']), packet['dst_domain']), end='')\n else:\n print('%s --> %s' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst'])), end='')\n else:\n print(str(packet))\n\n # Only include application if we have it\n if item['application']:\n print('Application: %s' % item['application']['type'], end='')\n print(str(item['application']), end='')\n\n # Just for newline\n print()",
"def _parse(self):\n \n global _tcp_buf\n _tcp_buf = {}\n number = 1\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n self.pcap_packets.append(pcap_packet)\n pcap_packet.pcap_num = number\n number += 1\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n \n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip.packet[pcap_packet.ip.header_len: ])\n \n #skip the packets that is not http packet\n if (pcap_packet.tcp.src_port != 80 and pcap_packet.tcp.dst_port != 80):\n continue\n \n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, pcap_packet.pcap_num)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]",
"def recv_raw(self) -> Dict[str, Any]:\n while True:\n try:\n packet = self.__recv_frame()\n except UnknownPacketException:\n continue\n\n # Hack for sniffing on localhost\n if packet['address']['interface'] == 'lo' and packet['address']['type'] != 4:\n continue\n\n if self.address and self.port:\n if (\n packet['ip_header']['source_address'] == self.address and\n packet['tcp_header']['source_port'] == self.port\n ):\n return packet\n if (\n packet['ip_header']['destination_address'] == self.address and\n packet['tcp_header']['destination_port'] == self.port\n ):\n return packet\n elif self.address:\n if (\n packet['ip_header']['source_address'] == self.address or\n packet['ip_header']['destination_address'] == self.address\n ):\n return packet\n elif self.port:\n if (\n packet['tcp_header']['source_port'] == self.port or\n packet['tcp_header']['destination_port'] == self.port\n ):\n return packet\n else:\n return packet",
"def extract_tstat_data(pcap_filepath):\n connections = {}\n conn_id = 0\n print('We are here')\n with co.cd(os.path.basename(pcap_filepath[:-5])):\n with co.cd(os.listdir('.')[0]):\n print(connections)\n # Complete TCP connections\n connections, conn_id = extract_tstat_data_tcp_complete('log_tcp_complete', connections, conn_id)\n # Non complete TCP connections (less info, but still interesting data)\n connections, conn_id = extract_tstat_data_tcp_nocomplete('log_tcp_nocomplete', connections, conn_id)\n\n return connections",
"def next_packet(filename, memorymap=True):\n with open(filename, 'rb') as f:\n \n #memory map the file if necessary (prob requires 64 bit systems)\n _file = f\n if memorymap:\n _file = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)\n \n while True:\n packet = _file.read(TS.PACKET_SIZE)\n if packet:\n # first byte SHOULD be the sync byte\n # but if it isn't find one.\n if packet[0] != TS.SYNC_BYTE:\n start_byte = 0\n print packet[0]\n for i in range(start_byte, TS.PACKET_SIZE):\n if packet[i] == TS.SYNC_BYTE:\n start_byte = i\n break\n # didn't find a new start? FAIL\n if start_byte == 0:\n raise Exception(\"failure to find sync byte in ts packet size.\")\n continue\n remainder = _file.read(TS.PACKET_SIZE - start_byte)\n packet = packet[start_byte:] + remainder\n yield packet\n else:\n break",
"def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow 
ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = 
parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)",
"def process(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n if self.sip and self.dip and self.sp and self.dp:\n self.process_pkts(pkts)",
"def filter(self):\n # outfile = open(self.newpcap, 'wb')\n # writer = dpkt.pcap.Writer(outfile)\n f = open(self.pcapfile, 'rb')\n packets = dpkt.pcap.Reader(f)\n\n for timestamp, buf in packets:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP): # 确保以太网数据包含一个IP数据包, Non IP Packet type not supported\n continue # 过滤空IP包\n ip = eth.data # 获取以太网帧(IP数据包)\n if not isinstance(ip.data, dpkt.tcp.TCP): # 在传输层中检查TCP\n continue\n tcp = ip.data # 获取tcp数据\n # print('-->TCP Data: ', repr(tcp))\n\n \"\"\" 过滤三次握手后的首包\"\"\"\n seq = self.seq_pattern.findall(repr(tcp))\n ack = self.ack_pattern.findall(repr(tcp))\n if not (seq or ack): # seq、ack必须有一个, 一真即真\n continue\n if ack:\n ack = ack[0]\n if seq:\n seq = seq[0]\n\n if not ack and seq: # 一次握手请求\n self.hash_table[seq] = {}\n self.stream_table[seq] = [buf]\n if ack and seq: # 二次、三次、交流包\n if str(int(ack) - 1) in self.hash_table.keys(): # 有一次握手记录\n number = str(int(ack) - 1)\n if 'second' not in self.hash_table[number].keys(): # 新增二次握手\n self.hash_table[number]['second'] = {'seq': seq, 'ack': ack}\n self.stream_table[number].append(buf) # 将二次握手添加到buf\n self.resp_relation[seq] = ack # 新增关系表\n\n # 存在二次握手记录, 看hash表有无第三次握手记录, 有就保存stream流\n # 基本就是traffic响应包了\n elif 'three' in self.hash_table[number].keys():\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n\n # ack-1没有对应的hash表, 可能是三次握手或traffic请求包\n elif str(int(seq) - 1) in self.hash_table.keys():\n number = str(int(seq) - 1)\n if 'second' not in self.hash_table[number]:\n pass\n elif 'three' not in self.hash_table[number]: # 三次包\n self.hash_table[number]['three'] = {'seq': seq, 'ack': ack}\n self.stream_table[number].append(buf)\n # 否则就是traffic包了\n else:\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n # traffic响应包\n elif str(int(seq) - 1) in self.resp_relation.keys():\n number = str(int(seq) - 1)\n second_ack = self.resp_relation[number]\n number = str(int(second_ack) - 1)\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n else:\n continue # seq不存在\n\n # outfile.close()\n f.close()",
"def get_connections(capture):\n ip_dict = dict()\n for pkt in capture:\n\n if not hasattr(pkt, \"ip\") and not hasattr(pkt, \"ipv6\"):\n continue\n\n protocol = pkt.highest_layer\n\n tcp_dst_port = None\n tcp_src_port = None\n if hasattr(pkt, \"tcp\"):\n tcp_src_port = pkt.tcp.srcport\n tcp_dst_port = pkt.tcp.dstport\n\n if hasattr(pkt, \"ip\"):\n if pkt.ip.src.startswith(\"192.168.178\"):\n ip, dst = pkt.ip.src, pkt.ip.dst\n else:\n ip, dst = pkt.ip.dst, pkt.ip.src\n tcp_dst_port = tcp_src_port\n else:\n # TODO: how to discern src and dst in IPv6?\n ip, dst = pkt.ipv6.src, pkt.ipv6.dst\n\n ip = \"%s\" % ip\n dkey = (\n \"%s\" % protocol,\n int(tcp_dst_port) if tcp_dst_port else None,\n \"%s\" % dst\n )\n if ip not in ip_dict:\n ip_dict[ip] = {dkey: 1}\n else:\n ip_dict[ip][dkey] = ip_dict[ip].get(dkey, 0) + 1\n return ip_dict",
"def process_pcap(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n self.process_pkts(pkts)",
"def handle_tcp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n sequence_num = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n acknowledgment = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n data_offset = int(pkt[start_point], 16) * 4\r\n start_point += 2\r\n flags = pkt[start_point:start_point+2]\r\n flags_str = \"\"\r\n for f in flags:\r\n flags_str += str(format(int(f), '04b'))\r\n start_point += 2\r\n window_size = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n start_point += 4\r\n urgent_pointer = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n options = int((2 * packets[i][0][0] - start_point)/2)\r\n\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(sequence_num)\r\n packets[i][2].append(acknowledgment)\r\n packets[i][2].append(data_offset)\r\n packets[i][2].append(flags_str)\r\n packets[i][2].append(window_size)\r\n packets[i][2].append(checksum_value)\r\n packets[i][2].append(urgent_pointer)\r\n packets[i][2].append(options)\r\n return packets",
"def reader(self):\n while self.alive:\n try:\n data = self.serial.read_until(b'~')[:-1]\n packet = ethernet.Ethernet(data)\n if packet[icmp.ICMP]:\n packet[ethernet.Ethernet].dst_s = \"dc:a6:32:00:a7:8b\"\n packet[ip.IP].dst_s = \"192.168.1.35\"\n packet[icmp.ICMP].sum = b'0x1783'\n print(\"\\n\\n__________________RESPONSE FROM VISIBLE PI__________________\")\n print(packet)\n if data:\n self.write(packet.bin())\n except socket.error as msg:\n break\n self.alive = False",
"def udp_iterator(pc):\n\tfor time,pkt in pc:\n\t\teth = dpkt.ethernet.Ethernet(pkt)\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t ip = eth.data\n\t\t # if the IP protocol is UDP, process it further\n\t\t if ip.p == dpkt.ip.IP_PROTO_UDP :\n\t\t\tudp = ip.data\n\t\t\tyield( ip.src, udp.sport, ip.dst, udp.dport, udp.data )",
"def read_and_store_pcap(file_name):\r\n file = open(file_name, \"rb\")\r\n global_header = file.read(24).hex()\r\n byte = file.read(16)\r\n packets = []\r\n bytes = []\r\n sizes = []\r\n while byte:\r\n packet_header = byte.hex()\r\n # parse the size for each packet\r\n size = struct.unpack(\"<L\", codecs.decode(str(packet_header[16:24]), \"hex\"))[0]\r\n sizes.append(size)\r\n # read the whole packet by its size from the bytes\r\n byte = file.read(size).hex()\r\n bytes.append(byte)\r\n byte = file.read(16)\r\n for size in sizes:\r\n packets.append(([size], [], []))\r\n i = 0\r\n\r\n for pkt in bytes:\r\n packets = handle_pkt_header(pkt, packets, i)\r\n packets, start_point = handle_ip_header(pkt, packets, i)\r\n protocol = packets[i][1][7]\r\n if protocol == 1:\r\n packets = handle_icmp(pkt, packets, i, start_point)\r\n elif protocol == 6:\r\n packets = handle_tcp(pkt, packets, i, start_point)\r\n elif protocol == 17:\r\n packets = handle_udp(pkt, packets, i, start_point)\r\n i += 1\r\n # print(packets)\r\n return packets",
"def testParse(self):\n parser = pcap.PcapParser()\n storage_writer = self._ParseFile(['test.pcap'], parser)\n\n # PCAP information:\n # Number of streams: 96 (TCP: 47, UDP: 39, ICMP: 0, Other: 10)\n #\n # For each stream 2 events are generated one for the start\n # and one for the end time.\n\n self.assertEqual(storage_writer.number_of_events, 192)\n\n events = list(storage_writer.GetEvents())\n\n # Test stream 3 (event 6).\n # Protocol: TCP\n # Source IP: 192.168.195.130\n # Dest IP: 63.245.217.43\n # Source Port: 1038\n # Dest Port: 443\n # Stream Type: SSL\n # Starting Packet: 4\n # Ending Packet: 6\n\n event = events[6]\n self.assertEqual(event.packet_count, 3)\n self.assertEqual(event.protocol, 'TCP')\n self.assertEqual(event.source_ip, '192.168.195.130')\n self.assertEqual(event.dest_ip, '63.245.217.43')\n self.assertEqual(event.dest_port, 443)\n self.assertEqual(event.source_port, 1038)\n self.assertEqual(event.stream_type, 'SSL')\n self.assertEqual(event.first_packet_id, 4)\n self.assertEqual(event.last_packet_id, 6)\n\n # Test stream 6 (event 12).\n # Protocol: UDP\n # Source IP: 192.168.195.130\n # Dest IP: 192.168.195.2\n # Source Port: 55679\n # Dest Port: 53\n # Stream Type: DNS\n # Starting Packet: 4\n # Ending Packet: 6\n # Protocol Data: DNS Query for wpad.localdomain\n\n event = events[12]\n self.assertEqual(event.packet_count, 5)\n self.assertEqual(event.protocol, 'UDP')\n self.assertEqual(event.source_ip, '192.168.195.130')\n self.assertEqual(event.dest_ip, '192.168.195.2')\n self.assertEqual(event.dest_port, 53)\n self.assertEqual(event.source_port, 55679)\n self.assertEqual(event.stream_type, 'DNS')\n self.assertEqual(event.first_packet_id, 11)\n self.assertEqual(event.last_packet_id, 1307)\n self.assertEqual(\n event.protocol_data, 'DNS Query for wpad.localdomain')\n\n expected_message = (\n 'Source IP: 192.168.195.130 '\n 'Destination IP: 192.168.195.2 '\n 'Source Port: 55679 '\n 'Destination Port: 53 '\n 'Protocol: UDP '\n 'Type: DNS '\n 'Size: 380 '\n 'Protocol Data: DNS Query for wpad.localdomain '\n 'Stream Data: \\'\\\\xb8\\\\x9c\\\\x01\\\\x00\\\\x00\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00'\n '\\\\x00\\\\x00\\\\x04wpad\\\\x0blocaldomain\\\\x00\\\\x00\\\\x01\\\\x00\\\\x01\\\\xb8'\n '\\\\x9c\\\\x01\\\\x00\\\\x00\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x04wpa\\' '\n 'First Packet ID: 11 '\n 'Last Packet ID: 1307 '\n 'Packet Count: 5')\n expected_short_message = (\n 'Type: DNS '\n 'First Packet ID: 11')\n\n self._TestGetMessageStrings(event, expected_message, expected_short_message)",
"def convert_pcap_to_dataframe(input_file):\r\n if not os.path.exists(input_file):\r\n raise IOError(\"File \" + input_file + \" does not exist\")\r\n\r\n tshark_fields = \"-e frame.time_epoch \" \\\r\n \"-e _ws.col.Source \" \\\r\n \"-e _ws.col.Destination \" \\\r\n \"-e _ws.col.Protocol \" \\\r\n \"-e frame.len \" \\\r\n \"-e ip.ttl \" \\\r\n \"-e ip.flags.mf \" \\\r\n \"-e ip.frag_offset \" \\\r\n \"-e icmp.type \" \\\r\n \"-e tcp.srcport \" \\\r\n \"-e tcp.dstport \" \\\r\n \"-e udp.srcport \" \\\r\n \"-e udp.dstport \" \\\r\n \"-e dns.qry.name \" \\\r\n \"-e dns.qry.type \" \\\r\n \"-e http.request \" \\\r\n \"-e http.response \" \\\r\n \"-e http.user_agent \" \\\r\n \"-e tcp.flags.str \" \\\r\n \"-e ntp.priv.reqcode \"\r\n\r\n temporary_file = tempfile.TemporaryFile(\"r+b\")\r\n\r\n # print(shutil.which(command))\r\n\r\n p = subprocess.Popen([settings.TSHARK + \" -n -r \\\"\" + input_file + \"\\\" -E separator='\\x03' -E header=y -T fields \" + tshark_fields],\r\n shell=True, stdout=temporary_file) #\\x03 is ETX\r\n p.communicate()\r\n p.wait()\r\n\r\n # Reset file pointer to start of file\r\n temporary_file.seek(0)\r\n\r\n df = pd.read_csv(temporary_file, sep=\"\\x03\", low_memory=False, error_bad_lines=False)\r\n\r\n temporary_file.close()\r\n\r\n if ('tcp.srcport' in df.columns) and ('udp.srcport' in df.columns) and ('tcp.dstport' in df.columns) and \\\r\n ('udp.dstport' in df.columns):\r\n # Combine source and destination ports from tcp and udp\r\n df['srcport'] = df['tcp.srcport'].fillna(df['udp.srcport'])\r\n df['dstport'] = df['tcp.dstport'].fillna(df['udp.dstport'])\r\n\r\n df['srcport'] = df['srcport'].apply(lambda x: int(x) if str(x).replace('.', '', 1).isdigit() else 0)\r\n df['dstport'] = df['dstport'].apply(lambda x: int(x) if str(x).replace('.', '', 1).isdigit() else 0)\r\n\r\n # Remove columns: 'tcp.srcport', 'udp.srcport','tcp.dstport', 'udp.dstport'\r\n df.drop(['tcp.srcport', 'udp.srcport', 'tcp.dstport', 'udp.dstport'], axis=1, inplace=True)\r\n\r\n # Drop all empty columns (for making the analysis more efficient! less memory.)\r\n df.dropna(axis=1, how='all', inplace=True)\r\n df = df.fillna(0)\r\n\r\n if 'icmp.type' in df.columns:\r\n df['icmp.type'] = df['icmp.type'].astype(str)\r\n\r\n if 'ip.frag_offset' in df.columns:\r\n df['ip.frag_offset'] = df['ip.frag_offset'].astype(str)\r\n\r\n if 'ip.flags.mf' in df.columns:\r\n df['ip.flags.mf'] = df['ip.flags.mf'].astype(str)\r\n\r\n if ('ip.flags.mf' in df.columns) and ('ip.frag_offset' in df.columns):\r\n # Analyse fragmented packets\r\n df['fragmentation'] = (df['ip.flags.mf'] == '1') | (df['ip.frag_offset'] != '0')\r\n df.drop(['ip.flags.mf', 'ip.frag_offset'], axis=1, inplace=True)\r\n\r\n if 'tcp.flags.str' in df.columns:\r\n df['tcp.flags.str'] = df['tcp.flags.str'].str.encode(\"utf-8\") \r\n\r\n return df",
"def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for",
"def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for",
"def _read_packets(self, reader: Par2FileReader):\n start_count = len(self)\n pointers = reader.get_pointers()\n # Create RecoverySets if needed\n for set_id, pointer_set in packets.by_set_id(pointers).items():\n print(set_id.hex(), pointer_set)\n if set_id not in self.recovery_sets.keys():\n # Create a RecoverySet if needed\n self.recovery_sets[set_id] = RecoverySet(set_id)\n for pointer in pointer_set:\n self.recovery_sets[set_id].packets.add(pointer)\n logger.info(\"Added {} new packets\".format(len(self) - start_count))",
"def locator(pcap_obj,kml_file):\r\n ip_list = []\r\n for ts, buf in pcap_obj:\r\n eth = dpkt.ethernet.Ethernet(buf)\r\n ip = eth.data\r\n try: # extract all unique IPs\r\n src_ip = str(socket.inet_ntoa(ip.src))\r\n dst_ip = str(socket.inet_ntoa(ip.dst))\r\n if src_ip in ip_list:\r\n pass\r\n else:\r\n ip_list.append(src_ip)\r\n if dst_ip in ip_list:\r\n pass\r\n else:\r\n ip_list.append(dst_ip)\r\n except AttributeError:\r\n pass\r\n\r\n try:\r\n reader = geoip2.database.Reader('GeoLite2-City_20190129.mmdb') # reading from db(can be redacted)\r\n except FileNotFoundError:\r\n print(f'[!]DB file not in current directory or with a different file name')\r\n sys.exit(1)\r\n area = []\r\n longitude = []\r\n latitude = []\r\n ips = []\r\n for ip_addr in ip_list:\r\n try:\r\n rec = reader.city(ip_addr) # reading IP\r\n country = rec.country.iso_code # assigning country and city\r\n city = rec.city.name\r\n if city is None and country is None:\r\n area.append('Unknown')\r\n elif city is None:\r\n area.append(f'Unknown city:{country}') # looking for unknown country\r\n elif country is None:\r\n area.append(f'Unknown country:{city}') # looking for unknown city\r\n else:\r\n area.append(f'{city} {country}')\r\n\r\n longitude.append(rec.location.longitude)\r\n latitude.append(rec.location.latitude)\r\n ips.append(ip_addr)\r\n except geoip2.errors.AddressNotFoundError:\r\n pass\r\n\r\n try:\r\n kml = simplekml.Kml()\r\n final_path = str(os.getcwd() + os.sep + kml_file) # defining full canonical path\r\n for i in range(0, len(ips)):\r\n kml.newpoint(name=(area[i]),\r\n coords=[(longitude[i], latitude[i])],\r\n description=f'[+] Location = {area[i]}\\n IP: {ips[i]}')\r\n kml.save(final_path)\r\n print(f\"[+] Writing IP locations to {kml_file}\") # writing data to a KML file\r\n print(f\"[+] Opening Google Earth with:{kml_file}\\n\") # reading file with google earth\r\n try:\r\n os.startfile(final_path)\r\n except OSError:\r\n print(f'[!] Warning: Google Earth must be installed to open the kml')\r\n except FileNotFoundError:\r\n pass",
"def capture_packets(self, interface, count=1, timeout=None):\n if interface not in self.packet_captures:\n raise ObjectNotFoundException(\n 'No packet capture is running or was run on host/interface' +\n self.name + '/' + interface)\n tcpd = self.packet_captures[interface]\n return tcpd.wait_for_packets(count, timeout)",
"def process_pkts(self, pkts: list):\n pkt_count = 0\n for ts, buf in pkts:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP):\n continue\n ip = eth.data\n if ((inet_to_str(ip.src) == self.sip and inet_to_str(ip.dst) == self.dip) or\n (inet_to_str(ip.src) == self.dip and inet_to_str(ip.dst) == self.sip)):\n if isinstance(ip.data, dpkt.tcp.TCP):\n tcp = ip.data\n if ((tcp.sport == self.sp and tcp.dport == self.dp) or\n (tcp.dport == self.sp and tcp.sport == self.dp)):\n pkt_count += 1\n self._process(buf, ts, pkt_count)\n if self._c_state == self._s_state and self._c_state == TCPState.CLOSED:\n logger.info(\"Session finished.\")\n logger.info(\"Number of packets in the session id: {} is {}\".format(\n self.session_count, len(self.sessions[self.session_count])))\n self.__reset_state__()",
"def extract_from_pcap(device=None, pcap=None, flags=\"-v -r\", path_to_chaosreader=\"/tmp/\"):\n if device is None or pcap is None:\n raise Exception(\"device and pcap are mandatory arguments\")\n device.shell(command=\"cd /tmp\")\n\n cmd = path_to_chaosreader + \"chaosreader0.94 \" + flags + \" \" + pcap\n output = device.shell(command=cmd)\n\n if not re.match(\".*Creating files.*\", output.response(), re.DOTALL):\n device.log(level=\"ERROR\", message=\"Chaosreader ran into an error\")\n raise Exception(\"Chaosreader ran into an error\")\n\n return True",
"def handle_icmp(pkt, packets, i, start_point):\r\n icmp_type = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_code = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_checksum = pkt[start_point:start_point+4]\r\n packets[i][2].append(icmp_type)\r\n packets[i][2].append(icmp_code)\r\n packets[i][2].append(icmp_checksum)\r\n return packets"
] | [
"0.5710354",
"0.5529603",
"0.5436237",
"0.5389503",
"0.5370659",
"0.5341635",
"0.5319951",
"0.5312948",
"0.5296589",
"0.52821374",
"0.5232271",
"0.5231817",
"0.5208476",
"0.52023923",
"0.51755023",
"0.5116855",
"0.5112408",
"0.5110333",
"0.5092085",
"0.5089987",
"0.5086152",
"0.5080938",
"0.5051528",
"0.5051528",
"0.5011665",
"0.50084186",
"0.5007459",
"0.49958622",
"0.49733213",
"0.49542275"
] | 0.69073343 | 0 |
Use SortCap class together with batch_sort to sort a pcap | def sort_pcap(inpath, outpath):
inc = SortCap(inpath)
batch_sort(inc, outpath, output_class=lambda path: WriteCap(path, linktype=inc.linktype))
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def external_sort(input_file_name, block_size, output_file_name=None):\n if output_file_name is None:\n output_file_name = input_file_name\n sorter = ExternalSort(input_file_name, block_size, output_file_name)\n sorter.run()",
"def step020():\n logger.logMessage('Begin: Sorting records')\n sortCommand = 'sort {0} -t \\';\\' --key 2 -o {1}'.format(candidatesFile,sortedCandidatesFile) \n rc = os.system(sortCommand)\n if rc != 0:\n raise Exception('Error returned by sort program: {0:d}'.format(rc))\n logger.logMessage('End : Sorting records')",
"def f_way_sort(buffer_size: int, input_paths: list, output_path: str):\n pass",
"def volume_sort(self):\n self.jobs_sorted = sorted(\n self.jobs,\n key=lambda job: (job['height'], job['width'] * job['height']),\n # key=lambda job: job['width'] * job['height'],\n reverse=True)",
"def batchSort(input, output, key, buffer_size, tempdir):\n def merge(key=None, *iterables):\n if key is None:\n keyed_iterables = iterables\n else:\n Keyed = namedtuple(\"Keyed\", [\"key\", \"obj\"])\n keyed_iterables = [(Keyed(key(obj), obj) for obj in iterable)\n for iterable in iterables]\n for element in heapq.merge(*keyed_iterables):\n yield element.obj\n\n tempdir = os.path.join(tempdir, str(uuid.uuid4()))\n os.makedirs(tempdir)\n chunks = []\n try:\n with open(input, 'rb', 64 * 1024) as inputFile:\n inputIter = iter(inputFile)\n while True:\n current_chunk = list(islice(inputIter, buffer_size))\n if not current_chunk:\n break\n current_chunk.sort(key=key)\n output_chunk = open(\n os.path.join(tempdir, '%06i' % len(chunks)), 'w+b',\n 64 * 1024)\n chunks.append(output_chunk)\n output_chunk.writelines(current_chunk)\n output_chunk.flush()\n output_chunk.seek(0)\n with open(output, 'wb', 64 * 1024) as output_file:\n output_file.writelines(merge(key, *chunks))\n finally:\n for chunk in chunks:\n try:\n chunk.close()\n os.remove(chunk.name)\n except Exception:\n pass\n print(\"sorted file %s ready\" % (output))",
"def sort_grouped_packets(self, grouped_packets):\n for group in grouped_packets:\n group.sort(key=lambda x: x.time, reverse=False)\n return grouped_packets",
"def testSorting(self):\n mtt.makeTempDirParent()\n shuffledTargets = list(g_targetBlocks)\n for i in xrange(0, 200):\n tmpDir = os.path.abspath(mtt.makeTempDir('sorting'))\n random.shuffle(g_nonTargetBlocks)\n random.shuffle(shuffledTargets)\n shuffledBlocks = list(shuffledTargets)\n lower = 0\n for j in xrange(0, len(g_nonTargetBlocks)):\n # randomly insert the non target blocks, but keep a record\n # of their relative order.\n index = random.randint(lower, len(shuffledBlocks))\n shuffledBlocks.insert(index, g_nonTargetBlocks[j])\n lower = index + 1\n testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), \n ''.join(shuffledBlocks), g_headers)\n parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n cmd = [os.path.abspath(os.path.join(parent, 'test', 'mafSorter'))]\n cmd += ['--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf')), \n '--seq', 'hg18.chr7']\n outpipes = [os.path.abspath(os.path.join(tmpDir, 'sorted.maf'))]\n mtt.recordCommands([cmd], tmpDir, outPipes=outpipes)\n mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes)\n self.assertTrue(mafIsSorted(os.path.join(tmpDir, 'sorted.maf')))\n mtt.removeDir(tmpDir)",
"def sort(data, sort_size=500):\n\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= sort_size:\n buf.sort(key=lambda x: x[\"feat\"].size(0))\n for x in buf:\n yield x\n buf = []\n # The sample left over\n buf.sort(key=lambda x: x[\"feat\"].size(0))\n for x in buf:\n yield x",
"def test_benchmark_sorted(benchmark, benchmark_items_fixture):\n do_benchmark(benchmark_items_fixture, sorted, benchmark)",
"def array_sort():\n to_concat = []\n for centroid_rgb, cluster in itertools.izip(centroids_rgb, self.clusters):\n # no need to revisit ratio\n new_idxed_arr = tf.concat(1,[tf.slice(cluster, [0,0], [-1,2]),\n tf.tile(tf.expand_dims(\n tf.constant(centroid_rgb), 0),\n multiples=[len(cluster.eval()), 1])])\n to_concat.append(new_idxed_arr)\n\n concated = tf.concat(0, to_concat)\n sorted_arr = np.array(sorted(concated.eval().tolist()), dtype=np.uint8)[:, 2:]\n\n new_img = Image.fromarray(sorted_arr.reshape([self.m, self.n, self.chann]))\n if save:\n new_img.save(outfile, format=format_)\n os.popen(\"open '{}'\".format(outfile))\n else:\n new_img.show()",
"def sort(self):\n self.deckcards.sort()",
"def sort(self):\n\t\tself.servers = sorted(self.servers, key=lambda s: s.load)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.distance_class)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.country == self.locale_info.country, reverse=True)",
"def custom_sort(arr):\n pass",
"def sortby(self):\n ...",
"def batch_sort(input_iterator, output_path, buffer_size=1024**2, output_class=None):\n if not output_class:\n output_class = input_iterator.__class__\n\n chunks = []\n try:\n while True:\n current_chunk = list(islice(input_iterator,buffer_size))\n if not current_chunk:\n break\n current_chunk.sort()\n output_chunk_name = os.path.join(TMPD, \"%06i\" % len(chunks))\n output_chunk = output_class(output_chunk_name)\n\n for elem in current_chunk:\n output_chunk.write(elem.obj)\n output_chunk.close()\n chunks.append(input_iterator.__class__(output_chunk_name))\n\n output_file = output_class(output_path)\n for elem in heapq.merge(*chunks):\n output_file.write(elem.obj)\n output_file.close()\n except:\n raise\n finally:\n for chunk in chunks:\n try:\n chunk.close()\n os.remove(chunk.name)\n except Exception:\n pass",
"def oldsortslice(self):\n ...",
"def test_list_vips_sort(self):\n resources = \"vips\"\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\n self._test_list_resources(resources, cmd,\n sort_key=[\"name\", \"id\"],\n sort_dir=[\"asc\", \"desc\"])",
"def sort(self):\r\n\t\treturn sorted(self.sample)",
"def __sort_by_priority(self, input_list):\n print(\"========================Start of __sort_by_priority() Method *\")\n # temp1 = input_list.sort(key=operator.attrgetter(\"submission_time\"))\n # temp1 = temp1.sort(key=operator.attrgetter(str(\"__req_start\")))\n\n # sending one item from list at a time to be enqueued ensuring sorted-nes\n for j in range(len(input_list)):\n self.current_queue.enqueue(input_list[j])\n # print(\"Enqueued the FF item from Input list :\" + input_list[j].showFlightInfo())\n # print(\"*De-queued the FF item from Queue :\" + self.current_queue.dequeue(j).showFlightInfo())\n \"\"\"\n if input_list[i].get_reqStart <= self.current_queue.first.get_reqStart:\n if input_list[i].get_submissionTime <= self.current_queue.first.get_submissionTime:\n temp = self.current_queue.first\n self.current_queue.first = input_list[i]\n self.current_queue.first.next = temp\"\"\"\n print(\"========================End of __sort_by_priority() Method *\")",
"def sort_reads(self): \n if not self.sampling:\n self.convert_to_array()\n self.reads = self.reads[self.reads[:,0].argsort()]",
"def test_list_vips_sort(self):\r\n resources = \"vips\"\r\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])",
"def sort(self):\r\n return self.sort_targets([self])",
"def sort(self):\n self.cards.sort()",
"def sort(self):\n self.cards.sort()",
"def order_test(self, attack_args, seed=None, cleanup=True, pcap=Lib.test_pcap,\n flag_write_file=False, flag_recalculate_stats=False, flag_print_statistics=False,\n attack_sub_dir=True, test_sub_dir=True):\n\n controller = Ctrl.Controller(pcap_file_path=pcap, do_extra_tests=False, non_verbose=True)\n controller.load_pcap_statistics(flag_write_file, flag_recalculate_stats, flag_print_statistics,\n intervals=[], delete=True)\n controller.process_attacks(attack_args, [[seed]])\n\n caller_function = inspect.stack()[1].function\n\n try:\n path = controller.pcap_dest_path\n file = pcr.RawPcapReader(path)\n packet_a = file.read_packet()\n packet_b = file.read_packet()\n i = 0\n\n while packet_b is not None:\n\n time_a = [packet_a[1].sec, packet_a[1].usec]\n time_b = [packet_b[1].sec, packet_b[1].usec]\n\n if time_a[0] > time_b[0]:\n file.close()\n self.fail(\"Packet order incorrect at: \" + str(i + 1) + \"-\" + str(i + 2) +\n \". Current time: \" + str(time_a) + \" Next time: \" + str(time_b))\n elif time_a[0] == time_b[0]:\n if time_a[1] > time_b[1]:\n file.close()\n self.fail(\"Packet order incorrect at: \" + str(i + 1) + \"-\" + str(i + 2) +\n \". Current time: \" + str(time_a) + \" Next time: \" + str(time_b))\n\n packet_a = packet_b\n packet_b = file.read_packet()\n i += 1\n\n file.close()\n\n except self.failureException:\n Lib.rename_test_result_files(controller, caller_function, attack_sub_dir, test_sub_dir)\n raise\n\n if cleanup:\n Lib.clean_up(controller)\n else:\n Lib.rename_test_result_files(controller, caller_function, attack_sub_dir, test_sub_dir)",
"def keysort(*args, **kwargs): # real signature unknown\n pass",
"def process_pcap(pcap):\n\n print \"Processing\", pcap\n pcap_path, _ = os.path.splitext(pcap)\n # strip_payload_from_pcap(pcap)\n os.system(\"tshark -nn -T fields -E separator=/t -e frame.time_epoch\"\n \" -e ip.src -e ip.dst -e tcp.srcport -e tcp.dstport\"\n \" -e ip.proto -e ip.len -e ip.hdr_len -e tcp.hdr_len -e data.len\"\n \" -e tcp.flags -e tcp.options.timestamp.tsval\"\n \" -e tcp.options.timestamp.tsecr -e tcp.seq -e tcp.ack\"\n \" -e tcp.window_size_value -e expert.message \"\n \" -r %s > %s.tshark\" % (pcap, pcap_path))\n # tcpdump command from Panchenko's raw-to-tcp script\n os.system(\"\"\"tcpdump -r {0} -n -l -tt -q -v | sed -e 's/^[ ]*//' |\n awk '/length ([0-9][0-9]*)/{{printf \"%s \",$0;next}}{{print}}' > {1}\"\"\".\\\n format(pcap, pcap_path + '.tcpdump'))",
"def process_pcap(self):\n # Create Core Controller\n controller = Controller(self.args.input, self.args.extraTests, self.args.non_verbose, self.args.output,\n self.args.debug)\n\n if not self.args.skip:\n # Load PCAP statistics\n recalculate_intervals = None\n if self.args.recalculate_delete:\n recalculate_intervals = True\n elif self.args.recalculate_yes:\n recalculate_intervals = True\n self.args.recalculate = True\n elif self.args.recalculate_no:\n recalculate_intervals = False\n self.args.recalculate = True\n controller.load_pcap_statistics(self.args.export, self.args.recalculate, self.args.statistics,\n self.args.statistics_interval, self.args.recalculate_delete,\n recalculate_intervals)\n\n if self.args.list_intervals:\n controller.list_interval_statistics()\n\n # Create statistics plots\n if self.args.plot is not None:\n do_entropy = False\n if self.args.extraTests:\n do_entropy = True\n controller.create_statistics_plot(self.args.plot, do_entropy)\n\n # Check rng seed\n if not isinstance(self.args.rngSeed, list):\n self.args.rngSeed = [self.args.rngSeed]\n\n # Process attack(s) with given attack params\n if self.args.attack is not None:\n # If attack is present, load attack with params\n controller.process_attacks(self.args.attack, self.args.rngSeed, self.args.time, self.args.inject_empty)\n\n # Parameter -q without arguments was given -> go into query loop\n if self.args.query == [None]:\n controller.enter_query_mode()\n # Parameter -q with arguments was given -> process query\n elif self.args.query is not None:\n controller.process_db_queries(self.args.query, True)",
"def test_for_different_input_sizes_asc(self):\n for size in range(1, 50):\n c = [random.randint(1, 1000) for _ in range(size)]\n\n copy = c\n\n # sort using mergeSort and using builtin sort\n sort.asc(c)\n copy.sort()\n\n assert c == copy",
"def _sort_records(self):\n self.records.sort(reverse=True, key=lambda record: record.timestamp)"
] | [
"0.55296206",
"0.5489133",
"0.5482073",
"0.5391432",
"0.53850245",
"0.53489995",
"0.5328854",
"0.52847326",
"0.5241572",
"0.5189761",
"0.5177634",
"0.51613575",
"0.51465315",
"0.5137804",
"0.5135673",
"0.51097757",
"0.51089555",
"0.51009786",
"0.5096934",
"0.5095022",
"0.5090213",
"0.50829244",
"0.50816697",
"0.50816697",
"0.5043453",
"0.5042157",
"0.5031161",
"0.5017638",
"0.49982575",
"0.49934813"
] | 0.7660257 | 0 |
test that the StrainData.fetch_open_frame works as expected | def test_fetch_open_frame(self):
import requests
pesummary_data = StrainData.fetch_open_frame(
"GW190412", IFO="L1", duration=32, sampling_rate=4096.,
channel="L1:GWOSC-4KHZ_R1_STRAIN", format="hdf5"
)
N = len(pesummary_data)
np.testing.assert_almost_equal(N * pesummary_data.dt.value, 32.)
np.testing.assert_almost_equal(1. / pesummary_data.dt.value, 4096.)
assert pesummary_data.IFO == "L1"
_data = requests.get(
"https://www.gw-openscience.org/eventapi/html/GWTC-2/GW190412/v3/"
"L-L1_GWOSC_4KHZ_R1-1239082247-32.gwf"
)
with open("L-L1_GWOSC_4KHZ_R1-1239082247-32.gwf", "wb") as f:
f.write(_data.content)
data2 = TimeSeries.read(
"L-L1_GWOSC_4KHZ_R1-1239082247-32.gwf",
channel="L1:GWOSC-4KHZ_R1_STRAIN"
)
np.testing.assert_almost_equal(pesummary_data.value, data2.value)
np.testing.assert_almost_equal(
pesummary_data.times.value, data2.times.value
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_fetch_open_data(self):\n args = [\"L1\", 1126259446, 1126259478]\n pesummary_data = StrainData.fetch_open_data(*args)\n gwpy_data = TimeSeries.fetch_open_data(*args)\n np.testing.assert_almost_equal(pesummary_data.value, gwpy_data.value)\n np.testing.assert_almost_equal(\n pesummary_data.times.value, gwpy_data.times.value\n )\n assert isinstance(pesummary_data.gwpy, TimeSeries)\n np.testing.assert_almost_equal(\n pesummary_data.gwpy.value, gwpy_data.value\n )\n np.testing.assert_almost_equal(\n pesummary_data.gwpy.times.value, gwpy_data.times.value\n )\n assert pesummary_data.IFO == \"L1\"\n assert list(pesummary_data.strain_dict.keys()) == [\"L1\"]\n np.testing.assert_almost_equal(\n pesummary_data.strain_dict[\"L1\"].value, gwpy_data.value\n )\n np.testing.assert_almost_equal(\n pesummary_data.strain_dict[\"L1\"].times.value, gwpy_data.times.value\n )",
"def test_dataframe(self):\n\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_df(url)\n self.assertIsInstance(readerobject,pd.DataFrame)",
"def test_fetch_traffic(self):\n assert isinstance(_tabular.fetch_traffic_data(), \n pd.DataFrame)",
"def test_fetch_crime_sedf(self):\n assert isinstance(_vector.fetch_beach_access_data(f='arcgis'), \n pd.DataFrame)",
"def test_get_df(mocker):\n spy_load_metadata = mocker.spy(MetaData, 'load_document')\n expected_df = pd.read_json('tests/odata/fixtures/records.json', orient='records')\n\n provider = ODataConnector(\n name='test',\n baseroute='http://services.odata.org/V4/Northwind/Northwind.svc/',\n auth={'type': 'basic', 'args': ['u', 'p']},\n )\n\n data_source = ODataDataSource(\n domain='test',\n name='test',\n entity='Orders',\n query={\n '$filter': \"ShipCountry eq 'France'\",\n '$orderby': 'Freight desc',\n '$skip': 50,\n '$top': 3,\n },\n )\n\n try:\n df = provider.get_df(data_source)\n sl = ['CustomerID', 'EmployeeID', 'Freight']\n assert df[sl].equals(expected_df[sl])\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')\n\n assert spy_load_metadata.call_count == 1\n args, _ = spy_load_metadata.call_args\n assert args[0].url.endswith('/$metadata')\n\n provider.auth = None\n try:\n provider.get_df(data_source)\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')",
"def test_readSongData():\n\n # check type\n assert isinstance(song_df, pd.DataFrame)\n\n # check shape\n assert song_df.shape == (1972060, 8)",
"def test_sector_perfomance_pandas_python2(self, mock_urlopen):\n sp = SectorPerformances(\n key=TestAlphaVantage._API_KEY_TEST, output_format='pandas')\n url = \"https://www.alphavantage.co/query?function=SECTOR&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = sp.get_sector()\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')",
"def supports_fetch_outside_dataloader(self):\r\n return True",
"def test_fetch_crime(self):\n assert isinstance(_tabular.fetch_crime_data(), \n pd.DataFrame)",
"def test_sector_perfomance_pandas_python3(self, mock_urlopen):\n sp = SectorPerformances(\n key=TestAlphaVantage._API_KEY_TEST, output_format='pandas')\n url = \"https://www.alphavantage.co/query?function=SECTOR&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = sp.get_sector()\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')",
"def test_open_fill(self):",
"def testCircuitFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'circuit',\n orderBy = [timeCol, 'circuit'],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')",
"def test_stream_to_data_frame():\n # -- Setup - Create archive in main memory --------------------------------\n archive = Archive()\n for df in [DF1, DF2, DF3]:\n doc = DataFrameDocument(df=df)\n archive.commit(doc)\n # -- Read dataframes for first two snapshots ------------------------------\n #\n # The snapshots are only identical if the data frames where sorted by the\n # data frame index. Thus, the third snapshot will return a data frame in\n # different order.\n pd.testing.assert_frame_equal(archive.open(version=0).to_df(), DF1)\n pd.testing.assert_frame_equal(archive.open(version=1).to_df(), DF2)",
"def test_readSongData():\n\n # make sure the number of columns pull out from the database is correct\n assert svd.song_df.shape[1] == 8",
"def test_get_frame(mock_source):\n frame_ingestor = FrameIngestor(mock_source)\n frame_ingestor.get_frame()\n\n mock_source.get_frame.assert_called_once()",
"def test_create_dataframe(chosen_columns, chosen_url):\n print(\"reading in data\")\n chosen_df = readindata(chosen_columns, chosen_url)\n print(\"checking columns\")\n checkcolumnstest(chosen_columns, chosen_df)\n print(\"checking types\")\n checktypestest(chosen_df)\n print(\"checking for Nan\")\n checkfornan(chosen_df)\n print(\"checking 1 row\")\n checkrowstest(chosen_df)\n return True",
"async def test_fetch_filtered_dataset_call(self):\n pool = asynctest.CoroutineMock()\n db_response = {\"referenceBases\": '', \"alternateBases\": '', \"variantType\": \"\",\n \"referenceName\": 'Chr38',\n \"frequency\": 0, \"callCount\": 0, \"sampleCount\": 0, \"variantCount\": 0,\n \"start\": 0, \"end\": 0, \"accessType\": \"PUBLIC\", \"datasetId\": \"test\"}\n pool.acquire().__aenter__.return_value = Connection(accessData=[db_response])\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n result = await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, False)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n expected = {'referenceName': 'Chr38', 'callCount': 0, 'sampleCount': 0, 'variantCount': 0, 'datasetId': 'test',\n 'referenceBases': '', 'alternateBases': '', 'variantType': '', 'start': 0, 'end': 0, 'frequency': 0,\n 'info': {'accessType': 'PUBLIC'},\n 'datasetHandover': [{'handoverType': {'id': 'CUSTOM', 'label': 'Variants'},\n 'description': 'browse the variants matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/variant/Chr38-1--'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Region'},\n 'description': 'browse data of the region matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/region/Chr38-1-1'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Data'},\n 'description': 'retrieve information of the datasets',\n 'url': 'https://examplebrowser.org/dataset/test/browser'}]}\n\n self.assertEqual(result, [expected])",
"def test_fetch_dataset(self):\n\n mock_pandas = MagicMock()\n mock_pandas.read_csv.return_value = sentinel.dataset\n\n result = Network.fetch_dataset(sentinel.url, pandas_impl=mock_pandas)\n\n self.assertEqual(result, sentinel.dataset)\n mock_pandas.read_csv.assert_called_once_with(sentinel.url, dtype=str)",
"def test_frame_to_rows(self):\n pass",
"def check_new_df(df):\n try:\n new_df = pull_modus()\n\n if assert_frame_equal(df, new_df):\n pass\n else:\n df = new_df.copy()\n\n return df\n \n except:\n pass # 'Modus URL not reachable'",
"def test_fetch_metadata_for_dataset(self):\n\n with patch.object(pd, \"read_csv\") as func:\n func.return_value = pd.DataFrame(\n {\"Archive Link\": [\"test2\", \"test1\", \"test3\"],\n \"Update Date\": [\"2020/1/2\", \"2020/1/1\", \"2020/1/3\"]}\n )\n result = Network.fetch_metadata_for_dataset(\"test\")\n pd.testing.assert_frame_equal(\n result,\n pd.DataFrame(\n {\"Archive Link\": [\"test1\", \"test2\", \"test3\"],\n \"Update Date\": pd.date_range(\"2020/1/1\", \"2020/1/3\")}\n ).set_index(\"Update Date\")\n )\n func.assert_called_once_with(\n \"https://healthdata.gov/api/views/test/rows.csv\",\n dtype=str\n )",
"def test_get_frame_no_source():\n frame_ingestor = FrameIngestor()\n with pytest.raises(RuntimeError):\n frame_ingestor.get_frame()",
"def test_build_dataframe(self):\n insert_good_data()\n dataframe = get_dataframe()\n # 1 2 3\n self.assertIs(type(dataframe['Total'][0]), numpy.float64)\n self.assertIs(type(dataframe['InvoiceDate'][0]), str)\n self.assertIs(type(dataframe['Count'][0]), numpy.int64)\n # 4\n self.assertEqual(dataframe['Total'][0], 8198.79)\n # 5\n self.assertDataframeEqual(dataframe, get_equal_dataframe())\n alt_dataframe = get_alter_dataframe(self.database_connection)\n # 6\n self.assertNotEqual(alt_dataframe['Count'][0], dataframe['Count'][0])\n # 7\n with self.assertRaises(AssertionError):\n self.assertDataframeEqual(alt_dataframe, dataframe)\n # 8\n self.assertEqual(dataframe['Total'][0], alt_dataframe['Total'][0])",
"def test_spotdb_reader(spotdb_data):\n\n db = spotdb_data\n\n reader = SpotDBReader(db)\n gfs = reader.read()\n\n assert len(gfs) == 4\n\n metrics = {\"Total time (inc)\", \"Avg time/rank (inc)\"}\n\n assert len(gfs[0].dataframe) > 2\n assert gfs[0].default_metric == \"Total time (inc)\"\n assert metrics < set(gfs[0].dataframe.columns)\n assert metrics < set(gfs[3].dataframe.columns)\n\n assert \"launchdate\" in gfs[0].metadata.keys()",
"def fetch_data(self):",
"def test_from_object_df(self):\n df_test = make_simple_dataframe()\n df_read = BaseDataClass.from_object(df_test).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )",
"def test_data_source_soaps_get(self):\n pass",
"def test_handle_data(self):\n for close in ('higher', 'lower'):\n print 'close:', close\n self.hd_args['close'] = close\n self.df_stock = self.backtest.handle_data(self.backtest.df_stock, **self.hd_args)\n print self.df_stock.to_string(line_width=400)\n\n print '=' * 100\n\n new_columns = ('found0', 'found1', 'found2',\n 'open_to_high', 'open_to_low', 'open_to_close')\n for column in new_columns:\n self.assertIn(column, self.df_stock.columns)",
"def test_fetch_from_wide_table(self):\n try:\n self.storage.store(RECORD_TABLE, value=\"a\", extra_column=\"EEK!\")\n a = self.clerk.fetch(Record, 1)\n a.value=\"aa\"\n self.clerk.store(a)\n except AttributeError:\n self.fail(\"shouldn't die when columns outnumber attributes\")",
"def _fetch_data(self):\n pass"
] | [
"0.73446274",
"0.65186745",
"0.6414948",
"0.6280164",
"0.61943877",
"0.6105646",
"0.5987783",
"0.596136",
"0.5908009",
"0.5907794",
"0.59036356",
"0.5877466",
"0.5877377",
"0.5873337",
"0.5770271",
"0.57309335",
"0.57286495",
"0.5710138",
"0.569773",
"0.56320137",
"0.5627794",
"0.5566823",
"0.55614346",
"0.55172527",
"0.55113566",
"0.54898965",
"0.5482834",
"0.54818475",
"0.54653126",
"0.5457008"
] | 0.81294215 | 0 |
Add v to an ordered set s and return s. >>> s = Link(1, Link(3, Link(5))) >>> add(s, 0) Link(0, Link(1, Link(3, Link(5)))) >>> add(s, 4) Link(0, Link(1, Link(3, Link(4, Link(5))))) >>> add(s, 6) Link(0, Link(1, Link(3, Link(4, Link(5, Link(6)))))) >>> t = Link(1) >>> add(t, 0) Link(0, Link(1)) | def add(s, v):
if empty(s):
return Link(v)
head = s
if head.first > v:
# s = Link(v, s) #error: assigment, then s will rebind to a new object
# s.first, s.rest = v, s # error s.rest = s
s.first, s.rest = v, Link(s.first, s.rest)
return s
# head.first <= v
while not empty(head.rest) and head.rest.first <= v:
head = head.rest
if head.first == v:
return s
else:
head.rest = Link(v, head.rest)
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(s, v):\n if empty(s):\n return Link(v)\n if s.first > v:\n s.first, s.rest = v, Link(s.first, s.rest)\n elif s.first < v and empty(s.rest):\n s.rest = Link(v, s.rest)\n elif s.first < v:\n add(s.rest, v)\n return s",
"def add(self, s):\n current = self.first()\n # case 1 : list is empty, add new node as first node\n if self.size() == 0:\n self.__add_first(s)\n return\n # case 2 : list is not empty, element to be added is smaller than all existing ones\n elif s < current.value():\n self.__add_first(s)\n return\n # case 3 : list is not empty, element is larger than value of current element\n else:\n self.__length += 1\n nxt = current.next()\n # loop until we are at the end to find where to insert element\n while nxt is not None:\n if s < nxt.value():\n n = self.Node(s, nxt)\n current.set_next(n)\n return\n current = nxt\n nxt = nxt.next()\n current.set_next(self.Node(s, None))\n return",
"def adjoin2(s, v):\n if empty(s) or s.first > v:\n return Link(v, s)\n elif s.first == v:\n return s\n else:\n return Link(s.first, adjoin2(s.rest, v))",
"def add(self, s, value):\n\t\thead, tail = s[0], s[1:]\n\t\tcur_node = self.root[head]\n\t\tif not tail:\n\t\t\tcur_node.value = value\n\t\t\treturn # No further recursion\n\t\tcur_node.add(tail, value)",
"def adjoin(s, v):\n if contains(s, v):\n return s\n else:\n return Link(v, s)",
"def add_edge(self, u, v):\r\n keys = self.d.keys()\r\n #if nodes are not in graph, add them\r\n if u not in keys:\r\n self.add_node(u)\r\n if v not in keys:\r\n self.add_node(v)\r\n #add each node to the value set of each other\r\n u_old = self.d[u]\r\n u_new = u_old.union(set(str(v)))\r\n v_old = self.d[v]\r\n v_new = v_old.union(set(str(u)))\r\n self.d.update({u:u_new, v:v_new})",
"def add_edge_directed(u, v):\n adj[u].append(v)",
"def add_edge_directed(u, v):\n adj[u].append(v)",
"def __add__(self, _v):\n\t\tif len(self) == len(_v):\n\t\t\tans = copy.deepcopy(self)\n\t\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] += _v[i]\n\t\t\treturn ans",
"def add(self,l,s=True):\r\n\t\t\t\t\r\n\t\t# make line\r\n\t\ts = self.copy()\r\n\t\tl = Li(l)\r\n\t\ta = Li._condense(l,s)\r\n\t\ta = Li(a,c=False)\r\n\t\t\t\r\n\t\t# sort?\r\n\t\tif s:\r\n\t\t\ta = a.sort()\r\n\t\t\t\r\n\t\treturn a",
"def add(self, v):\n if v != \"?\":\n self.n += 1\n self.lo = min(v, self.lo)\n self.hi = max(v, self.hi)\n\n if len(self.has) < the[\"nums\"]:\n self.has.append(v)\n self.is_sorted = False\n\n elif random.random() < the[\"nums\"] / self.n:\n pos = random.randint(0, len(self.has) - 1)\n self.has[pos] = v\n self.is_sorted = False",
"def addEdge(self,u,v):\r\n self.graph[u].append(v)",
"def connect(self, u, v):\n self.e[u].add(v)\n self.e[v].add(u)",
"def add_edge(u, v):\n adj[u].append(v)\n adj[v].append(u)",
"def add_edge(u, v):\n adj[u].append(v)\n adj[v].append(u)",
"def addVertex(self, v):\r\n self.adjacent.setdefault(v, list())",
"def add_sortedsets(self, key, score, member):\n return self.redis.zadd(key, score, member)",
"def add_elements_to_set(s: set, *args) -> set:\n s.update(set(*args))\n return s",
"def add_edge(self, s, e):\n self.graph[s].append(e)",
"def add_edge(self, u, v):\n self.graph[u].append(v)",
"def add_this_many(x, el, s):\r\n count = 0\r\n for i in range(len(s)):\r\n if s[i] == x:\r\n count +=1\r\n while count > 0:\r\n s.append(el)\r\n count -= 1",
"def union_add(this, that):\n return this.add(that, fill_value=0)",
"def __add__(self, v):\n self.n += 1\n self.cnt[v] += 1\n tmp = self.cnt[v]\n if tmp > self.most:\n self.most = tmp\n self.mode = v\n return v",
"def __addToLevel(self, head, value):\n\n #if DEBUG: print('\\t__addToLevel({})'.format(value))\n\n cur = head\n \n if cur.next == None:\n output = self.__insert(cur,value)\n return output\n \n #cur = cur.next\n\n while cur:\n if cur.next == None or \\\n cur.val == value or\\\n cur.next.val > value:\n output = self.__insert(cur,value)\n #output = cur\n break\n cur = cur.next\n return output",
"def add(self, *items):\n for item in items:\n self.unsorted.append(item)\n key = item[0]\n self.index[key] = item\n return self",
"def __add__(self,l):\r\n\t\t\r\n\t\t# add\r\n\t\ta = self.add(l)\r\n\t\t\r\n\t\treturn a",
"def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk",
"def addEdge(this, a, b):\n if not a in this.m:\n this.m[a]=set()\n this.m[a].add(b)",
"def add(self, item):\n \n previous = None\n current = self.head\n \n while current is not None:\n if current.get_data() > item:\n break\n else:\n previous = current\n current = current.get_next()\n \n n = Node(item)\n # If node is to be added at the beginning (incl. case of empty list)\n if previous is None:\n n.set_next(self.head)\n self.head = n\n else:\n previous.set_next(n)\n n.set_next(current)",
"def add(self, vertex):\n if not self.first:\n self.first = vertex\n self.first.next = vertex\n self.first.prev = vertex\n else:\n next = self.first\n prev = next.prev\n next.prev = vertex\n vertex.next = next\n vertex.prev = prev\n prev.next = vertex"
] | [
"0.7793301",
"0.6587982",
"0.6429319",
"0.6309428",
"0.61591506",
"0.60639244",
"0.5989767",
"0.5989767",
"0.58004624",
"0.5777832",
"0.5752884",
"0.56904775",
"0.56822777",
"0.56809616",
"0.56809616",
"0.56750697",
"0.5661514",
"0.56589",
"0.561813",
"0.5617699",
"0.55932224",
"0.55590105",
"0.5557789",
"0.55549157",
"0.5538273",
"0.55347526",
"0.54973215",
"0.5493604",
"0.5422333",
"0.53962153"
] | 0.7641497 | 1 |
Returns the Saved news object data in serializable format | def serialize(self):
return {
"id": self.id,
"headline": self.headline,
"url": self.url,
"image": self.image,
"shortDescription": self.shortDescription,
"saved": True,
"date": self.date,
"savedDate": self.savedDate
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def serialized_data(self):\n return {\n 'id': self.id,\n 'start_time': str(self.start_time),\n 'venue_id': self.venue_id,\n 'venue_name': self.venue.name,\n 'venue_image_link': self.venue.image_link,\n 'artist_id': self.artist_id,\n 'artist_name': self.artist.name,\n 'artist_image_link': self.artist.image_link\n }",
"def serialize(self):",
"def serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'title': self.title,\n\t\t\t'year': self.year,\n\t\t\t'artist': self.artist_id,\n\t\t\t'user': self.user_id\n\t\t}",
"def serialize(self):\n return {\n 'name' : self.name,\n 'description' : self.description,\n 'id' : self.id,\n 'picture' : self.picture,\n 'catalog_id' : self.catalog_id,\n 'user_id' : self.user_id,\n 'last_edit' : self.time_str,\n }",
"def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'description': self.description,\n 'ranking': self.ranking,\n 'created_date': self.created_date,\n }",
"def serialize(self):\n return {\n 'id' : self.id,\n 'description': self.description,\n 'longitude' : self.longitude,\n 'latitude' : self.latitude,\n 'created_on' : self.created_on,\n 'created_by' : self.created_by,\n 'likes' : self.likes\n }",
"def serialize(self, data):",
"def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'description': self.description,\n 'body': self.body,\n }",
"def serialize(self):\n return{\n 'name': self.name,\n 'sport': self.sport,\n 'description': self.description,\n 'id': self.id,\n }",
"def serialize(self):\n return {\n 'title': self.title,\n 'first_author': self.first_author,\n 'second_author': self.second_author,\n 'publisher': self.publisher,\n 'year_of_publication': self.year_of_publication\n }",
"def serialize(self):\n return {\n 'title': self.title,\n 'description': self.description,\n 'id': self.id,\n }",
"def saveData(self):\n pass",
"def serialize(self):\r\n return {\r\n \"book_id\": self.id,\r\n \"title\": self.title,\r\n \"author\": self.author,\r\n \"category\": self.category,\r\n }",
"def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'desc': self.desc,\n 'category_id': self.category_id,\n }",
"def serialized_data(self):\n upcoming_shows = self.upcoming_shows\n past_shows = self.past_shows\n\n return {\n 'id': self.id,\n 'name': self.name,\n 'phone': self.phone,\n 'image_link': self.image_link,\n 'facebook_link': self.facebook_link,\n 'city': self.city.name,\n 'state': self.city.state_name,\n 'num_upcoming_shows': len(upcoming_shows),\n 'upcoming_shows_count': len(upcoming_shows),\n 'upcoming_shows': upcoming_shows,\n 'past_shows': past_shows,\n 'past_shows_count': len(past_shows),\n }",
"def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'description': self.description,\n 'cat_id': self.cat_id,\n 'user_id': self.user_id\n }",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'date_time' : str(self.date_time),\n 'duration' : self.duration,\n 'highlights' : self.highlights,\n 'conference_id' : self.conference_id,\n 'type_of_session_id' : self.type_of_session_id,\n 'speaker_id' : self.speaker_id,\n 'location_id' : self.location_id,\n 'documents' : self.documents \n }",
"def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n 'user_id' : self.user_id,\n 'last_edit' : self.time_str, \n }",
"def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"detail\": self.detail,\n \"date_on\": self.date_on,\n }",
"def serialized_data(self):\n upcoming_shows = self.upcoming_shows\n past_shows = self.past_shows\n\n return {\n 'id': self.id,\n 'name': self.name,\n 'address': self.address,\n 'phone': self.phone,\n 'image_link': self.image_link,\n 'facebook_link': self.facebook_link,\n 'city': self.city.name,\n 'state': self.city.state_name,\n 'genres': self.genres if self.genres else [],\n 'website': self.website,\n 'seeking_description': self.seeking_description,\n 'seeking_talent': self.seeking_talent,\n 'num_upcoming_shows': len(upcoming_shows),\n 'upcoming_shows_count': len(upcoming_shows),\n 'upcoming_shows': upcoming_shows,\n 'past_shows': past_shows,\n 'past_shows_count': len(past_shows),\n }",
"def serialize(self):\n return{\n # 'date': self.date,\n 'date': self.date,\n 'id': self.id,\n }",
"def save_data(self):\n pass",
"def serialize(self):\n return {\n 'id': self.id,\n 'publication_id': self.publication_id,\n 'filename': self.filename,\n 'is_valid_format': self.is_valid_format,\n 'format_validation_message': self.format_validation_message,\n 'is_valid_data': self.is_valid_data,\n 'data_validation_message': self.data_validation_message,\n # 'user_id': self.user_id\n 'user_name': self.user.name\n }",
"def serialize(self):\n pass",
"def serialize(self):\n return {\n\n\n }",
"def serialize(self, data):\n return data",
"def dump(self):\n return self._data.dump()",
"def serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'title': self.title,\n\t\t\t'tracknum': self.track_num,\n\t\t\t'video': self.video_id\n\t\t}",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'date' : str(self.date),\n 'owner_id' : self.owner_id,\n }",
"def serialize(self):\n return {\n 'id' : self.id,\n 'session_id' : self.session_id,\n 'filename' : self.filename,\n 'filetype' : self.filetype\n }"
] | [
"0.67177093",
"0.67075676",
"0.66094065",
"0.6601732",
"0.6585369",
"0.6565495",
"0.6531196",
"0.649494",
"0.64723766",
"0.64716303",
"0.64442974",
"0.64352673",
"0.6431501",
"0.64202684",
"0.64165074",
"0.64140564",
"0.64076835",
"0.64070576",
"0.6395844",
"0.6394233",
"0.63878274",
"0.63833",
"0.6366175",
"0.6345943",
"0.63291746",
"0.6319308",
"0.63157916",
"0.630657",
"0.6305174",
"0.629103"
] | 0.6998519 | 0 |
This method is responsible for getting the messages to respond with. Also covers analytics events for those messages, e.g. click, view. | def respond_to_message(self):
MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events)
data = Converter(self.state).get_messages(meta_data=self.meta_data, message_data=self.message_data)
outgoing_messages = data.get("messages", [])
events_to_publish = data.get("publish_events", [])
agent_messages = [message["message"] for message in outgoing_messages if message["sending_to"] == "AGENT"]
user_messages = [message["message"] for message in outgoing_messages if message["sending_to"] == "USER"]
agent_response = Util.send_messages(messages=agent_messages, sending_to="AGENT")
user_response = Util.send_messages(messages=user_messages, sending_to="USER")
if agent_response or user_response:
Util.update_state(meta_data=self.meta_data, state=self.state)
Util.log_events(meta_data=self.meta_data, state=self.state, events=events_to_publish)
return 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_messages(self):\n pass",
"def handle_message(self, message):",
"def handle_messages():\n print(\"Handling Messages\")\n payload = request.get_data()\n for sender, incoming_message, payload in messaging_events(payload):\n # The following statements check which options the user selected\n # Response handler contains \"templates\" for the various messages\n user_name = get_full_name(sender, PAT)\n if \"hei\" in incoming_message.lower() or \"hallo\" in incoming_message.lower() or \"yo\" in incoming_message.lower()\\\n or \"hi\" in incoming_message.lower():\n send_message(PAT, send_message(PAT, response_handler.greeting_message(sender, user_name)))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n elif payload == \"change subject\" or \"change subject\" in incoming_message.lower():\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"help\" in incoming_message.lower():\n\n send_message(PAT, response_handler.text_message(sender, \"Are you lost ...? \"))\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form: [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.text_message(sender, \"If you want to see your currently selected course \"\n \"and other information type 'Status'.\"))\n send_message(PAT, response_handler.text_message(sender, \"You can also type 'Hei' or 'Hallo' at any time \"\n \"to receive a greeting that shows your options.\"))\n send_message(PAT, response_handler.text_message(sender, \"Here is a list of commands you can use. This is \"\n \"recommended for the experienced user:\\n\"\n \"Change subject\\n\"\n \"Give feedback\\n\"\n \"How did today's lecture go?\\n\"\n \"Get schedule\\n\"\n \"Get info\\n\"\n \"All lectures\\n\"\n \"A specific lecture\\n\"\n \"You can type most of the commands in chat. Just \"\n \"give it a try!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"status\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n user = get_full_name(sender, PAT)\n lecture_id_current = lecture_methods.get_lecture_from_date(year, week, day, subject)\n lecture = feedback_methods.get_lecture_object(lecture_id_current)\n\n if user_methods.has_user(user_name):\n sub = user_methods.get_subject_from_user(user_name) + \" : \" + \\\n subject_info.course_name(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.user_info(sender, user_name, sub))\n if feedback_methods.user_has_feedback_for_lecture(user, lecture):\n send_message(PAT, response_handler.text_message(sender, \"You have given feedback for \"\n + subject + \"today. Well done! Be proud of \"\n \"yourself and remember to check in \"\n \"tomorrow.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. 
\"\n \"Please press 'Give Feedback' or write it in the \"\n \"chat to do so.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"We seem to not be able to detect you in the database. \"\n \"Please report this to the staff!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n # Checks if the subject has lectures in the database, adds them if not.\n\n elif payload == \"give feedback\" or \"give feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.give_feedback_choice(sender))\n\n elif payload == \"lecture speed\" or \"lecture speed\" in incoming_message.lower():\n\n subject = user_methods.get_subject_from_user(user_name)\n\n if lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added.\"))\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" does not exist. Likely due to the subject having \"\n \"no lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif payload == \"evaluation_questions\" or \"lecture questions\" in incoming_message.lower():\n # User wants to give feedback for a lecture.\n subject = user_methods.get_subject_from_user(user_name)\n payload = \"evaluation_questions\" # if user typed 'lecture questions' the payload will be None\n\n if lecture_methods.check_lecture_in_db(subject):\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because there \"\n \"is no lecture today, or because you have already \"\n \"given feedback for this lecture.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added\"))\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(\n user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because \"\n \"there is no lecture today, or because you\"\n \" have already given feedback for this lecture.\"\n \"\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \"does not exist. 
Likely due to the subject having \"\n \"no \"\n \"lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif \"too slow\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '0'\n message_response = \"too slow\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"it's all right\" in incoming_message.lower() or \"its all right\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '1'\n message_response = \"It's all right\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"too fast\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '2'\n message_response = \"too fast\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif (\"today\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"todays\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"today's\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()):\n # Gathers the correct information about the date.\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n subject = user_methods.get_subject_from_user(user_name)\n # Gathers the feedback from today's lecture:\n if lecture_methods.check_lecture_in_db(subject):\n feedback_list = feedback_methods.get_single_lecture_feed(year, week, day, 
subject)\n if feedback_list[0] is not None:\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please try again at a later date.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender, \"No lecture present in the database. \"\n \"Please provide some feedback and try again.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get schedule\" or \"get schedule\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n schedule = subject_info.printable_schedule(subject_info.get_schedule(subject))\n if len(schedule) > 640:\n msg_list = message_split.message_split(schedule)\n for msg in msg_list:\n print(msg)\n send_message(PAT, response_handler.text_message(sender, msg))\n else:\n send_message(PAT, response_handler.text_message(sender, schedule))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get info\" or \"get info\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n send_message(PAT, response_handler.text_message(sender,\n subject_info.printable_course_info(\n subject_info.get_course_json(subject))))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get feedback\" or \"get feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.get_feedback_specific_or_all(sender))\n\n elif payload == \"all_lectures\" or \"all lectures\" in incoming_message.lower():\n # The user wants to see feedback for all lectures in the selected subject\n subject = user_methods.get_subject_from_user(user_name)\n if not lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.text_message(sender, \"Course has no feedback.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n feedback, feedbackevaluation = feedback_methods.get_all_subject_feed(subject)\n if len(feedback) > 0:\n percent_list = bot_feedback.generate_percent_for_speed(feedback)\n send_message(PAT, response_handler.all_feedback_speed(sender, subject, percent_list))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture speed.\"))\n if len(feedbackevaluation) > 0:\n percent_list_questions = bot_feedback.generate_percent_for_questions(feedbackevaluation)\n\n send_message(PAT, response_handler.all_feedback_questions(sender, subject, percent_list_questions))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture questions.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"a_specific_lecture\" or \"a specific lecture\" in incoming_message.lower():\n # Let the user choose what year to get feedback from.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n if len(years) > 0:\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n else:\n 
send_message(PAT, response_handler.text_message(sender, 'No feedback for the selected subject.'))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload is not None:\n # Underneath are check that use .split() on the payload.\n if \"evaluation_questions\" in payload.split()[0]:\n payload_split = payload.split()\n if len(payload_split) == 1:\n # 1st question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 2:\n # 2nd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 3:\n # 3rd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 4:\n # 4th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 5:\n # 5th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 6:\n # 6th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 7:\n # 7th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 8:\n # store feedback.\n subject = user_methods.get_subject_from_user(user_name)\n if feedback_methods.add_feedback_evaluation(user_name, subject, int(payload_split[1]),\n int(payload_split[2]), int(payload_split[3]),\n int(payload_split[4]), int(payload_split[5]),\n int(payload_split[6]), int(payload_split[7])):\n # Storing the feedback succeeded.\n send_message(PAT, response_handler.text_message(sender, 'Feedback received!'))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n # Storing the feedback failed.\n send_message(PAT, response_handler.text_message(sender,\n \"There is either no lecture active in the \"\n \"selected subject, or you have already given \"\n \"feedback to the active lecture.\\n Feedback \"\n \"denied!\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n pass\n\n elif \"get_lecture_feedback_year\" in payload.split()[0]:\n # Let the user choose what semester to get feedback from.\n semesters = []\n if lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 1, 17, int(payload.split()[1])):\n semesters.append('Spring')\n elif lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 32, 49, int(payload.split()[1])):\n semesters.append('Fall')\n if len(semesters) > 0:\n send_message(PAT, response_handler.get_feedback_semester(sender, payload.split()[1], semesters))\n else:\n # Take the user one step up to choose a different year.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n\n elif \"get_lecture_feedback_semester\" in payload.split()[0]:\n # Let the user choose what weeks to get feedback from.\n\n week_list = lecture_feedback_db_methods.get_lecture_weeks(user_methods.get_subject_from_user(user_name),\n int(payload.split()[1]), payload.split()[2])\n if len(week_list) > 8:\n send_message(PAT, response_handler.get_feedback_month(sender, payload.split()[1], week_list))\n else:\n send_message(PAT, response_handler.get_feedback_week(sender, payload.split()[1], week_list))\n\n elif \"get_lecture_feedback_month\" in payload.split()[0]:\n 
# Let the user select week\n week_list = []\n payload_split = payload.split()\n for i in range(2, len(payload_split)):\n week_list.append(int(payload_split[i].rstrip(',')))\n\n send_message(PAT, response_handler.get_feedback_week(sender, payload_split[1], week_list))\n\n elif \"get_lecture_feedback_week\" in payload.split()[0]:\n # Lets the user select day\n lecture_days = lecture_feedback_db_methods.get_day_of_lecture_in_week(\n user_methods.get_subject_from_user(user_name), payload.split()[1], payload.split()[2])\n\n send_message(PAT, response_handler.get_feedback_day(sender, payload.split()[1], lecture_days,\n payload.split()[2]))\n\n elif \"get_lecture_feedback_day\" in payload.split()[0]:\n\n subject = user_methods.get_subject_from_user(user_name)\n # Gives the user feedback from the selected day.\n feedback_list = feedback_methods.get_single_lecture_feed(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n feedback_questions_list = feedback_methods.get_single_lecture_feedback_questions(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n\n if len(feedback_list[1]) > 0: # Checks if there is feedback in the variable.\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture speed.\"))\n if len(feedback_questions_list) > 0: # Checks if there is feedback in the variable.\n feedback_questions = bot_feedback.generate_percent_for_questions(feedback_questions_list)\n send_message(PAT,\n response_handler.present_single_lecture_feedback_questions(sender, feedback_questions))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture \"\n \"questions.\"))\n\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif ime_data_fetch.subject_exists_boolean(incoming_message.upper().split()[0]):\n if user_methods.has_user(user_name):\n user_methods.add_subject(user_name, incoming_message.split()[0])\n else:\n user_methods.add_user(user_name, incoming_message.split()[0])\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Type 'help' to see what you can do with L.I.M.B.O.\\n If \"\n \"you tried to enter a subject-code and got this message,\"\n \" you either misspelled it or the subject you are looking \"\n \"for is not a subject at NTNU.\"))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n return \"ok\"",
"def msg_event(self, event):\r\n pass",
"def respond_to_events(self):\n event_response = MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events)\n\n if event_response == []:\n return {}\n return event_response[0]",
"def on_message(data):\n pass",
"def list_messages(self):",
"def _handle_message(self, msg):\n self.event('message', msg)",
"def receive_message(self, context, message):\r\n pass",
"def receive_message(self, message):",
"def listen_for_any_message(self, msg, match):\n question=\"{}\".format(msg)\n return self.cbmodel.get_response(question)",
"def handle_message(self, msg):\n pass",
"def get_response(self):\n return self.messages",
"def get_messages(self):\n print(\"Adding callback...\")\n self.__class__.callbacks.add(self._callback)\n #print(self.__class__.callbacks)",
"def handle_event(event_data):\n # define variable of data\n message = event_data.get('event')\n channel = message.get('channel')\n msg = message.get('text').lower()\n userid = message.get('user')\n username = convert_unicode(sc.api_call('users.info', user=userid)).get('user').get('profile').get('display_name')\n text = None\n print(msg)\n\n if \"tasks\" in msg or \"task\" in msg:\n ret_data = fb.display_list('Business', False)\n ret_data = filter(lambda x:username in [names.strip() for names in x[2].split(',')], ret_data)\n text = \"Click <http://team8tasks.serveo.net|here> to go to the Task Website\\n\"\n ongoing_tasks = return_tasks(ret_data, 'ongoing')\n overdue_tasks = return_tasks(ret_data, 'overdue')\n completed_tasks = return_tasks(ret_data, 'completed')\n sc.api_call('chat.postMessage', channel=channel, text=text, as_user=True, attachments=[{'text': ongoing_tasks, 'mrkdwn_in': [\"text\"], 'color': '#03572C'}, {'text': overdue_tasks, 'mrkdwn_in': [\"text\"], 'color': '#ff6666'}, {'text': completed_tasks, 'mrkdwn_in': [\"text\"]}])\n return\n elif \"hello\" in msg or \"hi\" in msg or \"hey\" in msg:\n text = \"Hello <@\" + userid + \">! What's up?\"\n elif \"no u\" in msg:\n text = \"no u\"\n else:\n text = 'Sorry I do not know what that command means. Try \"tasks\" to list your tasks.'\n\n sc.api_call('chat.postMessage', channel=channel, text=text, as_user=True)",
"def messages(self) -> dict:\n raise NotImplementedError",
"def handleMessage(msg):",
"def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)",
"def message(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'message')\r\n return http.Request('GET', url), parsers.parse_json",
"def onMessage(self, message):\n raise NotImplementedError",
"def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)",
"def processMessage(self, *args, **kwargs):\r\n pass",
"def receive_message(self, message):\r\n return",
"def get_messages(self):\r\n return self.messages",
"def get_message(self):\n\n if self.gotten: return\n self.get_recipients()\n self.get_text()\n self.get_price()\n self.get_files()\n self.set_text()\n if Settings.get_performer_category() or self.hasPerformers:\n self.get_performers()\n else:\n self.performers = \"unset\"\n self.gotten = True",
"def on_event():\n event = request.get_json()\n if event['type'] == 'ADDED_TO_SPACE' and not event['space']['singleUserBotDm']:\n text = 'Thanks for adding me to \"%s\"!' % (event['space']['displayName'] if event['space']['displayName'] else 'this chat')\n elif event['type'] == 'MESSAGE':\n text = 'You said: `%s`' % str(chat_service.spaces().list().execute()) #event['message']['text']\n else:\n return\n return json.jsonify({'text': text, 'thread':\"chet_cool\"})",
"def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))",
"def msg_handler(self, msg):\n self.view.frame.log.append(msg)",
"def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message",
"def handle_messages(self):\n\n #Get the time at which the code started running\n current_time = datetime.datetime.now()\n\n #get all messages between now and the time where a message was last received\n messages = self.client.messages.list(\n date_sent_before = datetime.datetime.now()+ datetime.timedelta(hours = TIMEDIFFERENCE),\n date_sent_after = self.last_message_timing + datetime.timedelta(hours = TIMEDIFFERENCE)\n )\n\n #Iterate through all the new messages\n for record in messages:\n #If it is not from the Twilio Client\n if record.from_ != 'whatsapp:+14155238886':\n #Then update the timing of the last message to the current time\n self.last_message_timing = current_time\n #If the message sent is the '?' that seeks to get the number\n #of people in the queue\n if record.body == '?':\n #Get the data about people from firebase\n people_data = self.firebase.get_data('people_count')\n #Get the number of people queueing\n no_of_people = people_data['people_count']\n #Create a message from the API to tell the person\n #asking the number of people in the queue\n message = self.client.messages.create(\n body='The number of the people in the queue is {}'.format(no_of_people),\n from_='whatsapp:{sender_number}'.format(**self.config),\n to=record.from_\n )"
] | [
"0.64154905",
"0.63751894",
"0.635842",
"0.62978166",
"0.6275776",
"0.62662673",
"0.6228096",
"0.6210156",
"0.6176662",
"0.6156185",
"0.6149644",
"0.61168385",
"0.60755175",
"0.6055626",
"0.6054083",
"0.6031606",
"0.6029565",
"0.6019241",
"0.5985364",
"0.5980711",
"0.59471154",
"0.5932648",
"0.59226626",
"0.59200895",
"0.5905758",
"0.59049445",
"0.5897722",
"0.5895032",
"0.5876532",
"0.5873094"
] | 0.69531924 | 0 |
Given the VM data from the API, create a dictionary that contains all of the necessary keys for the template. The keys will be checked in the update method and not here; this method is only concerned with fetching the data that it can. | def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:
vm_id = vm_data['id']
Windows.logger.debug(f'Compiling template data for VM #{vm_id}')
data: Dict[str, Any] = {key: None for key in Windows.template_keys}
data['vm_identifier'] = f'{vm_data["project"]["id"]}_{vm_id}'
# changes
changes: Dict[str, Any] = {
'ram': False,
'cpu': False,
'storages': False,
}
updates = vm_data['history'][0]
try:
if updates['ram_quantity'] is not None:
# RAM is needed in MB for the updater but we take it in in GB (1024, not 1000)
changes['ram'] = vm_data['ram'] * 1024
except KeyError:
pass
try:
if updates['cpu_quantity'] is not None:
changes['cpu'] = vm_data['cpu']
except KeyError:
pass
# Fetch the drive information for the update
try:
if len(updates['storage_histories']) != 0:
Windows.logger.debug(f'Fetching drives for VM #{vm_id}')
child_span = opentracing.tracer.start_span('fetch_drive_updates', child_of=span)
changes['storages'] = Windows.fetch_drive_updates(vm_data)
child_span.finish()
except KeyError:
pass
# Add changes to data
data['changes'] = changes
data['storage_type'] = vm_data['storage_type']
data['vms_path'] = settings.HYPERV_VMS_PATH
# Get the host name of the server
host_name = None
for interface in vm_data['server_data']['interfaces']:
if interface['enabled'] is True and interface['ip_address'] is not None:
if IPAddress(str(interface['ip_address'])).version == 6:
host_name = interface['hostname']
break
if host_name is None:
error = f'Host ip address not found for the server # {vm_data["server_id"]}.'
Windows.logger.error(error)
vm_data['errors'].append(error)
return None
# Add the host information to the data
data['host_name'] = host_name
# Determine restart
data['restart'] = vm_data['restart']
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n vm_id = vm_data['id']\n Windows.logger.debug(f'Compiling template data for VM #{vm_id}')\n data: Dict[str, Any] = {key: None for key in Windows.template_keys}\n\n data['vm_identifier'] = f'{vm_data[\"project\"][\"id\"]}_{vm_id}'\n data['image_answer_file_name'] = vm_data['image']['answer_file_name']\n\n data['image_filename'] = vm_data['image']['filename']\n # check if file exists at /mnt/images/HyperV/VHDXs/\n path = '/mnt/images/HyperV/VHDXs/'\n child_span = opentracing.tracer.start_span('vm_image_file_download', child_of=span)\n if not Windows.check_image(data['image_filename'], path):\n # download the file\n downloaded, errors = Windows.download_image(data['image_filename'], path)\n if not downloaded:\n for error in errors:\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n child_span.finish()\n\n # RAM is needed in MB for the builder but we take it in in GB (1024, not 1000)\n data['ram'] = vm_data['ram'] * 1024\n data['cpu'] = vm_data['cpu']\n data['dns'] = vm_data['dns']\n\n # Generate encrypted passwords\n data['admin_password'] = Windows._password_generator(size=12)\n # Also save the password back to the VM data dict\n vm_data['admin_password'] = data['admin_password']\n\n # Check for the primary storage\n if not any(storage['primary'] for storage in vm_data['storages']):\n error = 'No primary storage drive found. Expected one primary storage drive'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n data['storages'] = vm_data['storages']\n data['storage_type'] = vm_data['storage_type']\n\n # Get the Networking details\n data['vlans'] = []\n data['ip_addresses'] = []\n data['default_ips'] = []\n data['default_gateway'] = ''\n data['default_netmask_int'] = ''\n data['default_vlan'] = ''\n\n # The private IPs for the VM will be the one we need to pass to the template\n vm_data['ip_addresses'].reverse()\n ip_addresses = []\n subnets = []\n for ip in vm_data['ip_addresses']:\n if IPAddress(ip['address']).is_private():\n ip_addresses.append(ip)\n subnets.append({\n 'address_range': ip['subnet']['address_range'],\n 'vlan': ip['subnet']['vlan'],\n 'id': ip['subnet']['id'],\n })\n # Removing duplicates\n subnets = [dict(tuple_item) for tuple_item in {tuple(subnet.items()) for subnet in subnets}]\n # sorting nics (each subnet is one nic)\n for subnet in subnets:\n non_default_ips = []\n gateway, netmask_int = subnet['address_range'].split('/')\n vlan = str(subnet['vlan'])\n data['vlans'].append(vlan)\n\n for ip_address in ip_addresses:\n address = ip_address['address']\n if ip_address['subnet']['id'] == subnet['id']:\n # Pick the default ips if any\n if vm_data['gateway_subnet'] is not None:\n if subnet['id'] == vm_data['gateway_subnet']['id']:\n data['default_ips'].append(address)\n data['default_gateway'] = gateway\n data['default_netmask_int'] = netmask_int\n data['default_vlan'] = vlan\n continue\n # else store the non gateway subnet ips\n non_default_ips.append(address)\n\n if len(non_default_ips) > 0:\n data['ip_addresses'].append({\n 'ips': non_default_ips,\n 'gateway': gateway,\n 'netmask_int': netmask_int,\n 'vlan': vlan,\n })\n\n # Add locale data to the VM\n data['language'] = 'en_IE'\n data['timezone'] = 'GMT Standard Time'\n\n # Get the host name of the server\n host_name = None\n for interface in vm_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if 
IPAddress(str(interface['ip_address'])).version == 6:\n host_name = interface['hostname']\n break\n if host_name is None:\n error = f'Host name is not found for the server # {vm_data[\"server_id\"]}'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n # Add the host information to the data\n data['host_name'] = host_name\n data['network_drive_url'] = settings.NETWORK_DRIVE_URL\n data['vms_path'] = settings.HYPERV_VMS_PATH\n\n return data",
"def dict(self):\n d = {}\n d['template_id'] = self.id\n d['name'] = self.name\n d['cpu'] = self.cpu\n d['memory'] = self.memory\n d['points'] = self.points\n d['description'] = self.description\n d['ec2name'] = self.ec2name\n # state is not put in dictionary\n return d",
"def _get_template_data(snapshot_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n snapshot_id = snapshot_data['id']\n Linux.logger.debug(f'Compiling template data for Snapshot #{snapshot_id}')\n data: Dict[str, Any] = {key: None for key in Linux.template_keys}\n\n data['host_sudo_passwd'] = settings.NETWORK_PASSWORD\n data['snapshot_identifier'] = f'{snapshot_data[\"vm\"][\"id\"]}_{snapshot_data[\"id\"]}'\n data['vm_identifier'] = f'{snapshot_data[\"vm\"][\"project\"][\"id\"]}_{snapshot_data[\"vm\"][\"id\"]}'\n\n # Get the ip address of the host\n host_ip = None\n for interface in snapshot_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_ip = interface['ip_address']\n break\n if host_ip is None:\n error = f'Host ip address not found for the server # {snapshot_data[\"vm\"][\"server_id\"]}'\n Linux.logger.error(error)\n snapshot_data['errors'].append(error)\n return None\n data['host_ip'] = host_ip\n return data",
"def _vm_templates(self, vm, log=None):\n vm_kwargs = self._vm_kwargs(vm)\n tids = self._get_templates(vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM, log=log)\n tids.update(self._get_vm_nic_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_NIC, log=log))\n tids.update(self._get_vm_disk_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_DISK, log=log))\n\n return tids",
"def _get_template_data(self):\n self._set_meta_info()\n if self._report_key == ReportTypes.SEARCH_TOC_REPORT:\n self._set_selected()\n elif self._report_key == ReportTypes.MHR_COVER:\n self._report_data['cover'] = report_utils.set_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n elif self._report_key == ReportTypes.MHR_REGISTRATION_COVER:\n self._report_data['regCover'] = report_utils.set_registration_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n if str(self._report_data.get('registrationType', '')).startswith('TRAN'):\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n else:\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_search_additional_message()\n elif self._report_key == ReportTypes.MHR_TRANSFER:\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n self._set_date_times()\n self._set_addresses()\n self._set_owner_groups()\n if self._report_key not in (ReportTypes.MHR_REGISTRATION,\n ReportTypes.MHR_TRANSFER,\n ReportTypes.MHR_TRANSPORT_PERMIT):\n self._set_notes()\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_selected()\n self._set_ppr_search()\n elif self._report_key == ReportTypes.SEARCH_BODY_REPORT:\n # Add PPR search template setup here:\n self._set_ppr_search()\n if self._report_key not in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION, ReportTypes.MHR_NOTE):\n self._set_location()\n if self._report_key != ReportTypes.MHR_TRANSPORT_PERMIT:\n self._set_description()\n return self._report_data",
"def prepare_template(self, rest_handler, key=''):\n template_values = {}\n template_values['page_title'] = self.format_title('Edit Question')\n template_values['main_content'] = self.get_form(rest_handler, key=key)\n\n return template_values",
"def build_dict(self, user_info):\n if user_info:\n lookup_dict = {\n \"cloud_stats\": {\"title\": \"Cloud Statistics\",\n \"link\": \"/status/cloud\",\n \"is_admin_panel\": True,\n \"template\": \"status/cloud.html\"},\n \"database_stats\": {\"title\": \"Database Information\",\n \"is_admin_panel\": True,\n \"template\": \"apps/database.html\"},\n \"memcache_stats\": {\"title\": \"Global Memcache Statistics\",\n \"is_admin_panel\": True,\n \"template\": \"apps/memcache.html\"},\n \"upload_app\": {\"title\": \"Upload Application\",\n \"link\": \"/apps/new\",\n \"template\": \"apps/new.html\"},\n \"delete_app\": {\"title\": \"Delete Application\",\n \"link\": \"/apps/delete\",\n \"template\": \"apps/delete.html\"},\n \"relocate_app\": {\"title\": \"Relocate Application\",\n \"link\": \"/apps/relocate\",\n \"template\": \"apps/relocate.html\"},\n \"service_accounts\": {\"title\": \"Service Accounts\",\n \"link\": \"/service_accounts\"},\n \"manage_users\": {\"title\": \"Manage Users\",\n \"link\": \"/authorize\",\n \"is_admin_panel\": True,\n \"template\": \"authorize/cloud.html\"},\n \"logging\": {\"title\": \"Log Viewer\",\n \"link\": \"/logs\",\n \"template\": \"logs/main.html\"},\n \"taskqueue\": {\"title\": \"TaskQueue\",\n \"link\": self.get_flower_url()},\n \"pull_queue_viewer\": {\"title\": \"Pull Queue Viewer\",\n \"link\": \"/pull_queue_viewer\"},\n \"cron\": {\"title\": \"Cron\",\n \"link\": \"/cron\",\n \"template\": \"cron/console.html\"},\n \"app_console\": {\"title\": \"Application Statistics\",\n \"template\": \"apps/console.html\",\n \"link\": \"/apps/\"},\n \"datastore_viewer\": {\"title\": \"Datastore Viewer\",\n \"link\": \"/datastore_viewer\"}\n }\n if user_info.can_upload_apps:\n lookup_dict[\"app_management\"] = {\"App Management\":\n [{\"upload_app\": lookup_dict[\n \"upload_app\"]},\n {\"delete_app\": lookup_dict[\n \"delete_app\"]},\n {\"relocate_app\": lookup_dict[\n \"relocate_app\"]},\n {\"service_accounts\": lookup_dict[\n \"service_accounts\"]}]}\n if user_info.is_user_cloud_admin:\n lookup_dict[\"appscale_management\"] = {\"AppScale Management\":\n [{\"cloud_stats\": lookup_dict[\n \"cloud_stats\"]},\n {\"manage_users\": lookup_dict[\n \"manage_users\"]}]}\n if user_info.owned_apps or user_info.is_user_cloud_admin:\n sections = ['taskqueue', 'pull_queue_viewer', 'logging',\n 'app_console', 'cron', 'datastore_viewer']\n lookup_dict[\"debugging_monitoring\"] = {\n \"Debugging/Monitoring\": [{section: lookup_dict[section]}\n for section in sections]\n }\n return lookup_dict\n else:\n return {}",
"def create_initial_templates_document() -> Dict[str, Any]:\n return {\n 'schema-version': 'v1', 'document-version': '',\n 'gateway-templates': [], 'service-templates': [],\n }",
"def _get_vm_ids_and_names_dict(self):\r\n vm_ids = {}\r\n vm_names = {}\r\n\r\n for content in self.content:\r\n if content['type'].lower() in ('vm', 'virtual machine'):\r\n vm_ids[content['id']] = content['display_name']\r\n vm_names[content['display_name']] = content['id']\r\n\r\n return vm_ids, vm_names",
"def get_template_data(cls, pydata, view):\n return dict(previewdata=cls.get_previewdata(pydata),\n content_types=view.content_types,\n title=cls.html_title,\n brandingtitle=cls.html_brandingtitle,\n heading=cls.html_heading)",
"def get_data(self, **kwargs):\n\n self.data = {}\n #node_data = ''\n #link_data = ''\n templates_data = self.request_from_server('templates')\n self.templates = templates_data\n project_data = self.request_from_server('projects')\n for project in project_data:\n project_name = project['name']\n if 'project_name' in kwargs:\n if project_name != kwargs['project_name']:\n continue\n\n self.data[project_name] = {}\n self.data[project_name]['project_id'] = project['project_id']\n self.data[project_name]['nodes'] = {}\n node_data = self.request_from_server('projects/{}/nodes'.format(project['project_id']))\n link_data = self.request_from_server('projects/{}/links'.format(project['project_id']))\n for node in node_data:\n node_name = node['name']\n self.data[project_name]['nodes'][node_name] = {}\n self.data[project_name]['nodes'][node_name]['node_id'] = node['node_id']\n self.data[project_name]['nodes'][node_name]['template_id'] = node['template_id']\n self.data[project_name]['nodes'][node_name]['node_type'] = node['node_type']\n self.data[project_name]['nodes'][node_name]['console_port'] = node['console']\n self.data[project_name]['nodes'][node_name]['console_session'] = None\n self.data[project_name]['nodes'][node_name]['x'] = node['x']\n self.data[project_name]['nodes'][node_name]['y'] = node['y']\n self.data[project_name]['nodes'][node_name]['ports'] = {}\n if project['status'] != 'closed':\n self.data[project_name]['nodes'][node_name]['status'] = node['status']\n for port in node['ports']:\n port_name = port['short_name']\n self.data[project_name]['nodes'][node_name]['ports'][port_name] = {}\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['adapter_number'] = port['adapter_number']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['port_number'] = port['port_number']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_type'] = port['link_type']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_id'] = None\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['in_use'] = False\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = None\n for link in link_data:\n for link_node in link['nodes']:\n if node['node_id'] == link_node['node_id']:\n if link_node['label']['text'] == port_name:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_id'] = link['link_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['in_use'] = True\n if link['nodes'].index(link_node) == 0:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to_id'] = link['nodes'][1]['node_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = self.get_node_name_from_id(project_name,link['nodes'][1]['node_id'])\n else:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to_id'] = link['nodes'][0]['node_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = self.get_node_name_from_id(project_name,link['nodes'][0]['node_id'])",
"def fill_default_attributes(self, template_dictionary, escape_db_operations=False):\n template_dictionary = self._populate_user_and_project(template_dictionary, escape_db_operations)\n template_dictionary = self._populate_message(template_dictionary)\n template_dictionary = self._populate_menu(template_dictionary)\n\n if KEY_ERRORS not in template_dictionary:\n template_dictionary[KEY_ERRORS] = {}\n if KEY_FORM_DATA not in template_dictionary:\n template_dictionary[KEY_FORM_DATA] = {}\n if KEY_SUB_SECTION not in template_dictionary and KEY_SECTION in template_dictionary:\n template_dictionary[KEY_SUB_SECTION] = template_dictionary[KEY_SECTION]\n if KEY_SUBMENU_LIST not in template_dictionary:\n template_dictionary[KEY_SUBMENU_LIST] = None\n\n template_dictionary[KEY_CURRENT_VERSION] = cfg.BASE_VERSION\n return template_dictionary",
"def __verify_details(self):\n if self.major[0] not in self.data[self.root]:\n self.data[self.root][self.major[0]] = {}\n for key, value in self.template_data[self.root][self.major[0]].items():\n key, value = self.__verified_details_key_value(key, value)\n self.data[self.root][self.major[0]][key] = self.__verify_values(key, value, self.data[self.root][self.major[0]])",
"def get_template_data(self) -> dict:\n template_data = self._get_template_data()\n\n @dataclass\n class FileEntry:\n \"\"\"Provides an entry into manifest object.\"\"\"\n\n name: str\n size: str\n md5: Optional[str]\n\n template_data[\"resource_files\"] = [\n FileEntry(entry.name, convert_size(entry.size), entry.md5)\n for entry in self.resource.get_manifest().entries.values()\n if not entry.name.startswith(\"statistics\")\n and entry.name != \"index.html\"]\n template_data[\"resource_files\"].append(\n FileEntry(\"statistics/\", \"\", \"\"))\n return template_data",
"def get_template_variables(hostname: str) -> dict:\n looprun = asyncio.get_event_loop().run_until_complete\n\n nb = NetboxClient(timeout=60)\n nb_dev = looprun(nb.fetch_device(hostname))\n\n # setup API params to retrieve only those items specific to this device.\n # the APIs used share the same parameters :-)\n\n params = dict(device_id=nb_dev[\"id\"], limit=0)\n\n res_intfs, res_ipaddrs, res_site = looprun(\n asyncio.gather(\n nb.get(\"/dcim/interfaces\", params=params),\n nb.get(\"/ipam/ip-addresses\", params=params),\n nb.get(f\"/dcim/sites/{nb_dev['site']['id']}\"),\n )\n )\n\n rp_ipaddr = None\n\n if hostname.endswith(\"rs21\"):\n # need to fetch rs22 loopback0 IP address\n res: Response = looprun(\n nb.get(\n \"/ipam/ip-addresses\",\n params={\"interface\": \"loopback0\", \"device\": hostname[0:3] + \"rs22\"},\n )\n )\n\n res.raise_for_status()\n body = res.json()\n if body[\"count\"] != 1:\n raise RuntimeError(\"RS22 loopback0 IP not found\")\n\n rp_ipaddr = body[\"results\"][0][\"address\"]\n\n looprun(nb.aclose())\n\n intf_recs = res_intfs.json()[\"results\"]\n ipaddr_recs = res_ipaddrs.json()[\"results\"]\n site_rec = res_site.json()\n\n tvars = dict(\n hostname=nb_dev[\"name\"],\n site=nb_dev[\"site\"][\"slug\"],\n ASN=site_rec[\"asn\"],\n INTF_DESC={rec[\"name\"]: rec[\"description\"] for rec in intf_recs},\n INTF_IPADDR={rec[\"interface\"][\"name\"]: rec[\"address\"] for rec in ipaddr_recs},\n )\n\n if not rp_ipaddr:\n rp_ipaddr = tvars[\"INTF_IPADDR\"][\"loopback0\"]\n\n tvars[\"pim_rp_address\"] = rp_ipaddr.split(\"/\")[0]\n\n if (rcd := nb_dev[\"config_context\"]) is not None:\n tvars.update(rcd)\n\n if (lcd := nb_dev[\"local_context_data\"]) is not None:\n tvars.update(lcd)\n\n return tvars",
"def make_entity_dict(class_reference, template, partial_dict): \n _data = class_reference.properties()\n for _key in _data:\n _data[_key] = partial_dict.get(_key, template.get(_key, '')) \n return _data",
"def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result",
"def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wcapi = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n # var_url = ''\n price = 0.0\n if variant.variant_id:\n info = {'id':variant.variant_id}\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku':variant.default_code, 'weight':str(weight),\n \"manage_stock\":variant.woo_is_manage_stock})\n else:\n attributes = \\\n self.get_product_attribute(template.product_tmpl_id, instance, common_log_id,\n model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0,\n partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price':str(price), 'sale_price':str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku':variant.default_code,\n \"manage_stock\":variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price':str(price), 'sale_price':str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'update':woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % (res.content)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % (template.name))\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\":attributes})\n res = wcapi.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'create':variants_to_create})\n try:\n response = res.json()\n except Exception as e:\n message = \"Json Error : While update products to WooCommerce for instance %s.\" \\\n \" \\n%s\" % (instance.name, e)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n else:\n variant_id = product.get(\"id\")\n sku = product.get(\"sku\")\n variant = template.woo_product_ids.filtered(lambda x:x.default_code == sku)\n if variant:\n variant.write({\"variant_id\":variant_id, \"exported_in_woo\":True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag",
"def get_data_to_create_object(self):\n return {}",
"def get_json(self):\n data = {}\n data['ip'] = self.ip\n\n try:\n data['country'] = self.processedvtdata[\"country\"]\n except KeyError:\n data['country'] = 'None'\n try:\n data['as'] = self.processedvtdata[\"as_owner\"]\n except KeyError:\n data['as'] = 'None'\n try:\n data['rdns'] = self.processedvtdata[\"self.reversedns\"]\n except KeyError:\n data['rdns'] = 'None'\n try:\n data['label'] = self.expertlabel\n except AttributeError:\n data['label'] = ''\n\n # geodata\n #{\"status\":\"success\",\"country\":\"Yemen\",\"countryCode\":\"YE\",\"region\":\"SA\",\"regionName\":\"Amanat Alasimah\",\"city\":\"Sanaa\",\"zip\":\"\",\"lat\":15.3522,\"lon\":44.2095,\"timezone\":\"Asia/Aden\",\"isp\":\"Public Telecommunication Corporation\",\"org\":\"YemenNet\",\"as\":\"AS30873 Public Telecommunication Corporation\",\"query\":\"134.35.218.63\"}\n if self.geodata:\n data['geodata'] = self.geodata\n \n # vt resolutions. Is a list\n data['vt'] = {}\n try:\n if self.processedvtdata['resolutions'] != 'None':\n data['vt']['resolutions'] = []\n for count, resolution_tuple in enumerate(self.processedvtdata['resolutions']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = resolution_tuple[0]\n temp['domain'] = resolution_tuple[1]\n data['vt']['resolutions'].append(temp)\n except KeyError:\n pass\n\n # vt urls. Is a list\n try:\n if self.processedvtdata['detected_urls'] != 'None':\n data['vt']['detected_urls'] = []\n for count, url_tuple in enumerate(self.processedvtdata['detected_urls']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = url_tuple[0]\n temp['url'] = url_tuple[1][0]\n temp['detections'] = str(url_tuple[1][1]) + '/' + str(url_tuple[1][2])\n data['vt']['detected_urls'].append(temp)\n except KeyError:\n pass\n\n\n # vt detected communicating samples. Is a list\n try:\n if self.processedvtdata['detected_communicating_samples'] != 'None':\n data['vt']['detected_communicating_samples'] = []\n for count, communcating_tuple in enumerate(self.processedvtdata['detected_communicating_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = communcating_tuple[0]\n temp['detections'] = str(communcating_tuple[1][0]) + '/' + str(communcating_tuple[1][1])\n temp['sha256'] = communcating_tuple[1][2]\n data['vt']['detected_communicating_samples'].append(temp)\n except AttributeError:\n pass\n\n # vt detected downloaded samples. Is a list\n try:\n if self.processedvtdata['detected_downloaded_samples'] != 'None':\n data['vt']['detected_downloaded_samples'] = []\n for count, detected_tuple in enumerate(self.processedvtdata['detected_downloaded_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = detected_tuple[0]\n temp['detections'] = str(detected_tuple[1][0]) + '/' + str(detected_tuple[1][1])\n temp['sha256'] = detected_tuple[1][2]\n data['vt']['detected_downloaded_samples'].append(temp)\n except AttributeError:\n pass\n\n # vt referrer downloaded samples. 
Is a list\n try:\n if self.processedvtdata['detected_referrer_samples'] != 'None':\n data['vt']['detected_referrer_samples'] = []\n for count, referrer_tuple in enumerate(self.processedvtdata['detected_referrer_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['sha256'] = referrer_tuple[0]\n temp['detections'] = str(referrer_tuple[1][0]) + '/' + str(referrer_tuple[1][1])\n data['vt']['detected_referrer_samples'].append(temp)\n except AttributeError:\n pass\n\n # pt data\n data['pt'] = {}\n if self.processedptdata:\n count = 0\n data['pt']['passive_dns'] = []\n for result in self.processedptdata_results:\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['lastseen'] = result[0]\n temp['firstseen'] = result[1][0]\n temp['hostname'] = result[1][1]\n data['pt']['passive_dns'].append(temp)\n count += 1\n\n # shodan data\n try:\n if self.shodandata:\n data['shodan'] = self.shodandata\n except AttributeError:\n pass\n\n data = json.dumps(data)\n return data",
"def _setup_report_data(self):\n # current_app.logger.debug('Setup report data template starting.')\n template = self._get_template()\n current_app.logger.debug('Setup report data template completed, setup data starting.')\n data = {\n 'reportName': self._get_report_filename(),\n 'template': template,\n 'templateVars': self._get_template_data()\n }\n current_app.logger.debug('Setup report data completed.')\n return data",
"def normalize_data(vms, vm_statuses, nics, public_ips):\n normalized_data = {}\n for vm_id in vms:\n vm_data = vms[vm_id]\n name = vm_data['name']\n nic_id = vm_data['nic_id']\n nic_data = nics[nic_id]\n public_ip_id = nic_data['public_ip_id']\n public_ip_data = public_ips[public_ip_id]\n public_ip = public_ip_data['address']\n public_dns_name = public_ip_data['fqdn']\n status = vm_statuses[vm_id]\n source = \"Azure\"\n instance_data = { 'public_ip': public_ip, 'public_dns_name': public_dns_name, 'status': status, 'source': source }\n normalized_data[name] = instance_data\n return normalized_data",
"def get_model_template(self, ApiId: str, ModelId: str) -> Dict:\n pass",
"def create_system_data():\n system_data = dict()\n system_data['system'] = dict()\n system_data['system']['primary'] = dict()\n system_data['system']['primary']['controllers'] = dict()\n system_data['system']['primary']['controllers']['re0'] = dict()\n system_data['system']['primary']['controllers']['re0']['hostname'] = 'abc'\n system_data['system']['primary']['controllers']['re0']['mgt-ip'] = '1.1.1.1'\n system_data['system']['primary']['controllers']['re0']['osname'] = 'Paragon'\n system_data['system']['primary']['name'] = 'abc'\n system_data['system']['primary']['model'] = 'Paragon'\n system_data['system']['primary']['make'] = 'Calnex'\n system_data['system']['primary']['server-ip'] = '1.1.1.2'\n system_data['system']['primary']['osname'] = 'Paragon'\n return system_data",
"def get_crud_template_dict():\n return CRUD_TEMPLATE_DICT",
"def gen_compute_data(self):\n\n print \"\\t* Generating combined nova and neutron data\"\n self.init_compute_clients()\n self.compute_data[\"heat_template_version\"] = \"2013-05-23\"\n self.compute_data[\"description\"] = \"Generated Template %s on Project %s\" % \\\n (str(datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")), str(self.tenant_name))\n self.compute_data[\"parameters\"] = {}\n self.compute_data[\"resources\"] = {}\n self.gen_parameters()\n self.gen_resources()\n self.compute_template = self.compute_data",
"def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wc_api = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n price = 0.0\n if variant.variant_id:\n info = {'id': variant.variant_id, 'menu_order': variant.sequence}\n # Below are used to set the color in the metadata field.\n product_template_attribute_value = variant.product_id.product_template_attribute_value_ids.filtered(\n lambda attribute: attribute.display_type == 'color') or False\n if product_template_attribute_value and product_template_attribute_value.product_attribute_value_id.html_color:\n meta_data = []\n meta_data.append({'key': 'markersnpens-color-picker',\n 'value': product_template_attribute_value.product_attribute_value_id.html_color})\n info.update({'meta_data': meta_data})\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku': variant.default_code, 'weight': str(weight),\n \"manage_stock\": variant.woo_is_manage_stock})\n else:\n attributes = self.get_product_attribute(template.product_tmpl_id, instance, common_log_id, model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0, partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price': str(price), 'sale_price': str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku': variant.default_code, \"manage_stock\": variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price': str(price), 'sale_price': str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'update': woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % res.content\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % template.name)\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\": attributes})\n res = wc_api.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'create': variants_to_create})\n try:\n response = res.json()\n except Exception as error:\n message = \"Json Error : While update products to WooCommerce for instance %s. 
\\n%s\" % (\n instance.name, error)\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n else:\n variant_id = product.get(\"id\")\n variant = template.woo_product_ids.filtered(lambda x: x.default_code == product.get(\"sku\"))\n if variant:\n variant.write({\"variant_id\": variant_id, \"exported_in_woo\": True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag",
"def _template_data(self):\n return {\"form\": self.form.render()}",
"def get_low_use_template_data(self, creator, low_use_instances, instances_scheduled_for_deletion):\n template_data = {\n 'creator': creator,\n 'creator_name': creator.split('@')[0],\n 'instance': []\n }\n\n for instance in low_use_instances:\n if instance['Creator'] is None:\n instance['Creator'] = 'Unknown'\n instance_data = {\n 'instance_id': instance['InstanceID'],\n 'instance_creator': instance['Creator'],\n 'scheduled_for_deletion': False,\n 'cost': instance['Cost'],\n 'average_cpu_usage': instance['AverageCpuUsage'],\n 'average_network_usage': instance['AverageNetworkUsage']\n }\n template_data['instance'].append(instance_data)\n\n for instance in instances_scheduled_for_deletion:\n if instance['Creator'] is None:\n instance['Creator'] = 'Unknown'\n instance_data = {\n 'instance_id': instance['InstanceID'],\n 'instance_creator': instance['Creator'],\n 'scheduled_for_deletion': True,\n 'cost': instance['Cost'],\n 'average_cpu_usage': instance['AverageCpuUsage'],\n 'average_network_usage': instance['AverageNetworkUsage']\n }\n template_data['instance'].append(instance_data)\n\n return template_data",
"def generate(self) -> dict:\n user_data = {\n \"merge_proposals\": self._render_merge_proposals(),\n \"bug_reports\": self._render_reported(),\n \"code_reviews\": {},\n }\n for project in self.projects:\n user_data[\"code_reviews\"][\n project.name\n ] = project.render_project_votes_by_user(self.user)\n\n return user_data"
] | [
"0.69213694",
"0.6188512",
"0.6179154",
"0.6003007",
"0.5900166",
"0.58198947",
"0.58040434",
"0.5778774",
"0.5772093",
"0.5710987",
"0.5700737",
"0.56920326",
"0.56712186",
"0.566643",
"0.564755",
"0.56175077",
"0.56043386",
"0.5603194",
"0.55743086",
"0.5572557",
"0.5553227",
"0.55519825",
"0.5510064",
"0.5504329",
"0.54972905",
"0.5483972",
"0.5468972",
"0.54597336",
"0.5429259",
"0.5423212"
] | 0.7405127 | 0 |
This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards. It listens for keystrokes to designate ambiguous wild cards in runs. The mouse is ignored until all the wilds are designated (the turn phase then goes back to play).
if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
else:
# in Shared_Board games, check if there are wilds that need to be updated.
# All other events are ignored until play is finished.
HandManagement.wildsHiLoGetInput(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nextEvent(self):\n\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n # The window crashed, we should handle this\n print(\"pygame crash, AAAHHH\")\n pygame.quit()\n quit()\n\n if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n wild_instructions = 'Use the keyboard to designate your prepared wild cards \\r\\n '\n wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n # cannot select prepared cards, so not included in logic below.\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n\n elif self.event.type == pygame.MOUSEMOTION:\n self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n self.controller.wantTopCard(True)\n self.controller.note = 'You have signaled you want to buy the card.'\n elif self.event.key == pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = 'You have signaled you do not want to buy the card.'\n if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n HandManagement.ManuallyAssign(self)",
"def main(self):\n while 1:\n events = get_gamepad()\n for event in events:\n\n if(event.ev_type == \"Absolute\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.absolute_switch[ self.map[GAMEPAD][event.code] ](event.state)\n\n\n if(event.ev_type == \"Key\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.btn_switch[ self.map[GAMEPAD][event.code] ](self.map[GAMEPAD][event.code], event.state)\n \n\n\n\n #print(event.ev_type, event.code, event.state)",
"def control(self):\n while not (self.game_over() or self.quit):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.quit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r:\n self.play()\n elif event.key == pygame.K_m:\n self.__init__()\n elif event.key == pygame.K_LEFT and len(self.sequence)>=2:\n self.sequence.pop()\n self.board = self.sequence.pop()\n self.draw()\n elif event.key == pygame.K_1:\n self.tip(1)\n elif event.key == pygame.K_2:\n self.tip(2)\n elif event.key == pygame.K_3:\n self.tip(3)\n elif event.key == pygame.K_4:\n self.tip(4)\n elif event.key == pygame.K_5:\n self.tip(5)\n \n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n ## if mouse is pressed get position of cursor ##\n pos = pygame.mouse.get_pos()\n ## check if cursor is on button ##\n for i in range(len(self.buttons)):\n for j in range(len(self.buttons[i])):\n if self.buttons[i][j].collidepoint(pos):\n if self.selected == None:\n self.selected = [i,j]\n elif self.selected == [i,j]:\n self.selected = None\n elif self.board[self.selected[0]][self.selected[1]]==0:\n self.selected = [i,j]\n else:\n if self.move(i,j):\n self.selected = None\n self.draw()\n return True\n else:\n self.selected = None\n self.draw()\n return False\n self.draw()\n return False",
"def playEvents(self, event):\n mouse = pygame.mouse.get_pressed()\n mpos = pygame.mouse.get_pos()\n # If we use the left click\n if mouse[0]:\n # We convert the position of the mouse according to the grid position and the margin\n x, y = mpos[0] % (self.ts + self.ms), mpos[1] % (self.ts + self.ms)\n if x > self.ms and y > self.ms:\n tile = mpos[0] // self.ts, mpos[1] // self.ts\n if self.in_grid(tile) and tile in self.adjacent():\n self.switch(tile)\n\n if event.type == pygame.KEYDOWN:\n for key, dx, dy in ((pygame.K_s, 0, -1), (pygame.K_z, 0, 1), (pygame.K_d, -1, 0), (pygame.K_q, 1, 0)):\n if event.key == key:\n x, y = self.opentile\n tile = x + dx, y + dy\n if self.in_grid(tile):\n self.switch(tile)\n # Move randomly a tile.\n if event.key == pygame.K_SPACE:\n self.random()\n if event.key == pygame.K_a:\n action = self.agent.play(self.format_tiles())\n reward = self.step(action)[1]\n print(f\"Reward: {reward}\")",
"def check_events_battle_screen(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n #check click on cards in hand\n for i in range(1,8):\n if Rect((100+145*(i-1)),610,130,180).collidepoint(pygame.mouse.get_pos()):\n battle_screen_hand_click_action('hand',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user,player2, position = str(i))\n break\n\n for i in range(1,4):\n if Rect(420,(220 + 110*(i-1)),130,80).collidepoint(pygame.mouse.get_pos()):\n battle_screen_battleground_click_action('player2-monster',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2, position = str(i))\n break\n\n for i in range(4,7):\n if Rect(245, (220 + 110*(i-4)),130,80).collidepoint(pygame.mouse.get_pos()):\n battle_screen_battleground_click_action('player2-monster',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2, position = str(i))\n break\n\n\n\n if Rect(20,40,130,180).collidepoint(pygame.mouse.get_pos()):\n battle_screen_battleground_click_action('player2-character',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2)\n\n # win/lost back to main menu button\n if Rect(500, 500, 200, 40).collidepoint(pygame.mouse.get_pos()):\n if screen_status.battle_screen_action_indicator == 'game-end':\n screen_status.battle_screen_display = False\n screen_status.welcome_screen_display = True\n\n if Rect(200, 0, 50, 30).collidepoint(pygame.mouse.get_pos()):\n button_status.rules_display = True\n\n\n # When menu window is on\n if button_status.battle_screen_menu_display == True:\n\n # Turn sound on\n if Rect(447+280, 323-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.sound_indicator = True\n # Turn sound off\n elif Rect(482+280, 323-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.sound_indicator = False\n # Turn music on\n elif Rect(447+280, 372-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.music_indicator = True\n # Turn music off\n elif Rect(482+280, 372-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.music_indicator = False\n\n # Change Theme\n elif Rect(447+280, 419-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Lith Harbor'\n change_bg_music('Lith Harbor')\n\n elif Rect(559+280, 419-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Leafre'\n change_bg_music('Leafre')\n\n elif Rect(447+280, 468-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Pantheon'\n change_bg_music('Pantheon')\n\n elif Rect(559+280, 468-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Ellinia'\n change_bg_music('Ellinia')\n\n # change AI speeding\n elif Rect(475+280, 524-270, 56, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '1000'\n\n elif Rect(545+280, 524-270, 56, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '2000'\n\n elif Rect(615+280, 524-270, 56, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '3000'\n\n # Quit settings window\n elif Rect(699+280, 300-270, 21, 21).collidepoint(pygame.mouse.get_pos()):\n 
button_status.battle_screen_menu_display = False\n\n # Concede and back to main menu\n elif Rect(700, 310, 180, 40).collidepoint(pygame.mouse.get_pos()):\n screen_status.battle_screen_action_indicator = 'game-end'\n button_status.battle_screen_win_lost_indicator = 'lost'\n\n if button_status.rules_display == True:\n # When we click on '>'\n if Rect(640, 37, 20, 20).collidepoint(pygame.mouse.get_pos()):\n if int(button_status.rules_page_id) < 4:\n button_status.rules_page_id = str(int(button_status.rules_page_id)+1)\n else:\n pass\n # When we click on '<'\n elif Rect(540, 37, 20, 20).collidepoint(pygame.mouse.get_pos()):\n if int(button_status.rules_page_id) > 1:\n button_status.rules_page_id = str(int(button_status.rules_page_id)-1)\n else:\n pass\n\n elif Rect(975, 35, 25, 25).collidepoint(pygame.mouse.get_pos()):\n button_status.rules_display = False\n\n\n\n\n if rect_union(buttons).collidepoint(pygame.mouse.get_pos()):\n for button in buttons:\n if button.rect.collidepoint(pygame.mouse.get_pos()):\n if button.text == 'Menu':\n button_status.battle_screen_menu_display = True\n\n elif button.text == '>':\n screen_status.battle_screen_my_hand_page_id += 1\n button_status.battle_screen_my_hand_indicator_display = False # Turn off display of buttons when change page\n\n if (screen_status.battle_screen_action_indicator == 'stage-1-level-up'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-equip'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast-and-equip'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-equip'\n ):\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n\n elif button.text == '<':\n screen_status.battle_screen_my_hand_page_id -= 1\n button_status.battle_screen_my_hand_indicator_display = False\n\n if (screen_status.battle_screen_action_indicator == 'stage-1-level-up'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-equip'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast-and-equip'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-equip'\n ):\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n\n elif button.text == 'level up':\n battle_screen_hand_click_action('level up',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2)\n elif button.text == 'Yes':\n battle_screen_instruction_bar_yes_skip_action('yes',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user,action,player2)\n elif button.text == 'Skip':\n battle_screen_instruction_bar_yes_skip_action('skip',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, action, 
player2)\n\n\n elif event.type == pygame.MOUSEMOTION: # Mostly for zoom in\n x = 0 # indicator helps remove zoom in.\n for i in range(1,8):\n if Rect((100+145*(i-1)),610,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'hand'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n for i in range(1,16):\n if Rect(1050,(220 + 23 * (i-1)),130,23).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 1 under'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n for i in range(1,16):\n if Rect(20,(220 + 23 * (i-1)),130,23).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 2 under'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n\n if Rect(1050,40,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 1'\n x = 1\n\n if Rect(20,40,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 2'\n x = 1\n\n if Rect(880, 5, 50, 20).collidepoint(pygame.mouse.get_pos()):\n button_status.battle_screen_history_bar_detail_display = True\n x = 1\n\n if x == 0:\n button_status.card_zoom_active = False\n button_status.battle_screen_history_bar_detail_display = False\n\n\n\n elif event.type == pygame.MOUSEBUTTONUP:\n pass",
"def run_game(self, board):\n run_program = True\n\n while run_program:\n # eventlistener for mouse events\n for event in pygame.event.get():\n if pygame.mouse.get_pressed() and event.type == pygame.MOUSEBUTTONDOWN:\n if event.type == pygame.MOUSEBUTTONDOWN:\n # Get position of mouse.\n (x, y) = pygame.mouse.get_pos()\n\n # Set circle position in the middle of the grid_square.\n draw_x = x - (x % self.square_size) + self.square_mid\n\n # Calculation to get xPosition from selected Mouse xPosition.\n x = x // 80\n\n # Check if column is full before placing. Break out if that's the case.\n if self.check_if_column_full(board, x):\n break\n\n # Calculate the yPosition, where the chip should be placed with various helper methods.\n draw_y = self.height - (self.square_size * self.draw_dict_mapping[self.get_y_pos(board, x)]) + 40\n\n # Check, which players turn it is.\n if self.playerOne:\n # Player Ones turn.\n pos = (x, self.get_y_pos(board, x))\n if board[pos] == 0:\n board[pos] = 1\n self.draw_circle(draw_x, draw_y, self.playerOne)\n self.screen.blit(self.background, (0, 0))\n if self.check_if_user_won(board, pos, 1):\n run_program = False\n self.switch_player()\n else:\n # Player Twos turn.\n pos = (x, self.get_y_pos(board, x))\n if board[pos] == 0:\n board[pos] = 2\n self.draw_circle(draw_x, draw_y, self.playerOne)\n self.screen.blit(self.background, (0, 0))\n if self.check_if_user_won(board, pos, 2):\n run_program = False\n self.switch_player()\n\n if event.type == pygame.KEYDOWN:\n # End the game with escape.\n if event.key == pygame.K_ESCAPE:\n self.draw = True\n run_program = False\n\n # End the Program with the X in the upper right corner.\n elif event.type == pygame.QUIT:\n self.draw = True\n run_program = False\n\n pygame.display.flip()\n self.game_over(self.playerOne, self.draw)\n # wait for given time and end the game\n pygame.time.wait(5000)\n pygame.quit()",
"def event2513():\n header(2513)\n\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit0)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit1)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit2)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit3)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit4)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit5)\n if_condition_true(0, 7)\n\n if_event_flag_on(1, EVENT.ScintillaRuneActive)\n\n if_player_has_special_effect(-1, SPEFFECT.RunicHit0)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit1)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit2)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit3)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit4)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit5)\n if_condition_true(1, -1)\n\n if_condition_true(0, 1)\n\n # Roll d30.\n flag.disable_chunk(970, 999)\n flag.enable_random_in_chunk(970, 999)\n\n # Count appropriate flag range as success and spawn Scintilla projectile.\n if_player_has_special_effect(2, SPEFFECT.RunicHit0)\n skip_if_condition_false(4, 2)\n if_at_least_one_true_flag_in_range(-2, 970, 971) # 2/30 chance at Scintilla level 0.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(3, SPEFFECT.RunicHit1)\n skip_if_condition_false(4, 3)\n if_at_least_one_true_flag_in_range(-2, 970, 972) # 3/30 chance at Scintilla level 1.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(4, SPEFFECT.RunicHit2)\n skip_if_condition_false(4, 4)\n if_at_least_one_true_flag_in_range(-2, 970, 973) # 4/30 chance at Scintilla level 2.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(5, SPEFFECT.RunicHit3)\n skip_if_condition_false(4, 5)\n if_at_least_one_true_flag_in_range(-2, 970, 974) # 5/30 chance at Scintilla level 3.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(6, SPEFFECT.RunicHit4)\n skip_if_condition_false(4, 6)\n if_at_least_one_true_flag_in_range(-2, 970, 975) # 6/30 chance at Scintilla level 4.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(-3, SPEFFECT.RunicHit5)\n restart_if_condition_false(-3) # This shouldn't happen.\n if_at_least_one_true_flag_in_range(-2, 970, 972) # 3/30 chance of Crystal Scintilla at level 5.\n skip_if_condition_false(2, -2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2002)\n skip(2)\n if_at_least_one_true_flag_in_range(-4, 973, 976) # 4/30 chance of normal Scintilla at level 5.\n skip_if_condition_false(1, -4)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()",
"def events(self):\n # catch all events here\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.quit_game()\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_ESCAPE:\n menu.paused = True\n menu.pause_menu() #code gets stuck in this call until a button is pressed in the pause menu\n self.clock=pg.time.Clock()\n if event.key == pg.K_h:\n self.draw_debug = not self.draw_debug\n if event.key == pg.K_o:\n if self.flashlight.on:#turning off flashlight\n self.darkness.on = True\n self.battery.duration-=pg.time.get_ticks()-self.battery.last_update\n self.flashlight.on=False\n else: #turning on flashlight\n self.darkness.on = False\n self.battery.last_update=pg.time.get_ticks()\n self.flashlight.on=True\n\n #darkness condition\n if self.transition:\n self.darkness_transition(self.player)\n self.kidnap(self.player)\n\n # win condition\n if pg.sprite.spritecollide(self.player, self.win, False, collide_hit_rect):\n menu.win_menu()\n\n #got hit condition\n hit=pg.sprite.spritecollide(self.player, self.threat, False, collide_hit2_rect)\n if hit:\n self.hit(self.player, hit[0])\n \n #mirror\n self.portal(self.player)\n self.portal(self.monster)",
"def main_board_maintenance(self,x_cor,y_cor):\r\n\t\r\n\t\tfor event in pygame.event.get(): \r\n\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\t\t\t\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t#print(x_adjusted/80,y_adjusted/80)\r\n\r\n\t\t\t\tif self.selected_from_selection_bar :\r\n\t\t\t\t\t#print('inside selection bar selection option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\ttemp_game_state = CP.game_data()\r\n\t\t\t\t\ttemp_game_state = copy.deepcopy(self.game_state)\r\n\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,Helping_Class.selection_bar_reverse_mapping[self.selected_piece] ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\ttemp_game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\ttemp_game_state.active_color = not temp_game_state.active_color\r\n\t\t\t\t\tfen = temp_game_state.generate_fen()\r\n\t\t\t\t\tboard2 = chess.Board(fen=fen)\r\n\t\t\t\t\tprint(board2)\r\n\t\t\t\t\tprint(fen)\r\n\t\t\t\t\tprint('board2.is_check()',board2.is_check())\r\n\t\t\t\t\t\r\n\t\t\t\t\t#now we need to place the piece on board\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)] == None:\r\n\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\tif not board2.is_check():\r\n\t\t\t\t\t\t\tif self._check_valid_position_(x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\t\tself.place_piece_on_board_from_selection_bar(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\t#rajan's\r\n\t\t\t\t\t\t\t\t#print(self.selected_piece)\r\n\t\t\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,self.selected_position ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\t\t\t\tself.game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\t\t\t\tself.selected_piece = None\r\n\t\t\t\t\t\t\t\tself.selected_position = None\r\n\r\n\t\t\t\t\t\t\t\tself.computer_turn =True\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t#board position is filled then nothing to do\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#if his piece change selection\r\n\t\t\t\t\t\tself.selected_from_selection_bar =False\r\n\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\r\n\r\n\t\t\t\telif self.selected_from_board:\r\n\t\t\t\t\t#print('inside selection bar board option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\t\r\n\t\t\t\t\tomega = True\r\n\t\t\t\t\tif self.selected_position:\r\n\t\t\t\t\t\tif self.selected_position == (x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\tomega = False\r\n\t\t\t\t\t#print(self.selected_position,(x_adjusted,y_adjusted))\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tmove = self._check_valid_move_(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\tprint(move)\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tif move:\r\n\t\t\t\t\t\t\tself.computer_turn = True\r\n\t\t\t\t\t\t\t#if move contains x then we have update state of captured piece\r\n\t\t\t\t\t\t\t#else just update selected 
piece\r\n\t\t\t\t\t\t\t#print(\"correct move\")\r\n\t\t\t\t\t\t\tself.capture_piece_update_board_or_place_piece(move,x_adjusted,y_adjusted)\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\t\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\tif self.whose_move == 'white':\r\n\t\t\t\t\t\t\tif 'W' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\telif self.whose_move == 'black':\r\n\t\t\t\t\t\t\tif 'B' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#it is none means nothing is their so nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\r\n\t\t\t\r\n\r\n\t\t\telse:\r\n\t\t\t\t#print(\"not_pressed\")\r\n\t\t\t\tpass",
"def _check_keydown_play_events(self, event):\n\t\tif (event.key in (pygame.K_SPACE, pygame.K_UP)) and (\n\t\t\tself.bolan.rect.y >= self.bolan.default_y):\n\t\t\tself.bolan.is_jump = True\n\t\tif event.key == pygame.K_DOWN:\n\t\t\tself.bolan.is_duck = True",
"def main():\n p.init() # Initializing pygame object\n screen = p.display.set_mode((WIDTH, HEIGHT))\n clock = p.time.Clock()\n screen.fill(p.Color(\"white\"))\n gs = ChessEngine.GameState()\n\n valid_moves = gs.get_valid_moves()\n\n # Flag to control the number of times get valid moves is called\n # Only if the user makes a valid move, it is called\n move_made = False\n\n load_images()\n game_running = True\n\n sq_selected = tuple() # (row, col), keeps track of user click\n player_clicks = list() # 2 tuples in the list, [(row, col), (row, col)]\n\n while game_running:\n\n for e in p.event.get():\n if e.type == p.QUIT:\n game_running = False\n\n elif e.type == p.KEYDOWN:\n if e.key == p.K_z: # undo when 'z' is pressed\n gs.undo_move()\n move_made = True # On undo we need to generate all valid moves again\n\n elif e.type == p.MOUSEBUTTONDOWN:\n location = p.mouse.get_pos() # Gets (col, row) location of mouse click\n row = location[1] // SQ_SIZE\n col = location[0] // SQ_SIZE\n\n # If user clicks on the same square again, i.e. as source and destination,\n # then we deselect it and reset player clicks\n if sq_selected == (row, col):\n sq_selected = tuple()\n player_clicks = list()\n else:\n if not (len(player_clicks) == 0 and gs.board[row][col] == gs.EMPTY_SQ):\n sq_selected = (row, col)\n player_clicks.append(sq_selected) # Append both first and second clicks\n\n # After second click only\n if len(player_clicks) == 2:\n move = ChessEngine.Move(start_sq=player_clicks[0], end_sq=player_clicks[1], board=gs.board)\n # move.print_move()\n for i in range(len(valid_moves)):\n\n if move == valid_moves[i]:\n gs.make_move(valid_moves[i])\n move_made = True\n\n player_clicks = list() # Resetting to restart the 2 click move logic\n sq_selected = tuple()\n if not move_made:\n player_clicks = [sq_selected]\n\n if move_made:\n valid_moves = gs.get_valid_moves()\n move_made = False\n\n draw_game_state(screen, gs)\n clock.tick(MAX_FPS)\n p.display.flip()",
"def play(self):\n self.accept(\"wheel_up\", self.scrollindex, [-1] )\n self.accept(\"wheel_down\", self.scrollindex, [1] )\n self.accept(\"arrow_up\", self.scrollindex, [-1] )\n self.accept(\"arrow_down\", self.scrollindex, [1] )\n self.accept(\"enter\", self._click)\n if callable(self.data['exit']): self.accept(\"escape\", self.data['exit'])\n for item in self.canvas[\"items\"]: item['state']=DGG.NORMAL",
"def run_game():\n mainBoard = get_new_board()\n resetBoard(mainBoard)\n showHints = False\n\n turn = random.choice(['computer', 'player'])\n\n # Draw the starting board and ask the player what color they want.\n draw_board(mainBoard)\n\n playerTile, computer_tile = enter_player_tile()\n # Make the Surface and Rect objects for the \"New Game\" and \"Hints\" buttons\n\n newGameSurf = FONT.render('New Game', True, TEXTCOLOR, TEXTBGCOLOR2)\n newGameRect = newGameSurf.get_rect()\n newGameRect.topright = (WINDOWWIDTH - 8, 10)\n\n hintsSurf = FONT.render('Hints', True, TEXTCOLOR, TEXTBGCOLOR2)\n hintsRect = hintsSurf.get_rect()\n hintsRect.topright = (WINDOWWIDTH - 8, 40)\n\n while True: # main game loop\n # Keep looping for player and computer's turns.\n if turn == 'player':\n # Player's turn:\n if get_valid_moves(mainBoard, playerTile) == []:\n # If it's the player's turn but they\n # can't move, then end the game.\n break\n\n movexy = None\n\n while movexy == None:\n # Keep looping until the player clicks on a valid space.\n # Determine which board data structure to use for display.\n if showHints:\n boardToDraw = get_board_with_valid_moves(mainBoard, playerTile)\n else:\n boardToDraw = mainBoard\n\n check_for_quit()\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n # Handle mouse click events\n mousex, mousey = event.pos\n if newGameRect.collide_point((mousex, mousey)):\n # Start a new game\n return True\n elif hintsRect.collide_point((mousex, mousey)):\n # Toggle hints mode\n showHints = not showHints\n # movexy is set to a two-item tuple XY coordinate, or None value\n movexy = get_space_clicked(mousex, mousey)\n\n if movexy != None and not isValidMove(mainBoard, playerTile, movexy[0], movexy[1]):\n movexy = None\n\n # Draw the game board.\n draw_board(boardToDraw)\n draw_info(boardToDraw, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n MAINCLOCK.tick(FPS)\n pygame.display.update()\n\n # Make the move and end the turn.\n make_move(mainBoard, playerTile, movexy[0], movexy[1], True)\n if get_valid_moves(mainBoard, computer_tile) != []:\n # Only set for the computer's turn if it can make a move.\n turn = 'computer'\n else:\n # Computer's turn:\n if get_valid_moves(mainBoard, computer_tile) == []:\n # If it was set to be the computer's turn but\n # they can't move, then end the game.\n break\n\n # Draw the board.\n draw_board(mainBoard)\n draw_info(mainBoard, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n # Make it look like the computer is thinking by pausing a bit.\n pauseUntil = time.time() + random.randint(5, 15) * 0.1\n\n while time.time() < pauseUntil:\n pygame.display.update()\n\n # Make the move and end the turn.\n x, y = get_computer_move(mainBoard, computer_tile)\n make_move(mainBoard, computer_tile, x, y, True)\n\n if get_valid_moves(mainBoard, playerTile) != []:\n # Only set for the player's turn if they can make a move.\n turn = 'player'\n\n # Display the final score.\n draw_board(mainBoard)\n scores = get_score_of_board(mainBoard)\n # Determine the text of the message to display.\n\n if scores[playerTile] > scores[computer_tile]:\n text = 'You beat the computer by %s points! Congratulations!' 
% \\\n (scores[playerTile] - scores[computer_tile])\n elif scores[playerTile] < scores[computer_tile]:\n text = 'You lost. The computer beat you by %s points.' % \\\n (scores[computer_tile] - scores[playerTile])\n else:\n text = 'The game was a tie!'\n\n textSurf = FONT.render(text, True, TEXTCOLOR, TEXTBGCOLOR1)\n textRect = textSurf.get_rect()\n textRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))\n DISPLAYSURF.blit(textSurf, textRect)\n\n # Display the \"Play again?\" text with Yes and No buttons.\n text2Surf = BIGFONT.render('Play again?', True, TEXTCOLOR, TEXTBGCOLOR1)\n text2Rect = text2Surf.get_rect()\n text2Rect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2) + 50)\n\n # Make \"Yes\" button.\n yesSurf = BIGFONT.render('Yes', True, TEXTCOLOR, TEXTBGCOLOR1)\n yesRect = yesSurf.get_rect()\n yesRect.center = (int(WINDOWWIDTH / 2) - 60, int(WINDOWHEIGHT / 2) + 90)\n\n # Make \"No\" button.\n noSurf = BIGFONT.render('No', True, TEXTCOLOR, TEXTBGCOLOR1)\n noRect = noSurf.get_rect()\n noRect.center = (int(WINDOWWIDTH / 2) + 60, int(WINDOWHEIGHT / 2) + 90)\n\n while True:\n # Process events until the user clicks on Yes or No.\n check_for_quit()\n\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n mousex, mousey = event.pos\n\n if yesRect.collide_point((mousex, mousey)):\n return True\n\n elif noRect.collide_point((mousex, mousey)):\n return False\n\n DISPLAYSURF.blit(textSurf, textRect)\n DISPLAYSURF.blit(text2Surf, text2Rect)\n DISPLAYSURF.blit(yesSurf, yesRect)\n DISPLAYSURF.blit(noSurf, noRect)\n\n pygame.display.update()\n MAINCLOCK.tick(FPS)",
"def check_keydown_events(event, wof_settings, screen, hero, bombs):\n if event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n hero.moving_right = True\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\n hero.moving_left = True\n if event.key == pygame.K_UP or event.key == pygame.K_w:\n hero.moving_up = True\n if event.key == pygame.K_DOWN or event.key == pygame.K_s:\n hero.moving_down = True\n elif event.key == pygame.K_SPACE:\n put_bomb(wof_settings,screen,hero,bombs)",
"def check_events(si_settings, screen,stats,sb,play_button, ship,aliens, bullets):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, si_settings, screen, ship, bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x,mouse_y = pygame.mouse.get_pos()\n check_play_button(si_settings,screen,stats,sb,play_button,ship,aliens,bullets,mouse_x,mouse_y)",
"def do_event(self, event):\n self.event = event\n self.event_type = event.type\n self.event_name = pygame.event.event_name(event.type)\n self.surf_list = []\n if event.type == QUIT:\n self.active = False\n \n elif event.type == KEYDOWN:\n self.event_key = event.key\n self.event_mod = event.mod\n self.event_unicode = event.unicode\n if event.key == K_ESCAPE:\n self.active = False\n elif event.key == K_RETURN:\n self.play(self)\n \n elif event.key in self.dirs:\n self.dir = np.array(self.dirs[event.key])\n self.pos += self.dir\n \n self.pos[0] = min(max(self.pos[0], 0), self.n-1)\n self.pos[1] = min(max(self.pos[1], 0), self.m-1)\n self.move(self)\n elif event.key in self.keys:\n self.keys[event.key](self)\n \n elif event.type == MOUSEMOTION:\n self.event_pos = event.pos\n self.event_rel = event.rel\n self.pos = self.get_index(*event.pos)\n if self.mouse_down:\n (x, y) = event.pos\n x -= self.dx//2\n y -= self.dy//2\n self.surf_list.append((self.cursor_img, (x, y)))\n \n elif event.type == MOUSEBUTTONDOWN:\n self.mouse_down = True\n (i, j) = self.get_index(*event.pos)\n t = self.T[i, j]\n if t != 0 and len(self.images)>0:\n self.cursor_img = self.images[t]\n self.T[i, j] = 0\n self.cursor_val = t\n \n elif event.type == MOUSEBUTTONUP:\n self.mouse_down = False\n (i, j) = self.get_index(*event.pos)\n self.pos = [i, j] \n t = self.T[i, j]\n if t == 0 and len(self.images) > 0:\n self.T[i, j] = self.cursor_val\n self.play(self)",
"def switch_pattern(self):\n event = self.event_deque.popleft()\n event.reset()\n self.event_deque.append(event)\n self.last_switch = pygame.time.get_ticks()",
"def get_event(self, event):\n\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_RETURN:\n print(self.game.current_room_no)\n if event.key == pg.K_BACKSPACE:\n print(self.game.room.room_no_list)\n if event.key == pg.K_a:\n self.is_moving_left = True \n self.move_left()\n if event.key == pg.K_d:\n self.is_moving_right = True \n self.move_right()\n if event.key == pg.K_w:\n self.is_moving_up = True\n self.move_up() \n if event.key == pg.K_s:\n self.is_moving_down = True\n self.move_down() \n if event.type == pg.KEYUP:\n if event.key == pg.K_a:\n if self.is_moving_right == True:\n self.is_moving_left = False \n self.move_right()\n else:\n self.is_moving_left = False \n self.stopX()\n if event.key == pg.K_d:\n if self.is_moving_left == True:\n self.is_moving_right = False\n self.move_left() \n else:\n self.is_moving_right = False \n self.stopX()\n if event.key == pg.K_w:\n if self.is_moving_down == True:\n self.is_moving_up = False \n self.move_down()\n else:\n self.is_moving_up = False \n self.stopY()\n if event.key == pg.K_s:\n if self.is_moving_up == True:\n self.is_moving_down = False \n self.move_up()\n else:\n self.is_moving_down = False \n self.stopY()\n\n if event.type == pg.MOUSEBUTTONDOWN and event.button == 1:\n self.is_shooting = True\n elif event.type == pg.MOUSEBUTTONUP and event.button == 1:\n self.is_shooting = False",
"def play(self, event):\n\n # locate second column and row when player click on a square\n colrow_tuple = self.board.find_coords_of_selected_sq(event)\n\n # save the col and row as variable\n corner_two_col, corner_two_row = colrow_tuple[0], colrow_tuple[1]\n\n # calculations to get the key to help locate specific square on\n # the unused dictionary of squares left to play\n col_fl, row_fl = self.board.floor_of_row_col(event.x, event.y)\n rowcol_key = self.board.convert_to_key(col_fl, row_fl)\n\n try:\n self.unused_squares_dict[rowcol_key]\n except KeyError:\n return\n\n if self.player1_turn == True:\n self.add_to_player_sq(rowcol_key, self.player1.selected_sq)\n\n # delete from game unused dictionary of set\n self.delete_used_sq(rowcol_key)\n\n self.board.color_selected_sq(event,\n corner_two_col,\n corner_two_row,\n self.player1.color)\n\n # check game for 3 conditions: a tie, player1 win, or player2 win\n self.check_for_winner(self.player1.selected_sq, self.player1.name)\n\n # switch turn\n self.player1_turn = False\n\n else: # player2's turn\n self.board.color_selected_sq(event,\n corner_two_col,\n corner_two_row,\n self.player2.color)\n\n self.add_to_player_sq(rowcol_key, self.player2.selected_sq)\n self.delete_used_sq(rowcol_key)\n self.check_for_winner(self.player2.selected_sq, self.player2.name)\n self.player1_turn = True",
"def check_events(wof_settings, screen, hero, bombs):\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT or (event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE):\n wof_settings.running = False \n terminate()\n elif (event.type == pygame.KEYUP and event.key == pygame.K_b):\n wof_settings.running = False\n return 'back'\n elif (event.type == pygame.KEYUP and event.key == pygame.K_n):\n wof_settings.running = False\n return 'next'\n elif (event.type == pygame.KEYUP and event.key == pygame.K_BACKSPACE):\n wof_settings.running = False\n return 'replay'\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, wof_settings, screen, hero, bombs)\n # control the Hero movements\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, hero)",
"def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.set_selected(self.mouse_on_grid())\n if self.get_selected() is not None and event.type == pygame.KEYDOWN:\n self.event_seletect_moved(event)\n self.event_cell_update(event)",
"def check_keydown_events(event,ai_settings,screen,ship,bullets,stats,aliens,sb):\n\t\n\tif event.key == pygame.K_RIGHT:\n\t\t# Mova a espaçonave para a direita\n\t\n\t\tship.set_moving_right(True)\n\t\n\telif event.key == pygame.K_LEFT:\n\t\t# Move a espaçonave para a esquerda\n\n\t\tship.set_moving_left(True)\n\n\telif event.key == pygame.K_SPACE:\n\n\t\tfire_bullet(ai_settings,screen,ship,bullets)\n\n\telif event.key == pygame.K_q:\n\t\tstats.file.stored_high_score(stats.high_score)\n\t\tsys.exit()\n\n\telif event.key == pygame.K_p:\n\t\tstart_game(ai_settings,screen,stats,ship,aliens,bullets,sb)",
"def action_key_press(key, cur_key_type, cur_key, draw, phys, msg, timer, board, force):\n\n\n # delete any old mouse joints prior to dealing with the next keypress\n if key != \"m\" and msg.message != \"Mouse Move\" and cur_key_type == 0:\n for jn in phys.world.joints:\n if type(jn) is b2MouseJoint:\n phys.world.DestroyJoint(jn)\n\n if not key is None and key != \"\":\n if platform == \"linux\" or platform == \"linux2\":\n window = get_active_window_title()\n elif platform == \"win32\":\n window = gw.getActiveWindow().title\n\n if not \"Board\" in window and not \"Toolbar\" in window:\n pass\n else:\n if key == 255:\n pass\n\n elif key == \"r\" and cur_key_type == 0:\n # RESET SCREEN\n if sg.popup_yes_no(\"Are you sure you want to reset?\") == \"Yes\":\n draw.reset()\n msg = Messenger(phys.options[\"screen\"][\"fps\"], board)\n msg.set_message(\"Reset\")\n board.reset = True\n\n elif key == \"q\" and cur_key_type == 0:\n # QUIT\n msg.set_message(\"Quit\")\n val = sg.popup_yes_no(\"Are you sure you want to quit?\")\n if val == \"Yes\":\n board.run = False\n\n\n elif key == \"z\" and cur_key_type == 0:\n # SPAWN\n msg.set_message(\"Spawn\")\n phys.create_block()\n\n elif key == \"u\" and cur_key_type == 0:\n # draw delete blocks\n draw.reset()\n options = {\"Remove Joints\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"x\" and cur_key_type == 0:\n # draw delete blocks\n draw.reset()\n options = {\"Delete\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"p\" and cur_key_type == 0:\n # draw polygon\n draw.reset()\n # msg.set = {\"Dynamic Block\": draw.get_draw_type()}\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Dynamic Block\")\n\n elif key == \"g\" and cur_key_type == 0:\n # draw ground\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Static Block\")\n # options = {\"Static Block\": draw.get_draw_type()}\n\n # cur_key = msg.auto_set(options, key, force)\n\n elif key == \"i\" and cur_key_type == 0:\n # draw terrain\n\n draw.reset()\n options = {\"Generate Terrain\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n draw, phys, board = create_terrain(draw, phys, board=board)\n\n\n elif key == \"f\" and cur_key_type == 0:\n # draw fragments or select\n draw.reset()\n options = {\n \"Fragment Select\": SelectType.select} # \"Fragment Poly\": SelectType.draw, \"Frament Rectangle\": SelectType.rectangle,\n # \"Frament Select\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"1\" and cur_key_type == 0:\n # fire polygon\n draw.reset()\n options = {\"Create\": SelectType.select_point, \"Fire Block\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"4\" and cur_key_type == 0:\n # select\n # draw ground\n draw.reset()\n options = {\"Joint Update\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \";\" and cur_key_type == 0:\n # select\n # draw ground\n draw.reset()\n options = {\"Player Update\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"2\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Rotate\": SelectType.player_select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"m\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Mouse Move\": SelectType.select, \"Normal Move\": SelectType.null, \"Joint Move\": SelectType.null,\n \"Clone Move\": 
SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"t\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Transform\": SelectType.player_select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"e\" and cur_key_type == 0:\n # draw ropes\n if sg.popup_yes_no(\"Are you sure you want to kill all blocks?\") == \"Yes\":\n draw.reset()\n phys.kill_all(static=False)\n msg.set_message(\"Remove Blocks\")\n cur_key = \"e\"\n\n elif key == \"v\" and cur_key_type == 0:\n # draw ropes\n draw.reset()\n msg.set_message(\"Set Spawn\")\n cur_key = \"v\"\n\n elif key == \"h\" and cur_key_type == 0:\n # draw fragment ALL players\n # cur_key = \"h\"\n msg.set_message(\"Frag All\")\n draw.reset()\n blocks = [bl for bl in phys.block_list if not bl.static is True and not bl.is_terrain is True]\n phys.fractal_block(blocks, create=False, board=board)\n\n elif key == \"k\" and cur_key_type == 0:\n # draw splitter sensor\n draw.reset()\n\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Force\")\n\n\n elif key == \"l\" and cur_key_type == 0:\n # draw splitter sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Splitter\")\n\n\n elif key == \"/\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Impulse\")\n\n\n elif key == \"'\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Goal\")\n\n elif key == \"{\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Spawner\")\n\n\n elif key == \"~\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Motor Switch\")\n\n elif key == \"&\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Water\")\n\n\n elif key == \"^\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Low Gravity\")\n\n\n elif key == \"#\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Gravity Switch\")\n\n elif key == \")\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Center\")\n\n elif key == \"%\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Sticky\")\n\n elif key == \"£\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Enlarger\")\n\n\n elif key == \"$\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Shrinker\")\n\n\n elif key == \"0\" and cur_key_type == 0:\n # pause physics\n phys.force_draw_all = not phys.force_draw_all\n options = {\"Draw All\": SelectType.null, \"Draw Set\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n\n\n elif key == \"o\" and cur_key_type == 0:\n # pause physics\n draw.reset()\n phys.pause = not phys.pause\n msg.set_message(\"Pause\" + (\" On\" if phys.pause is True else \" Off\"))\n cur_key = \"o\"\n\n elif key == \"*\" and cur_key_type == 
0:\n # PICKLE BOARD\n name, blurb = save_gui()\n if not name is None:\n pickler(timer, phys, draw, board, msg, name, blurb)\n msg.set_message(\"State Saved\")\n cur_key = \"*\"\n draw.reset()\n\n elif key == \"-\":\n # LOAD BOARD\n\n timer, phys, draw, board, msg = load_gui(timer, phys, draw, board, msg, persistant=False)\n config = phys.config\n\n elif key == \"5\" and cur_key_type == 0:\n\n load_options()\n phys.change_config(board=board)\n\n elif key == \"6\" and cur_key_type == 0:\n\n board, phys, msg = update_background(board, phys, msg)\n\n\n elif key == \"j\" and cur_key_type == 0:\n # draw joints\n draw.reset()\n options = {\"Merge Blocks\": SelectType.select,\n \"Distance Joint\": SelectType.straight_join, \"Rope Joint\": SelectType.straight_join,\n \"Prismatic Joint\": SelectType.straight_join,\n \"Electric\": SelectType.line_join,\n \"Chain\": SelectType.line_join2,\n \"Weld Joint\": SelectType.straight_join, \"Wheel Joint\": SelectType.circle,\n \"Rotation Joint\": SelectType.rotation_select, \"Pulley\": SelectType.d_straight_join}\n\n cur_key = msg.auto_set(options, key, force)\n\n\n\n elif key == \"tab\":\n # Tab key press, this switches to move mode\n if cur_key_type == 0:\n cur_key_type = 1\n msg.set_message(\"Drawing Mode Enabled\")\n draw.reset()\n else:\n cur_key_type = 0\n msg.set_message(\"Create Mode Enabled\")\n draw.reset()\n\n\n # Drawing mode buttons\n\n elif key == \"`\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Change Keys\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"1\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Screen Move\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"2\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Center Clicked\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"]\" and cur_key_type == 1:\n # draw polygon\n draw.reset()\n options = {\"Fire Bullet\": SelectType.bullet_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"[\" and cur_key_type == 1:\n # draw polygon\n draw.reset()\n options = {\"Choose Player\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"3\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Motor Forwards\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"4\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Motor Backwards\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"9\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n cur_key = key + str(SelectType.vector_direction.value)\n msg.set_message(\"Force\")\n\n elif key == \"0\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Relative Force\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"5\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Rotate CCW\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"6\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Rotate CW\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"7\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n cur_key = key + str(SelectType.vector_direction.value)\n msg.set_message(\"Impulse\")\n\n\n elif key == \"8\" and 
cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Relative Impulse\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"!\" and cur_key_type == 1:\n \"\"\"\n Used to attach an relative impulse to a block\n \"\"\"\n board.translation = np.array([0, 0])\n\n # do move keypresses:\n if cur_key_type == 1:\n phys.do_keypress(key)\n\n return cur_key_type, cur_key, draw, phys, msg, timer, board",
"def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keyDown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyUP_events(event)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)",
"def check_events_welcome_screen(ai_settings,grid, screen, buttons,screen_status, button_status, card_database_filter, user,action, player2):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n\n # If settings window is on\n if button_status.welcome_screen_settings_display == True:\n # Turn sound on\n if Rect(510, 333, 40, 40).collidepoint(pygame.mouse.get_pos()):\n ai_settings.sound_indicator = True\n # Turn sound off\n elif Rect(560, 333, 40, 40).collidepoint(pygame.mouse.get_pos()):\n ai_settings.sound_indicator = False\n # Turn music on\n elif Rect(510, 403, 40, 40).collidepoint(pygame.mouse.get_pos()):\n ai_settings.music_indicator = True\n # Turn music off\n elif Rect(560, 403, 40, 40).collidepoint(pygame.mouse.get_pos()):\n ai_settings.music_indicator = False\n\n # Change Theme\n elif Rect(510, 470, 140, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Lith Harbor'\n change_bg_music('Lith Harbor')\n\n elif Rect(670, 470, 140, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Leafre'\n change_bg_music('Leafre')\n\n elif Rect(510, 540, 140, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Pantheon'\n change_bg_music('Pantheon')\n\n elif Rect(670, 540, 140, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Ellinia'\n change_bg_music('Ellinia')\n\n # change AI speeding\n elif Rect(550, 620, 80, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '1000'\n\n elif Rect(650, 620, 80, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '2000'\n\n elif Rect(750, 620, 80, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '3000'\n\n # Quit settings window\n elif Rect(870, 300, 30, 30).collidepoint(pygame.mouse.get_pos()):\n button_status.welcome_screen_settings_display = False\n\n elif button_status.rules_display == True:\n # When we click on '>'\n if Rect(640, 37, 20, 20).collidepoint(pygame.mouse.get_pos()):\n if int(button_status.rules_page_id) < 4:\n button_status.rules_page_id = str(int(button_status.rules_page_id)+1)\n else:\n pass\n # When we click on '<'\n elif Rect(540, 37, 20, 20).collidepoint(pygame.mouse.get_pos()):\n if int(button_status.rules_page_id) > 1:\n button_status.rules_page_id = str(int(button_status.rules_page_id)-1)\n else:\n pass\n\n elif Rect(975, 35, 25, 25).collidepoint(pygame.mouse.get_pos()):\n button_status.rules_display = False\n\n else:\n # Click on single player\n if Rect(402, 269, 396, 62).collidepoint(pygame.mouse.get_pos()):\n screen_status.welcome_screen_display = False\n screen_status.prepare_screen_display = True\n if user.name == '':\n button_status.text_input_box_display = True\n else:\n pass\n # Click on multiplayer\n elif Rect(434, 370, 333, 61).collidepoint(pygame.mouse.get_pos()):\n screen_status.welcome_screen_display = False\n screen_status.lobby_screen_display = True\n player2.identity = 'pvp'\n if user.name == '':\n button_status.text_input_box_display = True\n else:\n pass\n #enter_as_network_client(ai_settings,grid, screen, buttons,screen_status, button_status, card_database_filter, user,action, player2)\n # Click on settings\n elif Rect(474, 469, 253, 62).collidepoint(pygame.mouse.get_pos()):\n button_status.welcome_screen_settings_display = True\n\n # Click on rules\n elif Rect(517, 570, 167, 
61).collidepoint(pygame.mouse.get_pos()):\n button_status.rules_display = True\n\n # click on exit\n elif Rect(541, 670, 119, 61).collidepoint(pygame.mouse.get_pos()):\n sys.exit()",
"def _check_event(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self.waiting = not self.waiting\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if self.waiting:\n x,y = pygame.mouse.get_pos()\n cell_addr_y = int(y/self.cell_width)\n cell_addr_x = int(x/self.cell_width)\n self.cells[cell_addr_y][cell_addr_x].update()",
"def handle_keyboard_event(self, event, **kwargs):\n ok, collision = False, None\n key_pressed = pygame.key.get_pressed()\n if key_pressed[pygame.K_x]:\n sys.exit(0)\n if not self.select_from_menu:\n ok, collision = self._handle_keyboard_grid_event(key_pressed)\n else:\n self._handle_keyboard_menu_event(key_pressed)\n if key_pressed[pygame.K_RETURN]:\n event = self.menu.select_highlighted(source=self.battle_source, target=self.battle_target)\n self.menu.visible = False\n self.select_from_menu = False\n if event:\n kwargs[GHandler.EBUCKET].append(event)\n if not ok and collision:\n self.menu.visible = True\n self.select_from_menu = True\n self.battle_source = self.actor\n self.battle_target = collision.solid_object\n\n super(GameScene, self).handle_keyboard_event(event, **kwargs)",
"def test_040_mouse_keyboard(self):\n self.allow_service('qubes.InputMouse')\n self.allow_service('qubes.InputKeyboard')\n self.setUpDevice(mouse_events + keyboard_events)\n dev_name = '{}: {}'.format(\n self.vm.name if hasattr(self, 'vm') else 'remote',\n 'Test input device')\n self.find_device_and_start_listener('pointer:' + dev_name)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_Y', 1)\n self.emit_event('REL_Y', 1)\n self.emit_click('BTN_LEFT')\n\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawButtonPress', '1', {}])\n self.assertEvent(['RawButtonRelease', '1', {}])\n\n self.find_device_and_start_listener('keyboard:' + dev_name)\n\n self.emit_click('KEY_A')\n self.emit_click('KEY_B')\n self.emit_click('KEY_C')\n self.emit_click('KEY_D')\n for _ in range(4):\n self.emit_click('KEY_BACKSPACE')\n\n for key in ('38', '56', '54', '40'):\n self.assertEvent(['RawKeyPress', key, {}])\n self.assertEvent(['RawKeyRelease', key, {}])\n for _ in range(4):\n self.assertEvent(['RawKeyPress', '22', {}])\n self.assertEvent(['RawKeyRelease', '22', {}])",
"def _play(self, func):\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n\n while self._board.possible() != []:\n self._board.move_computer()\n print('\\ncomputer movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is computer')\n return\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is human')\n return\n print('\\nwinner is friendship :)')",
"def bindHotkeys(self):\r\n self.root.bind(\"s\",self.pause)\r\n self.root.bind(\"p\",self.play)\r\n self.root.bind(\"x\",self.stop)\r\n self.root.bind(\"<Right>\",lambda event, t=10: self.skipFor(event,t=t))\r\n self.root.bind(\"<Left>\",lambda event, t=-10: self.skipFor(event,t=t))\r\n self.bindDPHotkeys()"
] | [
"0.7081879",
"0.6245746",
"0.61995095",
"0.61223036",
"0.5956481",
"0.58805364",
"0.5852858",
"0.5740237",
"0.57229996",
"0.57229334",
"0.57194364",
"0.571404",
"0.5688867",
"0.56664747",
"0.5663727",
"0.56603277",
"0.56460625",
"0.5638092",
"0.5631298",
"0.5616782",
"0.5612591",
"0.5601155",
"0.55901194",
"0.55827737",
"0.5577245",
"0.5572238",
"0.556428",
"0.5536535",
"0.55344665",
"0.55284804"
] | 0.7606982 | 0 |
This submits the next user input to the controller. In games with Shared_Board = False (e.g. HandAndFoot), keystrokes don't do anything unless designating values for prepared wild cards, at which time the mouse is ignored unless you want to clear the prepared cards. In games with Shared_Board = True, wilds on the board might change designation when other cards are played. If the designation cannot be handled automatically (i.e. if a wild can be at the beginning or end of a run), then it must be designated before play is completed. This is done in nextEventWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled. | def nextEvent(self):
if self.controller._state.rules.Shared_Board:
self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
if self.num_wilds > 0:
self.nextEventWildsOnBoard()
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
wild_instructions = 'Use the keyboard to designate your prepared wild cards \r\n '
wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'
self.controller.note = wild_instructions
pos = pygame.mouse.get_pos()
if self.event.type == pygame.MOUSEBUTTONDOWN:
self.RuleSetsButtons.ClickedButton(self, pos)
for element in self.hand_info:
# cannot select prepared cards, so not included in logic below.
if element.img_clickable.isOver(pos):
if element.status == 1:
element.status = 0
element.img_clickable.changeOutline(0)
elif element.status == 0:
element.status = 1
element.img_clickable.changeOutline(2)
elif self.event.type == pygame.MOUSEMOTION:
self.RuleSetsButtons.MouseHiLight(self, pos)
HandManagement.MouseHiLight(self.hand_info, pos)
elif self.event.type == pygame.KEYDOWN:
if self.controller._state.rules.Buy_Option:
if self.controller.buying_opportunity:
if self.event.key == pygame.K_y:
self.controller.wantTopCard(True)
self.controller.note = 'You have signaled you want to buy the card.'
elif self.event.key == pygame.K_n:
self.controller.wantTopCard(False)
self.controller.note = 'You have signaled you do not want to buy the card.'
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
HandManagement.ManuallyAssign(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nextEventWildsOnBoard(self):\n\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n # The window crashed, we should handle this\n print(\"pygame crash, AAAHHH\")\n pygame.quit()\n quit()\n else:\n # in Shared_Board games, check if there are wilds that need to be updated.\n # All other events are ignored until play is finished.\n HandManagement.wildsHiLoGetInput(self)",
"def perform_keyboard_actions(self):\n self.handle_keyboard_input()\n self.grid.next_frame()",
"def next_cmd(self):\n if not self.validate():\n self.initial_focus.focus_set()\n return\n self.player_ships[self.values[0]] = self.values[1]\n self.num_players += 1\n self.e1.delete(0, END)\n self.buttonbox()\n self.e1.focus_set()\n self.e2.reset()",
"def next(self):\n \n jump = 0\n \n for event in pudding.process_event():\n if event[0] == sdlconst.KEYDOWN:\n if (event[1] == sdlconst.K_q) or (event[1] == sdlconst.K_ESCAPE):\n tofu.GAME_INTERFACE.end_game() # Quit the game\n \n elif event[1] == sdlconst.K_m:\n print \"trying to change single to multiplayer mode\"\n tofu.GAME_INTERFACE.end_game('client')\n \n elif event[1] == sdlconst.K_LSHIFT:\n # Shift key is for jumping\n # Contrary to other action, jump is only performed once, at the beginning of\n # the jump.\n jump = 1\n \n elif event[1] == sdlconst.K_LEFT: self.left_key_down = 1\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 1\n elif event[1] == sdlconst.K_UP: self.up_key_down = 1\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 1\n \n elif event[0] == sdlconst.KEYUP:\n if event[1] == sdlconst.K_LEFT: self.left_key_down = 0\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 0\n elif event[1] == sdlconst.K_UP: self.up_key_down = 0\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 0\n \n if jump: return Action(ACTION_JUMP)\n \n # People saying that Python doesn't have switch/select case are wrong...\n # Remember this if you are coding a fighting game !\n return Action({\n (0, 0, 1, 0) : ACTION_ADVANCE,\n (1, 0, 1, 0) : ACTION_ADVANCE_LEFT,\n (0, 1, 1, 0) : ACTION_ADVANCE_RIGHT,\n (1, 0, 0, 0) : ACTION_TURN_LEFT,\n (0, 1, 0, 0) : ACTION_TURN_RIGHT,\n (0, 0, 0, 1) : ACTION_GO_BACK,\n (1, 0, 0, 1) : ACTION_GO_BACK_LEFT,\n (0, 1, 0, 1) : ACTION_GO_BACK_RIGHT,\n }.get((self.left_key_down, self.right_key_down, self.up_key_down, self.down_key_down), ACTION_WAIT))",
"def play_keyboard_input_game(self):\n self.reset()\n while(not self._exit):\n pg.event.pump()\n self.clock.tick(self.actions_per_second)\n self.check_for_exit()\n self.perform_keyboard_actions()\n self.check_for_end_game()\n self.render()\n self.debug_to_console()\n\n self.cleanup()",
"def computer_play(self):\r\n # Depending on game flow, helped randomize when smack showed up\r\n # This is more of an Easter Egg than anything.\r\n if (self.tr.disks_on_board != 0 and (self.tr.disks_on_board % 6 == 0 or\r\n self.tr.disks_on_board % 6 == 3) and self.tr.turn_tracker):\r\n self.ai.talk_smack()\r\n # Computer identifies possible moves to analyze\r\n for item in self.tr.computer_moves:\r\n self.ai.coordinate_extractor(item)\r\n # Computer chooses move\r\n choice = self.ai.choose_move()\r\n # Makes play\r\n choice = self.tr.bd.disks[choice[0]][choice[1]]\r\n self.ai.moves_reset()\r\n choice.color, choice.display_on = 1, True\r\n choice.chain()\r\n # Checks for player move, if none, checks for another move\r\n self.tr.board_scan_reset()\r\n if not self.tr.board_scan():\r\n return\r\n else:\r\n self.tr.board_scan_reset()\r\n if self.tr.board_scan():\r\n self.delay = frameCount\r\n return\r\n # If none, ends game\r\n else:\r\n if not self.tr.game_over:\r\n self.tr.board_scan_reset()\r\n self.tr.scanner()\r\n self.tr.game_over = True\r\n self.tr.run_game_is_over = frameCount",
"def _play(self, func):\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n\n while self._board.possible() != []:\n self._board.move_computer()\n print('\\ncomputer movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is computer')\n return\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is human')\n return\n print('\\nwinner is friendship :)')",
"def play_human_move(self):\n success, info = self.gms.play_human_move(raw_input('Make your next move\\n'.format('')))\n if success:\n print(self.gms.game.get_board_state_pretty())\n if info['status_code'] == core_constants.GAME_STATUS_HUMAN_MOVE_REQUIRED:\n self.play_human_move()\n elif info['status_code'] in [\n core_constants.GAME_STATUS_OVER_DRAW,\n core_constants.GAME_STATUS_OVER_HUMAN_WINNER,\n core_constants.GAME_STATUS_OVER_COMP_WINNER,\n ]:\n print(self.gms.status_code_message_map[info['status_code']])\n else:\n if info['error_code'] == core_constants.ERROR_CODE_INVALID_MOVE:\n self.play_human_move()",
"def inputMove(self):\n # Check if BoardData_update is still opended\n self.checkOpenStatus()\n\n self.genDataFiles(self.player.getCurrentPieceList())\n print(\"PieceRecog.exe\", len(self.player.getCurrentPieceList()))\n\n # Call the recognition function\n os.system(\"PieceRecog.exe \" + str(len(self.player.getCurrentPieceList())))\n\n # Check if BoardData_update is still opended\n self.checkOpenStatus()\n self.player.updateData('src\\\\BoardData_update.csv')\n self.player.getChessPieceList('src\\\\BoardData_update.csv')\n self.pieceList = self.player.getCurrentPieceList()\n # Update the simulation board\n self.playGUI.drawBoard(self.player)\n\n check = self.setNextMove_AB()\n\n if check == 0:\n return True\n\n else:\n pd.DataFrame(self.player.getCurrentPieceList()).to_csv('src\\\\BoardData_update.csv', index=False)\n print(\"Piecelist after Alpha beta: \", self.pieceList)\n return check",
"def run_game(self, board):\n run_program = True\n\n while run_program:\n # eventlistener for mouse events\n for event in pygame.event.get():\n if pygame.mouse.get_pressed() and event.type == pygame.MOUSEBUTTONDOWN:\n if event.type == pygame.MOUSEBUTTONDOWN:\n # Get position of mouse.\n (x, y) = pygame.mouse.get_pos()\n\n # Set circle position in the middle of the grid_square.\n draw_x = x - (x % self.square_size) + self.square_mid\n\n # Calculation to get xPosition from selected Mouse xPosition.\n x = x // 80\n\n # Check if column is full before placing. Break out if that's the case.\n if self.check_if_column_full(board, x):\n break\n\n # Calculate the yPosition, where the chip should be placed with various helper methods.\n draw_y = self.height - (self.square_size * self.draw_dict_mapping[self.get_y_pos(board, x)]) + 40\n\n # Check, which players turn it is.\n if self.playerOne:\n # Player Ones turn.\n pos = (x, self.get_y_pos(board, x))\n if board[pos] == 0:\n board[pos] = 1\n self.draw_circle(draw_x, draw_y, self.playerOne)\n self.screen.blit(self.background, (0, 0))\n if self.check_if_user_won(board, pos, 1):\n run_program = False\n self.switch_player()\n else:\n # Player Twos turn.\n pos = (x, self.get_y_pos(board, x))\n if board[pos] == 0:\n board[pos] = 2\n self.draw_circle(draw_x, draw_y, self.playerOne)\n self.screen.blit(self.background, (0, 0))\n if self.check_if_user_won(board, pos, 2):\n run_program = False\n self.switch_player()\n\n if event.type == pygame.KEYDOWN:\n # End the game with escape.\n if event.key == pygame.K_ESCAPE:\n self.draw = True\n run_program = False\n\n # End the Program with the X in the upper right corner.\n elif event.type == pygame.QUIT:\n self.draw = True\n run_program = False\n\n pygame.display.flip()\n self.game_over(self.playerOne, self.draw)\n # wait for given time and end the game\n pygame.time.wait(5000)\n pygame.quit()",
"def handle_keyboard_input(self):\n keys = pg.key.get_pressed()\n\n if (keys[K_UP]):\n self.grid.change_direction(Direction.up)\n if (keys[K_DOWN]):\n self.grid.change_direction(Direction.down)\n if (keys[K_LEFT]):\n self.grid.change_direction(Direction.left)\n if (keys[K_RIGHT]):\n self.grid.change_direction(Direction.right)\n if (keys[K_SPACE]):\n self.grid.snake.grow()\n if (keys[K_RIGHTBRACKET]):\n self.actions_per_second += 1\n if (keys[K_LEFTBRACKET]):\n self.actions_per_second -= 1\n if (keys[K_t]):\n self.is_training = True\n print(\"========================================================================\")\n print(\"Training: ON\")\n print(\"========================================================================\")\n if (keys[K_s]):\n self.is_training = False\n print(\"========================================================================\")\n print(\"Training: OFF\")\n print(\"========================================================================\")",
"def main():\n board = [\n [' ', ' ', ' '],\n [' ', ' ', ' '],\n [' ', ' ', ' ']\n ]\n counter = 0\n\n while not check_victory(board):\n # This is called the game loop. It keeps the game running until it is finished.\n # On every iteration of the loop we check to see if a player has won.\n\n # Show the board to the player.\n show_board(board)\n\n # Take input to add a new token.\n board = take_input(board, OPTIONS[counter % 2])\n\n counter += 1",
"def main():\n board_state = [['_', '_', '_'],\n ['_', '_', '_'],\n ['_', '_', '_']]\n\n player_turn = int(input(\"Who goes first - select AI(0) or Human(1)? \").strip())\n human_marker = input(\"Select marker - 'X' or 'O'? \").strip()\n \n play(board_state, player_turn, human_marker, 0)",
"def next_play(self):\n\t\tfor card in self.hand:\n\t\t\tif is_valid(card):\n\t\t\t\tself.play_card(card)\n\t\t\t\treturn card\n\t\tglobal forced_rank\n\t\tif forced_rank == \"2\":\n\t\t\tglobal two_multiplier\n\t\t\tself.draw(two_multiplier)\n\t\t\tprint(f\"{self.name} draws {str(two_multiplier)} cards.\")\n\t\t\ttwo_multiplier = 0\n\t\t\tforced_rank = False\n\t\t\treturn None\n\t\tcard = self.draw(1)[0]\n\t\tprint(self.name + \" draws a card.\")\n\t\tif is_valid(card):\n\t\t\tself.play_card(card)\n\t\t\treturn card\n\t\tprint(self.name + \" passes the turn.\")",
"def action_key_press(key, cur_key_type, cur_key, draw, phys, msg, timer, board, force):\n\n\n # delete any old mouse joints prior to dealing with the next keypress\n if key != \"m\" and msg.message != \"Mouse Move\" and cur_key_type == 0:\n for jn in phys.world.joints:\n if type(jn) is b2MouseJoint:\n phys.world.DestroyJoint(jn)\n\n if not key is None and key != \"\":\n if platform == \"linux\" or platform == \"linux2\":\n window = get_active_window_title()\n elif platform == \"win32\":\n window = gw.getActiveWindow().title\n\n if not \"Board\" in window and not \"Toolbar\" in window:\n pass\n else:\n if key == 255:\n pass\n\n elif key == \"r\" and cur_key_type == 0:\n # RESET SCREEN\n if sg.popup_yes_no(\"Are you sure you want to reset?\") == \"Yes\":\n draw.reset()\n msg = Messenger(phys.options[\"screen\"][\"fps\"], board)\n msg.set_message(\"Reset\")\n board.reset = True\n\n elif key == \"q\" and cur_key_type == 0:\n # QUIT\n msg.set_message(\"Quit\")\n val = sg.popup_yes_no(\"Are you sure you want to quit?\")\n if val == \"Yes\":\n board.run = False\n\n\n elif key == \"z\" and cur_key_type == 0:\n # SPAWN\n msg.set_message(\"Spawn\")\n phys.create_block()\n\n elif key == \"u\" and cur_key_type == 0:\n # draw delete blocks\n draw.reset()\n options = {\"Remove Joints\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"x\" and cur_key_type == 0:\n # draw delete blocks\n draw.reset()\n options = {\"Delete\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"p\" and cur_key_type == 0:\n # draw polygon\n draw.reset()\n # msg.set = {\"Dynamic Block\": draw.get_draw_type()}\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Dynamic Block\")\n\n elif key == \"g\" and cur_key_type == 0:\n # draw ground\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Static Block\")\n # options = {\"Static Block\": draw.get_draw_type()}\n\n # cur_key = msg.auto_set(options, key, force)\n\n elif key == \"i\" and cur_key_type == 0:\n # draw terrain\n\n draw.reset()\n options = {\"Generate Terrain\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n draw, phys, board = create_terrain(draw, phys, board=board)\n\n\n elif key == \"f\" and cur_key_type == 0:\n # draw fragments or select\n draw.reset()\n options = {\n \"Fragment Select\": SelectType.select} # \"Fragment Poly\": SelectType.draw, \"Frament Rectangle\": SelectType.rectangle,\n # \"Frament Select\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"1\" and cur_key_type == 0:\n # fire polygon\n draw.reset()\n options = {\"Create\": SelectType.select_point, \"Fire Block\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"4\" and cur_key_type == 0:\n # select\n # draw ground\n draw.reset()\n options = {\"Joint Update\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \";\" and cur_key_type == 0:\n # select\n # draw ground\n draw.reset()\n options = {\"Player Update\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"2\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Rotate\": SelectType.player_select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"m\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Mouse Move\": SelectType.select, \"Normal Move\": SelectType.null, \"Joint Move\": SelectType.null,\n \"Clone Move\": 
SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"t\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Transform\": SelectType.player_select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"e\" and cur_key_type == 0:\n # draw ropes\n if sg.popup_yes_no(\"Are you sure you want to kill all blocks?\") == \"Yes\":\n draw.reset()\n phys.kill_all(static=False)\n msg.set_message(\"Remove Blocks\")\n cur_key = \"e\"\n\n elif key == \"v\" and cur_key_type == 0:\n # draw ropes\n draw.reset()\n msg.set_message(\"Set Spawn\")\n cur_key = \"v\"\n\n elif key == \"h\" and cur_key_type == 0:\n # draw fragment ALL players\n # cur_key = \"h\"\n msg.set_message(\"Frag All\")\n draw.reset()\n blocks = [bl for bl in phys.block_list if not bl.static is True and not bl.is_terrain is True]\n phys.fractal_block(blocks, create=False, board=board)\n\n elif key == \"k\" and cur_key_type == 0:\n # draw splitter sensor\n draw.reset()\n\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Force\")\n\n\n elif key == \"l\" and cur_key_type == 0:\n # draw splitter sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Splitter\")\n\n\n elif key == \"/\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Impulse\")\n\n\n elif key == \"'\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Goal\")\n\n elif key == \"{\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Spawner\")\n\n\n elif key == \"~\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Motor Switch\")\n\n elif key == \"&\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Water\")\n\n\n elif key == \"^\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Low Gravity\")\n\n\n elif key == \"#\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Gravity Switch\")\n\n elif key == \")\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Center\")\n\n elif key == \"%\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Sticky\")\n\n elif key == \"£\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Enlarger\")\n\n\n elif key == \"$\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Shrinker\")\n\n\n elif key == \"0\" and cur_key_type == 0:\n # pause physics\n phys.force_draw_all = not phys.force_draw_all\n options = {\"Draw All\": SelectType.null, \"Draw Set\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n\n\n elif key == \"o\" and cur_key_type == 0:\n # pause physics\n draw.reset()\n phys.pause = not phys.pause\n msg.set_message(\"Pause\" + (\" On\" if phys.pause is True else \" Off\"))\n cur_key = \"o\"\n\n elif key == \"*\" and cur_key_type == 
0:\n # PICKLE BOARD\n name, blurb = save_gui()\n if not name is None:\n pickler(timer, phys, draw, board, msg, name, blurb)\n msg.set_message(\"State Saved\")\n cur_key = \"*\"\n draw.reset()\n\n elif key == \"-\":\n # LOAD BOARD\n\n timer, phys, draw, board, msg = load_gui(timer, phys, draw, board, msg, persistant=False)\n config = phys.config\n\n elif key == \"5\" and cur_key_type == 0:\n\n load_options()\n phys.change_config(board=board)\n\n elif key == \"6\" and cur_key_type == 0:\n\n board, phys, msg = update_background(board, phys, msg)\n\n\n elif key == \"j\" and cur_key_type == 0:\n # draw joints\n draw.reset()\n options = {\"Merge Blocks\": SelectType.select,\n \"Distance Joint\": SelectType.straight_join, \"Rope Joint\": SelectType.straight_join,\n \"Prismatic Joint\": SelectType.straight_join,\n \"Electric\": SelectType.line_join,\n \"Chain\": SelectType.line_join2,\n \"Weld Joint\": SelectType.straight_join, \"Wheel Joint\": SelectType.circle,\n \"Rotation Joint\": SelectType.rotation_select, \"Pulley\": SelectType.d_straight_join}\n\n cur_key = msg.auto_set(options, key, force)\n\n\n\n elif key == \"tab\":\n # Tab key press, this switches to move mode\n if cur_key_type == 0:\n cur_key_type = 1\n msg.set_message(\"Drawing Mode Enabled\")\n draw.reset()\n else:\n cur_key_type = 0\n msg.set_message(\"Create Mode Enabled\")\n draw.reset()\n\n\n # Drawing mode buttons\n\n elif key == \"`\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Change Keys\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"1\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Screen Move\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"2\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Center Clicked\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"]\" and cur_key_type == 1:\n # draw polygon\n draw.reset()\n options = {\"Fire Bullet\": SelectType.bullet_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"[\" and cur_key_type == 1:\n # draw polygon\n draw.reset()\n options = {\"Choose Player\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"3\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Motor Forwards\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"4\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Motor Backwards\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"9\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n cur_key = key + str(SelectType.vector_direction.value)\n msg.set_message(\"Force\")\n\n elif key == \"0\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Relative Force\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"5\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Rotate CCW\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"6\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Rotate CW\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"7\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n cur_key = key + str(SelectType.vector_direction.value)\n msg.set_message(\"Impulse\")\n\n\n elif key == \"8\" and 
cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Relative Impulse\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"!\" and cur_key_type == 1:\n \"\"\"\n Used to attach an relative impulse to a block\n \"\"\"\n board.translation = np.array([0, 0])\n\n # do move keypresses:\n if cur_key_type == 1:\n phys.do_keypress(key)\n\n return cur_key_type, cur_key, draw, phys, msg, timer, board",
"def play(self):\n for step_i in range(self.max_step):\n player_id = step_i & 1\n player = self.players[player_id]\n action = player.nxt_move()\n if isinstance(player, mcts.MCTSPlayer) and player.value_net.loggable:\n print(f'Player{player_id}: Action: {action}')\n if not self.is_valid_action(action):\n # because now just consider 2 players\n print(f\"Player: {player_id}, Action: {action} Did Not choose a valid action!\")\n self.board[action // self.w][action % self.w] = player_id\n self.winner = 1 - player_id\n else:\n self.board[action // self.w][action % self.w] = player_id\n self.winner = self.k0()\n self.players[1 - player_id].other_nxt_move(action)\n if self.winner != -1:\n break\n print(f'Winner: {self.winner}')\n for player_id in range(len(self.players)):\n self.players[player_id].game_ended()",
"def user_input(self):\n\n # Above, we set the timeout of getch() on entryscreen to 500ms. That means\n # that the invalid character (-1) is returned every 500 ms if the user\n # enters nothing, and our validator is called. We take this opportunity to\n # relese the curses lock so any other threads (e.g. the message handling\n # thread) have a chance to update the screen. Additionally, we call\n # update() so that any other changes are picked up. We raise _StoppedError\n # to get out of the surrounding loop in edit() so that we can exit this\n # function cleanly and without hijacking any other exceptions (such as\n # KeyboardInterrupt).\n\n class _StoppedError(Exception):\n pass\n\n def validator(ch):\n if ch == curses.KEY_RESIZE:\n self.chatscreen.clear()\n (y, x) = self.global_screen.getmaxyx()\n curses.resizeterm(y, x)\n self.chatscreen.resize(y-Chat.CHATBOX_SIZE, x)\n self.entryscreen.mvwin(y-Chat.CHATBOX_SIZE, 0)\n self.update()\n return None\n try:\n self.curses_lock.release()\n if not self.running:\n raise _StoppedError\n self.update() # has anything changed?\n if ch < 0:\n return None\n return ch\n finally:\n self.curses_lock.acquire()\n\n try:\n self.curses_lock.acquire()\n cmd = self.textpad.edit(validator)\n self.entryscreen.clear()\n except _StoppedError:\n return ''\n finally:\n self.curses_lock.release()\n\n # strip the newlines out of the middle of the words\n cmd = string.replace(cmd, '\\n', '')\n\n # remove unprintable characters\n cmd = (''.join(c if c in string.printable else '' for c in cmd)).strip()\n\n # process commands if necessary\n if cmd.startswith('/'):\n words = cmd.split()\n cmdname = words[0][1:]\n args = words[1:]\n\n if cmdname in self.commands:\n try:\n self.commands[cmdname](*args)\n except CommandError as e:\n self.message('System:', 'Problem executing command: ' + str(e))\n except TypeError as e:\n self.message('System:', str(e))\n else:\n self.message('System:', 'Unknown command: '+cmdname)\n else:\n # it's not a cmd so it must be a message to send\n self.q.put(cmd)\n self.update()",
"def move(self,board,n,display):\n\n\t\tmove = False\n\t\tgoodInput = False\n\t\tn = 0\n\n\t\twhile not goodInput:\n\t\t\tpygame.time.wait(10)\n\t\t\tdisplay.displayBoard()\n\t\t\tmove = display.getMove()\n\n\t\t\tif move == \"End Preset\":\n\t\t\t\treturn move\n\n\t\t\tif move and tuple(move) in board.openPoints():\n\t\t\t\tgoodInput = True\n\t\t\telif move:\n\t\t\t\tprint \"Bad input, try again!\"\n\n\t\t\tn += 1\n\n\t\treturn move",
"def host_game(self):\n current_side = \"X\"\n while ( (not self.win_for(\"X\"))\n and (not self.win_for(\"O\"))\n and (not self.is_full())):\n print()\n print(self)\n print()\n move = Board.INVALID_MOVE\n while not self.allows_move(move):\n move = int(input(current_side + \"'s move: \"))\n self.add_move(move, current_side)\n if current_side == \"X\":\n current_side = \"O\"\n else:\n current_side = \"X\"\n\n if self.win_for(\"X\"):\n print(\"X wins --- congratulations!\\n\")\n elif self.win_for(\"O\"):\n print(\"O wins --- congratulations!\\n\")\n else:\n print(\"Tied game!\\n\")\n\n print()\n print(self)",
"def human_expert(_obs):\n\n while True:\n env.render()\n print_play_keys(env.action_str)\n time.sleep(0.2)\n key_pressed = keyboard.read_key()\n # return index of action if valid key is pressed\n if key_pressed:\n if key_pressed in KEY_ACTION_DICT:\n return KEY_ACTION_DICT[key_pressed]\n elif key_pressed == \"esc\":\n print(\"You pressed esc, exiting!!\")\n break\n else:\n print(\"You pressed wrong key. Press Esc key to exit, OR:\")",
"def input(self, event):\n # If the window is quit.\n if event.type == pygame.QUIT:\n # Exit the game.\n return 0\n\n # If escape is hit.\n if (\n event.type == pygame.QUIT\n or event.type == pygame.KEYDOWN\n and event.key == pygame.K_ESCAPE\n ):\n # Return to the menu.\n return 1\n\n # If SPACE is hit.\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n # If the player can move\n if self.background1.getMoving():\n # Jump sound effect.\n self.jumpSound.play()\n # Make the player jump.\n self.player.jump()\n\n # If game end.\n if self.gameEnd:\n # If the exit button is pressed.\n if self.exitButton.input(event):\n return 1\n # If the exit button is pressed.\n if self.retryButton.input(event):\n self.reset()\n\n # Continue the game.\n return 2",
"def main():\n \n is_white_move = True\n board = initial_state()\n print_board(board)\n \n while True:\n if is_white_move == True:\n print()\n result = str(input(\"White's move: \"))\n else:\n print()\n result = str(input(\"Black's move: \"))\n\n if result == 'h' or result == 'H':\n print(HELP_MESSAGE)\n print_board(board)\n elif result == 'q' or result == 'Q':\n confirm_quit = str(input(\"Are you sure you want to quit? \"))\n if confirm_quit == 'y' or confirm_quit == \"Y\":\n break\n else:\n print_board(board) \n\n else:\n if valid_move_format(result) == False:\n print('Invalid move')\n print()\n print_board(board)\n else:\n move = process_move(result)\n if is_move_valid(move, board, is_white_move): \n board = update_board(board, move)\n print_board(board)\n is_white_move = not is_white_move\n if check_game_over(board, is_white_move):\n break\n else:\n print('Invalid move')\n print()\n print_board(board)",
"def receive_play(current_player, marks, board_state):\n valid_answer = False\n while not valid_answer:\n current_play = input(\n \"{}, choose a square (1-9) to place your {}. \".format(current_player, marks[current_player]))\n valid_answer = check_inputs(filter_occupied(board_state), current_play)\n if valid_answer:\n current_play = int(current_play)\n board_state[current_play - 1] = marks[current_player]\n return board_state",
"def play(state, player_turn, human_marker, depth):\n alpha = -10\n beta = 10\n while True:\n draw_board(state)\n marker = is_terminal(state)\n\n if marker is not None:\n if marker == 'X':\n print(\"The winner is 'X'!\")\n elif marker == 'O':\n print(\"The winner is 'O'!\")\n else:\n print(\"The game ended in a tie!\")\n return\n\n # Presumably AI's turn.\n if player_turn == 0:\n ai_marker = 'X' if human_marker == 'O' else 'O'\n if ai_marker == 'X':\n value, move = max_value(state, ai_marker, depth, alpha, beta)[:2]\n else:\n value, move = min_value(state, ai_marker, depth, alpha, beta)[:2]\n depth = depth + 1\n state[move[0]][move[1]] = ai_marker\n player_turn = 1\n\n # Presumably human player's turn.\n else:\n move = list(map(int, input('Enter your move: ').strip('[]').split(',')))\n while not is_valid_move(state, move):\n move = list(map(int, input('Enter your move: ').strip('[]').split(',')))\n\n state[move[0]-1][move[1]-1] = human_marker\n depth = depth + 1\n player_turn = 0",
"def next_turn(self): \n if (self.moves):\n self.board = self.select_move() \n self.moves = []\n self.roll = self.roll_dice()\n self.player = not self.player\n self.generate_valid_moves()",
"def run_game():\n mainBoard = get_new_board()\n resetBoard(mainBoard)\n showHints = False\n\n turn = random.choice(['computer', 'player'])\n\n # Draw the starting board and ask the player what color they want.\n draw_board(mainBoard)\n\n playerTile, computer_tile = enter_player_tile()\n # Make the Surface and Rect objects for the \"New Game\" and \"Hints\" buttons\n\n newGameSurf = FONT.render('New Game', True, TEXTCOLOR, TEXTBGCOLOR2)\n newGameRect = newGameSurf.get_rect()\n newGameRect.topright = (WINDOWWIDTH - 8, 10)\n\n hintsSurf = FONT.render('Hints', True, TEXTCOLOR, TEXTBGCOLOR2)\n hintsRect = hintsSurf.get_rect()\n hintsRect.topright = (WINDOWWIDTH - 8, 40)\n\n while True: # main game loop\n # Keep looping for player and computer's turns.\n if turn == 'player':\n # Player's turn:\n if get_valid_moves(mainBoard, playerTile) == []:\n # If it's the player's turn but they\n # can't move, then end the game.\n break\n\n movexy = None\n\n while movexy == None:\n # Keep looping until the player clicks on a valid space.\n # Determine which board data structure to use for display.\n if showHints:\n boardToDraw = get_board_with_valid_moves(mainBoard, playerTile)\n else:\n boardToDraw = mainBoard\n\n check_for_quit()\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n # Handle mouse click events\n mousex, mousey = event.pos\n if newGameRect.collide_point((mousex, mousey)):\n # Start a new game\n return True\n elif hintsRect.collide_point((mousex, mousey)):\n # Toggle hints mode\n showHints = not showHints\n # movexy is set to a two-item tuple XY coordinate, or None value\n movexy = get_space_clicked(mousex, mousey)\n\n if movexy != None and not isValidMove(mainBoard, playerTile, movexy[0], movexy[1]):\n movexy = None\n\n # Draw the game board.\n draw_board(boardToDraw)\n draw_info(boardToDraw, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n MAINCLOCK.tick(FPS)\n pygame.display.update()\n\n # Make the move and end the turn.\n make_move(mainBoard, playerTile, movexy[0], movexy[1], True)\n if get_valid_moves(mainBoard, computer_tile) != []:\n # Only set for the computer's turn if it can make a move.\n turn = 'computer'\n else:\n # Computer's turn:\n if get_valid_moves(mainBoard, computer_tile) == []:\n # If it was set to be the computer's turn but\n # they can't move, then end the game.\n break\n\n # Draw the board.\n draw_board(mainBoard)\n draw_info(mainBoard, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n # Make it look like the computer is thinking by pausing a bit.\n pauseUntil = time.time() + random.randint(5, 15) * 0.1\n\n while time.time() < pauseUntil:\n pygame.display.update()\n\n # Make the move and end the turn.\n x, y = get_computer_move(mainBoard, computer_tile)\n make_move(mainBoard, computer_tile, x, y, True)\n\n if get_valid_moves(mainBoard, playerTile) != []:\n # Only set for the player's turn if they can make a move.\n turn = 'player'\n\n # Display the final score.\n draw_board(mainBoard)\n scores = get_score_of_board(mainBoard)\n # Determine the text of the message to display.\n\n if scores[playerTile] > scores[computer_tile]:\n text = 'You beat the computer by %s points! Congratulations!' 
% \\\n (scores[playerTile] - scores[computer_tile])\n elif scores[playerTile] < scores[computer_tile]:\n text = 'You lost. The computer beat you by %s points.' % \\\n (scores[computer_tile] - scores[playerTile])\n else:\n text = 'The game was a tie!'\n\n textSurf = FONT.render(text, True, TEXTCOLOR, TEXTBGCOLOR1)\n textRect = textSurf.get_rect()\n textRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))\n DISPLAYSURF.blit(textSurf, textRect)\n\n # Display the \"Play again?\" text with Yes and No buttons.\n text2Surf = BIGFONT.render('Play again?', True, TEXTCOLOR, TEXTBGCOLOR1)\n text2Rect = text2Surf.get_rect()\n text2Rect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2) + 50)\n\n # Make \"Yes\" button.\n yesSurf = BIGFONT.render('Yes', True, TEXTCOLOR, TEXTBGCOLOR1)\n yesRect = yesSurf.get_rect()\n yesRect.center = (int(WINDOWWIDTH / 2) - 60, int(WINDOWHEIGHT / 2) + 90)\n\n # Make \"No\" button.\n noSurf = BIGFONT.render('No', True, TEXTCOLOR, TEXTBGCOLOR1)\n noRect = noSurf.get_rect()\n noRect.center = (int(WINDOWWIDTH / 2) + 60, int(WINDOWHEIGHT / 2) + 90)\n\n while True:\n # Process events until the user clicks on Yes or No.\n check_for_quit()\n\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n mousex, mousey = event.pos\n\n if yesRect.collide_point((mousex, mousey)):\n return True\n\n elif noRect.collide_point((mousex, mousey)):\n return False\n\n DISPLAYSURF.blit(textSurf, textRect)\n DISPLAYSURF.blit(text2Surf, text2Rect)\n DISPLAYSURF.blit(yesSurf, yesRect)\n DISPLAYSURF.blit(noSurf, noRect)\n\n pygame.display.update()\n MAINCLOCK.tick(FPS)",
"def human(gstate: TicTacToe, *args):\n return input_with_validation(\"Please enter move.\", list(gstate.next_moves.keys()))",
"def play_game(word_list):\n hand = None\n while True:\n game_type = raw_input('Please choose from the following: n(new random hand), r(last hand) or e(exit the game):')\n if game_type == 'n':\n hand = deal_hand(HAND_SIZE)\n player_type = raw_input('Please choose from the following: u(user can play) or c(computer can play):')\n if player_type == 'u':\n play_hand(hand, word_list)\n elif player_type == 'c':\n comp_play_hand(hand, word_list)\n else: \n player_type = raw_input('Incorrect input. Please choose from the following: u(user can play) or c(computer can play):')\n elif game_type == 'r' and hand == None:\n print 'Incorrect input. Please first choose n.'\n elif game_type == 'r':\n player_type = raw_input('Please choose from the following: u(user can play) or c(computer can play):')\n if player_type == 'u':\n play_hand(hand, word_list)\n elif player_type == 'c':\n comp_play_hand(hand, word_list)\n else: \n player_type = raw_input('Incorrect input. Please choose from the following: u(user can play) or c(computer can play):') \n elif game_type == 'e':\n print \"Exited the game.\"\n break\n else: \n print 'Incorrect input.'",
"def advance(self, board):",
"def play_turn(self, player):\n input('Play turn...')\n print(f'{player.name} to play...\\n')\n \n if isinstance(player, ComputerPlayer):\n print('Thinking...')\n time.sleep(1)\n row, col = player.algorithm(self.board)\n self.board.play(row, col, player.token) # algorithms index from (0,0) - so adjust this to (1,1) etc \n else:\n print(self.board)\n while True:\n usr_input = input(f'{player.name}, enter a move: ')\n \n if usr_input.lower() == 'exit':\n print(f'{player.name} exited!')\n self.exit_flag = True\n return\n\n if usr_input.lower() == 'skip':\n print(f'{player.name} has skipped their go!')\n return\n\n row, col = [int(i) for i in usr_input.split(' ')]\n try:\n self.board.play(row - 1, col - 1, player.token) # index top-left corner as (1,1) in player input, vs (0,0) everywhere else\n except IndexError as e:\n print(str(e), 'Play a different position.')\n else:\n break\n print(f'{player.name} played: ({row + 1}, {col + 1})\\n')\n print(self.board)"
] | [
"0.72409004",
"0.64886266",
"0.6171364",
"0.59191036",
"0.59139985",
"0.5860547",
"0.57915825",
"0.5791442",
"0.5777218",
"0.57734597",
"0.5769721",
"0.5763385",
"0.5751395",
"0.5743939",
"0.5728276",
"0.5692042",
"0.56845224",
"0.5682954",
"0.56781256",
"0.56780386",
"0.5671816",
"0.5670868",
"0.56655097",
"0.5663507",
"0.56496686",
"0.55943656",
"0.5592782",
"0.55787945",
"0.55751777",
"0.55740297"
] | 0.6859589 | 1 |
Confirm a user is sure about a discard and then perform it once confirmed. | def discardConfirmation(self, confirmed, wrapped_discards):
discards = []
for element in wrapped_discards:
discards.append(element.card)
if self.discards != discards:
confirmed = False
self.discards = discards
if not confirmed:
self.controller.note = "Please confirm - discard " + "{0}".format(self.discards)
return True # ask for confirmation
else:
# confirmed is True, performing discard and removing discarded wrapped cards from hand_info.
if self.discard_confirm:
controller_response = self.controller.discard(self.discards)
if controller_response:
for element in wrapped_discards:
self.hand_info.remove(element)
return False # now that this is done, we don't have anything waiting on confirmation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")",
"def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True",
"async def confirm(ctx, *args: discord.Member):\n await _confirm(args)",
"def confirm(self):\n self.automatically_detected=False\n self.save()",
"def unconfirm(self):\n self.automatically_detected=True\n self.save()",
"def confirm(dt):\n\n database_api.signOut(Cache.get(\"info\",\n \"token\"\n ),\n Cache.get(\"info\",\n \"nick\"\n )\n )\n\n if platform.system() == \"Linux\":\n os.system(\"sh func/sh/restore.sh\")\n\n App.get_running_app().stop()",
"def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()",
"def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return",
"def confirm_as_variable() -> None:\n\n confirmed = click.confirm(\"Are you sure you want to drop the users table?\")\n status = click.style(\"yes\", fg=\"green\") if confirmed else click.style(\"no\", fg=\"red\")\n click.echo(\"Drop table confirmed?: \" + status)",
"def action_confirm(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'confirmed'\n action = 'confirm'\n default = {\n 'state': status,\n 'engineering_writable': False,\n }\n doc_default = {\n 'state': status,\n 'writable': False,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Confirmed'),\n 'action': action,\n 'docaction': 'confirm',\n 'excludeStatuses': ['confirmed', 'transmitted', 'released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['draft'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)",
"def confirm(self, message):\n raise NotImplementedError",
"def confirm_removal(confirm, filename):\n if confirm == 'y' or confirm == 'yes':\n remove_file(filename)\n elif confirm == 'n' or confirm == 'no':\n print(\"File will stay there\")\n else:\n print(\"Please etner a valid answer (y/n, yes/no)\")\n confirm_removal()",
"def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()",
"def confirm():\n\t\traise NotImplementedError",
"def confirmed(self):",
"def confirm_delete(self):\n self.language = LANGUAGE.get(self.lang)\n message = Message(self.language[\"del_user\"], self.language[\"del_info\"])\n delete_message = message.create_question_message(self.language[\"yes\"])\n response = delete_message.exec()\n\n if response == QMessageBox.Yes:\n self.delete_user()\n elif response == QMessageBox.No:\n delete_message.close()",
"def confirm():\n if request.method == 'POST':\n user_type = session.get('type', None)\n if user_type == 'Admin':\n return redirect('/index')\n elif user_type == 'Client':\n return redirect('/clients/' + session.get('name'))\n else:\n return redirect('/')\n\n confirmed = request.values['confirmed']\n \n return render_template('confirm.html', confirmed=confirmed)",
"def test_confirm_user(self):\n user = User(email=\"[email protected]\", password=\"testpassword\")\n\n self.assertFalse(user.confirmed)\n self.assertIsNone(user.confirmed_at)\n self.assertIsNotNone(user.confirmation_token)\n\n user.confirm()\n\n self.assertTrue(user.confirmed)\n self.assertIsNotNone(user.confirmed_at)\n self.assertIsNone(user.confirmation_token)",
"def confirm_so(self, cr, uid, ids,context=None):\n return self.write(cr, uid, ids, {'state':'confirm_so'}, context=context)",
"def prompt_discard(self, num_discards: int, state: 'State'):\n # TODO: Refactor to allow for flexible discarding (see Cellar). Meybe a force discard and a prompt discard?\n while self.hand and num_discards > 0:\n sorted_hand = sorted(list(self.hand), key=card_sort)\n card_name = self.get_input(\n f'Discard {num_discards} cards'\n f'Hand: {sorted_hand}',\n sorted_hand,\n state\n )\n # If the prompted card is in hand, discard it\n card = next((card for card in self.hand if card.name == card_name), None)\n if card:\n self.hand[card] -= 1\n self.hand += Counter() # Remove 0 and negative counts\n self.discard_pile.append(card)\n num_discards -= 1\n print(f'Discarded {card.name}')\n else:\n print(f'{card.name} is not in hand')",
"def __onConfirmNo(self):\n self.__confDlg.reject()",
"def cancel_dummy(self):\n if self.state != 'authorized':\n self.raise_user_error('cancel_only_authorized')\n else:\n self.state = 'cancel'\n self.save()",
"def confirm(secret_id='', game=None):\n player = get_player(current_app, request, secret_id)\n if player.is_confirmed:\n flash(_('Your name is already confirmed as %(n)s', n=player.name),\n 'error')\n return redirect(url_for('player.player',\n secret_id=player.secret_id,\n _method='GET'))\n if game.state != game.State.CONFIRMING:\n return render_template('player/too-late.html',\n player_name=player.name)\n if request.method == 'POST':\n player.confirm(request.form.get('player_name', ''))\n session[USER_COOKIE] = player.cookie\n return redirect(url_for('player.player',\n secret_id=player.secret_id,\n _method='GET'))\n else: # request.method == 'GET'\n return render_template('player/confirm.html',\n unconfirmed_name=player.name,\n secret_id=secret_id)",
"def confirm(self, task, log):\n self._tasks_in_process.remove(task)\n log.confirm(self._name, task.get_name(), task.get_payment())",
"def _handle_consent_confirmation(user, is_confirmed):\n if is_confirmed == \"yes\":\n # user has already given consent, continue flow\n response = server.create_authorization_response(grant_user=user)\n else:\n # user did not give consent\n response = server.create_authorization_response(grant_user=None)\n return response",
"def confirm(userid, choice, popupid):\r\n if choice:\r\n players[userid].resetSkills()",
"def Confirm(self):\r\n \r\n global references\r\n self.from_ed = self.ed_result.get(\"1.0\",'end-1c')\r\n references.append(self.from_ed)\r\n self.confirm_b.configure(state = 'disabled')\r\n self.discard_b.configure(state = 'disabled')\r\n self.finalresult.configure(state = 'normal')\r\n self.finalresult.delete('1.0', END)\r\n \r\n self.final()",
"def no_going_back(confirmation):\r\n if not confirmation:\r\n confirmation = 'yes'\r\n\r\n return valid_response(\r\n 'This action cannot be undone! '\r\n 'Type \"%s\" or press Enter to abort: ' % confirmation,\r\n str(confirmation))",
"def confirm(text, window=None):\n return message(text, u'Confirma', M_QUESTION, B_YES_NO, window) == R_YES",
"def confirm(self, token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token.encode('utf-8'))\n except (BadSignature, SignatureExpired):\n return False\n if data.get('confirm') != self.id:\n return False\n self.confirmed = True\n db.session.add(self)\n return True"
] | [
"0.6619172",
"0.65421534",
"0.63608795",
"0.6252194",
"0.62353194",
"0.6222551",
"0.61789745",
"0.6149395",
"0.61379594",
"0.6117161",
"0.6086858",
"0.6077924",
"0.6073165",
"0.60582376",
"0.60207057",
"0.60002977",
"0.59929824",
"0.5937757",
"0.593176",
"0.59203535",
"0.5888317",
"0.58854973",
"0.5876241",
"0.58461815",
"0.5845953",
"0.58290064",
"0.5824021",
"0.5810428",
"0.5774078",
"0.5760513"
] | 0.72036105 | 0 |
Test Category model data insertion/types/field attributes | def test_category_model_entry(self): # TEST LOADING THE INFORMATION INTO THE MODELS TO BE TESTED
data = self.data1
        self.assertTrue(isinstance(data, Category)) # PERFORMS THE TEST | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_category_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Category))\n self.assertEqual(str(data), 'django')",
"def test_create_category(self):\n pass",
"def test_category_has_access_to_model_data():\n category = Category()\n category_data = category.get_category_data()\n\n assert type(category_data) is list\n assert len(category_data) > 1",
"def test_category_model_entry(self):\n data = self.data1\n self.assertEqual(str(data), 'django')",
"def test_update_category(self):\n pass",
"def test_new_category_data(db_session):\n new_cat = Category(\n label=\"test_label\",\n desc=\"test_desc\"\n )\n db_session.add(new_cat)\n category = db_session.query(Category).all()\n assert category[0].label == \"test_label\"\n assert category[0].desc == \"test_desc\"",
"def test_save(self, init_db):\n params = {\n 'name': fake.alphanumeric(15)\n }\n category = Category(**params)\n assert category == category.save()",
"def test_create_category(self):\n payload = {\n 'name': 'Houses',\n }\n res = self.client.post(CATEGORY_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n category = Category.objects.get(id=res.data['id'])\n serializer = CategorySerializer(category)\n self.assertEqual(serializer.data['name'], payload['name'])",
"def test_category_save(database):\n category = Category(title=\"Test Category\")\n category.save()\n\n assert category.title == \"Test Category\"",
"def test_create(self):\n self.assertTrue(Category.objects.exists())",
"def test_create_cat_object():\n from .scripts.initializedb import create_cat_object\n cat_object = create_cat_object(\"a\", \"b\", \"c\", \"c\")\n assert isinstance(cat_object, Category)",
"def test_model_string_representation(self, init_db, category):\n assert repr(category) == f'<Category: {category.name}>'",
"def test_add_category(self):\n self.add_success(self.test_data['pants'])",
"def test_add_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n rv = self.category('Breakfast')\n self.assertIn(b'Category created', rv.data)",
"def test_create_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n rv = self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.assertIn(b'Recipe created', rv.data)",
"def test_create_category(self):\n res = self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Stews', str(res.data))",
"def test_add_category_missing_fields(self):\n category = json.dumps({\n 'desc': \"Jamaican\",\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('Check the keys and try again', response.data.decode())",
"def test_edit_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n rv = self.edit_recipe('edited cakes', 'edited blah blah blah spoon , heat')\n self.assertIn(b'Recipe successfully updated', rv.data)",
"def test_category_lowercase(self):\n self.assertEqual(self.category.category, \"test\")",
"def test_update(self, init_db, category):\n category_name = fake.alphanumeric()\n category.update(name=category_name)\n assert category.name == category_name",
"def test_category_mixed(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')",
"def insert_data_category_into_bdd(self):\n for category in constant.LIST_CATEGORIES:\n data = Category(name=category)\n data.save()\n print(\"the category \" + str(category) + \" has been created\")",
"def test_dashboard_recipe_created_with_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.recipe_dashboard()\n self.assertIn(b'JunkFood', rv.data)",
"def test_get(self, init_db, category):\n assert Category.get(category.id) == category",
"def test_edit_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('Breakfast')\n self.dashboard()\n rv = self.edit_category('JunkFood')\n self.assertIn(b'Category successfully updated', rv.data)",
"def test_add_category_to_asset(self):\n pass",
"def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')",
"def test_find_by_category(self):\n Pet(0, \"fido\", \"dog\").save()\n Pet(0, \"kitty\", \"cat\").save()\n pets = Pet.find_by_category(\"cat\")\n self.assertNotEqual(len(pets), 0)\n self.assertEqual(pets[0].category, \"cat\")\n self.assertEqual(pets[0].name, \"kitty\")",
"def test_category_mixed_on_edit(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 1)\n self.go200('minus_edit', [self.superuser, minus.id])\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')",
"def test_update_category(self):\n self.update_success(self.test_data['pants'], self.test_data['shirts'])"
] | [
"0.7511274",
"0.74611753",
"0.7387198",
"0.7346792",
"0.7272893",
"0.721909",
"0.6904485",
"0.69011027",
"0.6842334",
"0.67509025",
"0.67248386",
"0.6717649",
"0.6624068",
"0.656367",
"0.6542668",
"0.6540674",
"0.65404874",
"0.6470544",
"0.646362",
"0.64630693",
"0.64449406",
"0.64380944",
"0.6420919",
"0.63919896",
"0.63862425",
"0.63689536",
"0.6335148",
"0.63275295",
"0.630453",
"0.6263592"
] | 0.80247825 | 0 |
Test product model data insertion/types/field attributes | def test_products_model_entry(self):
data = self.data1
self.assertTrue(isinstance(data, Product))
self.assertEqual(str(data), 'django beginners') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_product_fields(self):\n\n prd = Product.objects.get(id=1)\n\n # test the type of name field\n prd_type = prd._meta.get_field('name').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label name\n max_length = prd._meta.get_field('name').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label name\n prd_blank = prd._meta.get_field('name').blank\n self.assertTrue(prd_blank)\n # test null field in label name\n prd_null = prd._meta.get_field('name').null\n self.assertTrue(prd_null)\n\n # test the type of description field\n prd_type = prd._meta.get_field('description').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label description\n max_length = prd._meta.get_field('description').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label description\n prd_blank = prd._meta.get_field('description').blank\n self.assertTrue(prd_blank)\n # test null field in label description\n prd_null = prd._meta.get_field('description').null\n self.assertTrue(prd_null)\n\n # test the type of nutrition_grade field\n prd_type = prd._meta.get_field('nutrition_grade').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label nutrition_grade\n max_length = prd._meta.get_field('nutrition_grade').max_length\n self.assertEqual(max_length, 1)\n # test blank field in label nutrition_grade\n prd_blank = prd._meta.get_field('nutrition_grade').blank\n self.assertTrue(prd_blank)\n # test null field in label nutrition_grade\n prd_null = prd._meta.get_field('nutrition_grade').null\n self.assertTrue(prd_null)\n\n # test the type of barcode field\n prd_type = prd._meta.get_field('barcode').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label barcode\n max_length = prd._meta.get_field('barcode').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label barcode\n prd_blank = prd._meta.get_field('barcode').blank\n self.assertFalse(prd_blank)\n # test null field in label barcode\n prd_null = prd._meta.get_field('barcode').null\n self.assertFalse(prd_null)\n\n # test the type of url field\n prd_type = prd._meta.get_field('url').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url\n max_length = prd._meta.get_field('url').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url\n prd_blank = prd._meta.get_field('url').blank\n self.assertTrue(prd_blank)\n # test null field in label url\n prd_null = prd._meta.get_field('url').null\n self.assertTrue(prd_null)\n\n # test the type of url_pic field\n prd_type = prd._meta.get_field('url_pic').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url_pic\n max_length = prd._meta.get_field('url_pic').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url_pic\n prd_blank = prd._meta.get_field('url_pic').blank\n self.assertTrue(prd_blank)\n # test null field in label url_pic\n prd_null = prd._meta.get_field('url_pic').null\n self.assertTrue(prd_null)\n\n # test the type of store field\n prd_type = prd._meta.get_field('store').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label store\n max_length = prd._meta.get_field('store').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label store\n prd_blank = prd._meta.get_field('store').blank\n self.assertTrue(prd_blank)\n # test null field in label store\n prd_null = prd._meta.get_field('store').null\n self.assertTrue(prd_null)\n\n # test the type of fat field\n prd_type = 
prd._meta.get_field('fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label fat max digits\n max_digits = prd._meta.get_field('fat').max_digits\n self.assertEqual(max_digits, 5)\n # label fat decimal places\n dec_places = prd._meta.get_field('fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label fat\n prd_blank = prd._meta.get_field('fat').blank\n self.assertTrue(prd_blank)\n # test null field in label fat\n prd_null = prd._meta.get_field('fat').null\n self.assertTrue(prd_null)\n\n # test the type of saturated_fat field\n prd_type = prd._meta.get_field('saturated_fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label saturated_fat max digits\n max_digits = prd._meta.get_field('saturated_fat').max_digits\n self.assertEqual(max_digits, 5)\n # label saturated_fat decimal places\n dec_places = prd._meta.get_field('saturated_fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label saturated_fat\n prd_blank = prd._meta.get_field('saturated_fat').blank\n self.assertTrue(prd_blank)\n # test null field in label saturated_fat\n prd_null = prd._meta.get_field('saturated_fat').null\n self.assertTrue(prd_null)\n\n # test the type of sugar field\n prd_type = prd._meta.get_field('sugar').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label sugar max digits\n max_digits = prd._meta.get_field('sugar').max_digits\n self.assertEqual(max_digits, 5)\n # label sugar decimal places\n dec_places = prd._meta.get_field('sugar').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label sugar\n prd_blank = prd._meta.get_field('sugar').blank\n self.assertTrue(prd_blank)\n # test null field in label sugar\n prd_null = prd._meta.get_field('sugar').null\n self.assertTrue(prd_null)\n\n # test the type of salt\n prd_type = prd._meta.get_field('salt').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label salt max digits\n max_digits = prd._meta.get_field('salt').max_digits\n self.assertEqual(max_digits, 5)\n # label salt decimal places\n dec_places = prd._meta.get_field('salt').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label salt\n prd_blank = prd._meta.get_field('salt').blank\n self.assertTrue(prd_blank)\n # test null field in label salt\n prd_null = prd._meta.get_field('salt').null\n self.assertTrue(prd_null)\n\n # test the type of prd_cat\n prd_type = prd._meta.get_field('prd_cat').get_internal_type()\n self.assertEqual(prd_type, 'ForeignKey')\n # label db_column\n fk = prd._meta.get_field('prd_cat').db_column\n self.assertEqual(fk, 'prd_cat')\n # test blank field in label prd_cat\n prd_blank = prd._meta.get_field('prd_cat').blank\n self.assertFalse(prd_blank)\n # test null field in label prd_cat\n prd_null = prd._meta.get_field('prd_cat').null\n self.assertFalse(prd_null)\n\n # Favourite table ----------------------------------------------------",
"def test_product_create(self):\n self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])",
"def test_Product(self):\n self.assertEquals(self.prod_1.pk, 1)\n self.assertEquals(self.prod_1.ean, '3350033118072')\n self.assertEquals(self.prod_1.name, 'test 1')\n self.assertEquals(self.prod_1.nutriscore, 'u')\n self.assertEquals(self.prod_1.category, 'cat 1')",
"def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)",
"def setUpTestData(cls):\n Product_type.objects.create(\n name='New_Product', display_name='New Product.')",
"def test_object_creation(self):\n serializer = ProductSerializer(data=self.data)\n self.assertTrue(serializer.is_valid())\n product = serializer.save()\n\n self.assertEqual(product.title, self.title)\n self.assertEqual(product.description, self.description)\n self.assertEqual(product.price, self.price)\n self.assertTrue(product.is_active)\n self.assertTrue(product.available)",
"def test_product_update(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe update\n data = { \n \"name\": \"Changed the name\",\n \"description\": self.product_data[\"description\"],\n \"image_link\": self.product_data[\"image_link\"],\n \"price\": self.product_data[\"price\"]\n }\n self._update_model(\"product\", id, data, [\"name\"])\n self.assertIsNotNone(id)",
"def test_products_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Recipe))\n self.assertEqual(str(data), 'django beginners')",
"def test_prep_new_data(self):\n pass",
"def test_new_product(self):\n prod = Product(name='New Product', price=100, weight=60,\n flammability=0.9)\n self.assertEqual(prod.explode(), '...BABOOM!!')\n self.assertEqual(prod.stealability(), 'Very stealable!')",
"def test_custom_attribute_post(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"custom_attribute_values\": [{\n \"attribute_value\": \"my custom attribute value\",\n \"custom_attribute_id\": cad.id,\n }],\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n ca_json = response.json[0][1][\"product\"][\"custom_attribute_values\"][0]\n self.assertIn(\"attributable_id\", ca_json)\n self.assertIn(\"attributable_type\", ca_json)\n self.assertIn(\"attribute_value\", ca_json)\n self.assertIn(\"id\", ca_json)\n self.assertEqual(ca_json[\"attribute_value\"],\n \"my custom attribute value\")\n\n product = models.Product.eager_query().first()\n self.assertEqual(len(product.custom_attribute_values), 1)\n self.assertEqual(\n product.custom_attribute_values[0].attribute_value,\n \"my custom attribute value\"\n )",
"def test_update_attribute_data(self):\n pass",
"def test_custom_attribute_put_add(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n product_url = response.json[0][1][\"product\"][\"selfLink\"]\n headers = self.client.get(product_url).headers\n\n product_data[0][\"product\"][\"custom_attribute_values\"] = [{\n \"attribute_value\": \"added value\",\n \"custom_attribute_id\": cad.id,\n }]\n\n response = self._put(product_url, product_data[0], extra_headers={\n 'If-Unmodified-Since': headers[\"Last-Modified\"],\n 'If-Match': headers[\"Etag\"],\n })\n\n product = response.json[\"product\"]\n\n self.assertEqual(len(product[\"custom_attribute_values\"]), 1)\n ca_json = product[\"custom_attribute_values\"][0]\n self.assertIn(\"attributable_id\", ca_json)\n self.assertIn(\"attributable_type\", ca_json)\n self.assertIn(\"attribute_value\", ca_json)\n self.assertIn(\"id\", ca_json)\n self.assertEqual(ca_json[\"attribute_value\"],\n \"added value\")\n\n product = models.Product.eager_query().first()\n self.assertEqual(len(product.custom_attribute_values), 1)\n self.assertEqual(\n product.custom_attribute_values[0].attribute_value,\n \"added value\"\n )\n\n headers = self.client.get(product_url).headers\n\n product_data[0][\"product\"][\"custom_attribute_values\"] = [{\n \"attribute_value\": \"edited value\",\n \"custom_attribute_id\": cad.id,\n }]\n\n response = self._put(product_url, product_data[0], extra_headers={\n 'If-Unmodified-Since': headers[\"Last-Modified\"],\n 'If-Match': headers[\"Etag\"],\n })\n\n product = response.json[\"product\"]\n ca_json = product[\"custom_attribute_values\"][0]\n self.assertIn(\"attributable_id\", ca_json)\n self.assertIn(\"attributable_type\", ca_json)\n self.assertIn(\"attribute_value\", ca_json)\n self.assertIn(\"id\", ca_json)\n self.assertEqual(ca_json[\"attribute_value\"],\n \"edited value\")",
"def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)",
"def test_prep_fields(self):\n pass",
"def setUp(self):\n super().setUp()\n list_of_product_types = [\n 'default_product_variant',\n 'multiple_product_variants',\n 'ceo_title'\n ]\n self.new_product = eval(f\"get_new_product_with_\" \\\n f\"{list_of_product_types[randint(0, len(list_of_product_types) - 1)]}()\")\n response = ProcessRequest('products.json').send_request(\n 'POST',\n data=self.new_product,\n expected_return_codes=[201],\n )\n self.product_id = response.response['product']['id']",
"def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)",
"def test_product_detail(self):\n # first performing create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performing detail\n self._detail_model(\"product\", self.product_data, id, [\"name\", \"description\", \"image_link\", \"price\"])\n \n self.assertIsNotNone(id)",
"def test_custom_attribute_post_both(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n cad_json = builder.json.publish(cad.__class__.query.get(cad.id))\n cad_json = builder.json.publish_representation(cad_json)\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"custom_attribute_definitions\":[\n cad_json,\n ],\n \"custom_attribute_values\": [{\n \"attribute_value\": \"new value\",\n \"custom_attribute_id\": cad.id,\n }],\n \"custom_attributes\": {\n cad.id: \"old value\",\n },\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n ca_json = response.json[0][1][\"product\"][\"custom_attribute_values\"][0]\n self.assertEqual(ca_json[\"attribute_value\"], \"new value\")\n\n product = models.Product.eager_query().first()\n self.assertEqual(len(product.custom_attribute_values), 1)\n self.assertEqual(\n product.custom_attribute_values[0].attribute_value,\n \"new value\"\n )",
"def test_defaults(self):\n p = Product.objects.create(\n name=\"Product\", slug=\"product\", sku=\"4711\", price=42.0)\n\n self.assertEqual(p.name, \"Product\")\n self.assertEqual(p.slug, \"product\")\n self.assertEqual(p.sku, \"4711\")\n self.assertEqual(p.price, 42.0)\n self.assertEqual(p.effective_price, 42.0)\n self.assertEqual(p.short_description, \"\")\n self.assertEqual(p.description, \"\")\n self.assertEqual(len(p.images.all()), 0)\n\n self.assertEqual(p.meta_title, \"<name>\")\n self.assertEqual(p.meta_description, \"\")\n self.assertEqual(p.meta_keywords, \"\")\n\n self.assertEqual(len(p.related_products.all()), 0)\n self.assertEqual(len(p.accessories.all()), 0)\n\n self.assertEqual(p.for_sale, False)\n self.assertEqual(p.for_sale_price, 0.0)\n self.assertEqual(p.active, False)\n\n self.assertEqual(p.deliverable, True)\n self.assertEqual(p.manual_delivery_time, False)\n self.assertEqual(p.delivery_time, None)\n self.assertEqual(p.order_time, None)\n self.assertEqual(p.ordered_at, None)\n self.assertEqual(p.manage_stock_amount, False)\n self.assertEqual(p.stock_amount, 0)\n\n self.assertEqual(p.weight, 0)\n self.assertEqual(p.height, 0)\n self.assertEqual(p.length, 0)\n self.assertEqual(p.width, 0)\n\n self.assertEqual(p.tax, None)\n self.assertEqual(p.sub_type, STANDARD_PRODUCT)\n\n self.assertEqual(p.default_variant, None)\n self.assertEqual(p.variants_display_type, LIST)\n\n self.assertEqual(p.parent, None)\n self.assertEqual(p.active_name, False)\n self.assertEqual(p.active_sku, False)\n self.assertEqual(p.active_short_description, False)\n self.assertEqual(p.active_description, False)\n self.assertEqual(p.active_price, False)\n self.assertEqual(p.active_images, False)\n self.assertEqual(p.active_related_products, False)\n self.assertEqual(p.active_accessories, False)\n self.assertEqual(p.active_meta_description, False)\n self.assertEqual(p.active_meta_keywords, False)",
"def new_object_data(self):\n self.product_fixture = self.F.ProductFactory.create()\n modifiers = (self.datetime, self.resource_name)\n fields = {\n u\"name\": unicode(\"test_%s_%s\" % modifiers),\n u\"description\": unicode(\"test %s %s\" % modifiers),\n u\"product\": unicode(self.get_detail_url(\n \"product\", self.product_fixture.id)),\n u\"status\": unicode(\"draft\"),\n u\"created_by\": None,\n u\"modified_by\": None,\n u\"modified_on\": self.utcnow.strftime(\"%Y-%m-%d %H:%M:%S\"),\n }\n return fields",
"def setUp(self):\n # Request the Product Id by posting it\n response = self.client.post('/api/productsdata/',\n data=json.dumps(self.product_payload),\n content_type=self.content_type)\n\n # Checking the response\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.json().get('name'), 'Olive Oil')\n\n # Storing ID for further test cases checking\n type(self).product_id = response.json().get('id')",
"def test_data_object_vaporise(self):\n pass",
"def test_custom_attribute_get(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"custom_attribute_values\": [{\n \"attribute_value\": \"my custom attribute value\",\n \"custom_attribute_id\": cad.id,\n }],\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n product_url = response.json[0][1][\"product\"][\"selfLink\"]\n get_response = self.client.get(product_url)\n product = get_response.json[\"product\"]\n self.assertIn(\"custom_attribute_values\", product)\n self.assertEqual(len(product[\"custom_attribute_values\"]), 1)\n cav = product[\"custom_attribute_values\"][0]\n self.assertIn(\"custom_attribute_id\", cav)\n self.assertIn(\"attribute_value\", cav)\n self.assertIn(\"id\", cav)",
"def test_create(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.post(\n '/api/products/', data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 3)\n\n product = Product.objects.get(name='New product')\n self.assertEqual(product.name, 'New product')\n self.assertEqual(product.category, self.category_1)\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)",
"def test_01_product_create(self):\n # Create new product with a replacement product\n product = self.create_product()\n\n # Check recently was created product with default 'In Development'\n # value state and that the replacement was assigned. This case also\n # check the read test.\n self.assertTrue(product)\n self.assertEqual(product.state2, 'draft')\n self.assertTrue(product.replacement_product_ids)\n self.assertEqual(len(product.replacement_product_ids), 1)\n self.assertEqual(product.replacement_product_ids[0].id,\n self.ref('product_lifecycle.product_product_4e'))",
"def test_model_saves_value_to_database( self ):\r\n\t\tretrieved_object = TestModel.objects.get( id = self.m_test_model.id )\r\n\t\tself.assertEqual( retrieved_object.custom_field, custom_data )",
"def test_product_labels(self):\n\n prd = Product.objects.get(id=1)\n # label name\n label_name = prd._meta.get_field('name').verbose_name\n self.assertEqual(label_name, 'name')\n # label description\n label_name = prd._meta.get_field('description').verbose_name\n self.assertEqual(label_name, 'description')\n # label nutrition_grade\n label_name = prd._meta.get_field('nutrition_grade').name\n self.assertEqual(label_name, 'nutrition_grade')\n # label barcode\n label_name = prd._meta.get_field('barcode').verbose_name\n self.assertEqual(label_name, 'barcode')\n # label url\n label_name = prd._meta.get_field('url').verbose_name\n self.assertEqual(label_name, 'url')\n # label url_pic\n label_name = prd._meta.get_field('url_pic').name\n self.assertEqual(label_name, 'url_pic')\n # label store\n label_name = prd._meta.get_field('store').verbose_name\n self.assertEqual(label_name, 'store')\n # label prd_cat\n label_name = prd._meta.get_field('prd_cat').name\n self.assertEqual(label_name, 'prd_cat')\n # label fat\n label_name = prd._meta.get_field('fat').verbose_name\n self.assertEqual(label_name, 'fat')\n # label saturated_fat\n label_name = prd._meta.get_field('saturated_fat').name\n self.assertEqual(label_name, 'saturated_fat')\n # label sugar\n label_name = prd._meta.get_field('sugar').verbose_name\n self.assertEqual(label_name, 'sugar')\n # label salt\n label_name = prd._meta.get_field('salt').verbose_name\n self.assertEqual(label_name, 'salt')",
"def test_product_nullables(self):\n self.assertIsNone(self.product3.main_image)\n self.assertIsNone(self.product3.protein)\n self.assertIsNone(self.product3.fat)\n self.assertIsNone(self.product3.carbs)\n self.assertIsNone(self.product3.calories)",
"def test_fieldValueTypes(self):\n # tests for \"method\" and \"datetime\" values follow later on ...\n # booleans are not tested yet\n\n factory = self.root.manage_addProduct['Formulator']\n factory.manage_add('form', 'ValueTest')\n factory.manage_add('form2', 'ValueTest')\n form = self.root.form\n form.manage_addField('int_field', 'Test Integer Field', 'IntegerField')\n form.manage_addField('float_field', 'Test Float Field', 'FloatField')\n form.manage_addField('date_field', 'Test Date Field', 'DateTimeField')\n form.manage_addField('list_field', 'Test List Field', 'ListField')\n form.manage_addField(\n 'multi_field',\n 'Test Checkbox Field',\n 'MultiCheckBoxField')\n form.manage_addField('link_field', 'Test Link Field', 'LinkField')\n form.manage_addField('empty_field', 'Test Empty Field', 'StringField')\n int_field = form.int_field\n float_field = form.float_field\n date_field = form.date_field\n list_field = form.list_field\n multi_field = form.multi_field\n link_field = form.link_field\n empty_field = form.empty_field\n\n # XXX editing fields by messing with a fake request\n # -- any better way to do this?\n # (could assign to \"values\" directly ...)\n\n default_values = {'field_title': 'Test Title',\n 'field_display_width': '92',\n 'field_required': 'checked',\n 'field_enabled': 'checked',\n }\n try:\n form_values = default_values.copy()\n form_values.update({'field_default': 'None',\n 'field_required': '',\n })\n empty_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '42',\n 'field_enabled': 'checked'})\n int_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '1.7'})\n float_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n # XXX cannot test \"defaults to now\", as this may fail randomly\n form_values = default_values.copy()\n form_values.update({'field_input_style': 'list',\n 'field_input_order': 'mdy',\n 'field_date_only': '',\n 'field_css_class': 'test_css',\n 'field_time_separator': '$'})\n date_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'foo',\n 'field_size': '1',\n 'field_items': 'Foo | foo\\n Bar | bar'})\n list_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update(\n {'field_default': 'foo',\n 'field_size': '3',\n 'field_items': 'Foo | foo\\n Bar | bar\\nBaz | baz',\n 'field_orientation': 'horizontal',\n 'field_view_separator': '<br />\\n'})\n multi_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'http://www.absurd.org',\n 'field_required': '1',\n 'field_check_timeout': '5.0',\n 'field_link_type': 'external',\n })\n link_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n except ValidationError as e:\n self.fail('error when editing field %s; error message: %s' %\n (e.field_id, e.error_text))\n\n form2 = self.root.form2\n\n xml = formToXML(form)\n XMLToForm(xml, form2)\n\n self.assertEqualForms(form, form2)\n\n request = TestRequest()\n request.form['field_int_field'] = '42'\n request.form['field_float_field'] = '2.71828'\n request.form['subfield_date_field_month'] = '11'\n request.form['subfield_date_field_day'] = '11'\n # This field only allows ten years in the future, today 2023-03-14\n request.form['subfield_date_field_year'] = '2033'\n 
request.form['subfield_date_field_hour'] = '09'\n request.form['subfield_date_field_minute'] = '59'\n request.form['field_list_field'] = 'bar'\n request.form['field_multi_field'] = ['bar', 'baz']\n request.form['field_link_field'] = 'http://www.zope.org'\n try:\n result1 = form.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n\n try:\n result2 = form2.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n self.assertEqual(result1, result2)\n self.assertEqual(42, result2['int_field'])\n self.assertEqual(2.71828, result2['float_field'])\n\n # check link field timeout value\n self.assertEqual(link_field.get_value('check_timeout'),\n form2.link_field.get_value('check_timeout'))\n\n # XXX not tested: equal form validation failure on invalid input"
] | [
"0.76896816",
"0.7435254",
"0.74246174",
"0.73524946",
"0.71989226",
"0.7134408",
"0.6947085",
"0.69248456",
"0.68103576",
"0.67975587",
"0.67713046",
"0.67241883",
"0.6704033",
"0.66982996",
"0.6688978",
"0.66654783",
"0.66048956",
"0.659939",
"0.6586787",
"0.6566571",
"0.6554063",
"0.6513096",
"0.6509164",
"0.64822984",
"0.641744",
"0.6379985",
"0.6377534",
"0.6374552",
"0.6373689",
"0.6363517"
] | 0.7449101 | 1 |
Returns the total, nonblank and net loc for all the python files in a directory | def get_folder_total(path):
files = os.listdir(path)
pythonfiles = ['%s/%s' % (path, filename) for filename in files if filename[-3:] == '.py']
total = { 'net': 0, 'total': 0, 'nonblank': 0, 'num_inputs':0 }
for filename in pythonfiles:
with open(filename, 'r') as thisfile:
blob = thisfile.read()
# print filename
thisloc = loc(blob)
for k, v in thisloc.items():
total[k] += v
return total | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loc():\n file_types = (\n ['Python', 'py', '#']\n )\n\n click.echo('Lines of code\\n-------------')\n\n click.echo(\"{0}: {1}\".format(file_types[0], count_locs(file_types[1],\n file_types[2])))\n\n return None",
"def analyze_files(self):\n for file in os.listdir(self.directory):\n if file[-3:] == (\".py\"):\n fopen = open(os.path.join(self.directory, file), \"r\")\n try:\n if not (py_file := fopen):\n raise FileNotFoundError\n\n with py_file: # close file after opening\n class_count: int = 0\n fun_count: int = 0\n l_count: int = 0\n ch_count: int = 0\n for line in py_file: # calculate values for the file\n if line.strip().startswith(\"class \"):\n class_count = class_count+1\n elif line.strip().startswith(\"def \"):\n fun_count = fun_count+1\n\n l_count = l_count+1\n ch_count = ch_count+len(line)\n\n self.files_summary[str(os.path.join(self.directory, file))] = {\"class\": class_count, \"function\": fun_count, \"line\": l_count,\n \"char\": ch_count}\n except FileNotFoundError:\n print(f\"File {py_file} is not found or can not be opened\")\n fopen.close()",
"def checkSum():\n val = 0\n for ext in EXTENSION_GLOBS:\n for f in glob.glob (ext):\n stats = os.stat(f)\n val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]\n return val",
"def fileCounter(directory):",
"def getFileLoc(self):\n\t\trval = []\n\t\tlocalVolTbl = self.file_loc['localVolTbl']\n\t\tnetVolTbl = self.file_loc['netVolTbl']\n\t\t\n\t\tif localVolTbl != None:\n\t\t\trval.extend((FILE_LOC[0],\n\t\t\t\tFILE_LOC[1] + self.file_loc['basePathname'] + \\\n\t\t\t\t\"\\\\\" + self.file_loc['remainPathname']))\n\t\n\t\t\tfor ii in range(len(VOL_TYPE)):\n\t\t\t\tif (self.header['file_attributes'] & (2 ** (ii + 1))) > 0:\n\t\t\t\t\trval.append(VOL_TYPE[ii])\n\t\t\t\t\n\t\t\trval.extend((FILE_LOC[2] + localVolTbl['volume_label'],\n\t\t\t\tFILE_LOC[3] + str(localVolTbl['vol_serial_num'])))\t\t\n\t\n\t\tif netVolTbl != None:\n\t\t\trval.append(FILE_LOC[4] + netVolTbl['net_sharename'] + \\\n\t\t\t\t\"\\\\\" + self.file_loc['remainPathname'])\n\t\treturn rval",
"def check_dir(self):\n if not Path(self.src_dir).exists():\n print('No such directory found:', self.src_dir)\n return\n\n nc_all = self.src_dir + \"/*.nc*\"\n if len(glob.glob(nc_all)) == 0:\n print('No NetCDF files found in:', self.src_dir)\n return\n\n return nc_all",
"def analyze_files(self):\n num_file = 0\n results = dict()\n try:\n list_files = os.listdir(self.directory)\n except FileNotFoundError:\n raise FileNotFoundError(\"Can't find any file\")\n else:\n for file in list_files: #looping the files in the directly\n num_file += 1\n if file.endswith(\".py\"): # Looking for files that end with .py\n try:\n fp = open(os.path.join(self.directory, file), \"r\")\n except FileNotFoundError:\n raise FileNotFoundError(f\"Can't open file no {num_file}\")\n else:\n with fp:\n c_total = 0 #Total length of Characters for the entire file\n filename = file # Storing the file name\n t_line = 0 # Getting the total number of line\n t_def = 0 #Getting the total number of functions\n t_class = 0 #Getting the total number of classes\n \n for line in fp:\n t_line += 1 # Counting each line\n t_char = len(line) #Length of characters for each line\n n_line = line.strip() # gets rid of white spaces and new lines\n c_total += t_char # adding each total char in line to the pervious total char in line\n if n_line.startswith(\"def \"): \n t_def += 1 \n elif n_line.startswith(\"class \"):\n t_class += 1\n results[filename] = {'class': t_class, 'function': t_def, 'line': t_line, 'char': c_total }\n return results",
"def test_case_6():\n print(\"*********Test_case_6***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir', 't1.c')\n result = find_files('.c', path)\n print(result)",
"def test_case_5():\n print(\"*********Test_case_5***********\")\n result = find_files('.c', \"\")\n print(result)",
"def execute(root_dir):\n \n \n #Getting all the file recursively that py files\n lenght=[]\n libraries=[]\n nesting_factors=[]\n param_count=[]\n total_var=[]\n duplicate_for_the_repo=[]\n average_nesting_factor=0\n average_param=0\n code_duplication=0\n avg_var=0\n \n k=root_dir.rsplit('-')\n n=k[0]\n m=k[-1]\n \n urls=[ repo for repo in repo_list if n and m in repo ]\n if urls:\n url=urls[0]\n else:\n url=root_dir\n\n for filename in glob.iglob(root_dir + '/**/*.py', recursive=True):\n #filename=filename.replace(\" \", \"\\\\ \")\n filename=str_to_raw(filename)\n try: \n count=pygount.source_analysis(filename, 'pygount') # counting the line of codes for the py files\n l=count.code\n lenght.append(l)\n library =imported_module(filename)\n for lib in library:\n libraries.append(lib)\n deg_list=nesting_factor(for_loop_position(filename)) \n for deg in deg_list:\n nesting_factors.append(deg)\n\n\n\n for param in parameter_count(filename):\n param_count.append(param)\n for var in variable_count(filename):\n total_var.append(var)\n duplicate_for_the_repo.append(duplicated_line(filename))\n except Exception as e:\n print(\"type error: \" + str(e))\n print(filename)\n \n \n if len(nesting_factors) !=0: \n average_nesting_factor= np.mean(nesting_factors)\n if param_count: \n average_param= np.mean(param_count) \n libraries=unique(libraries)\n repo_count=sum(lenght)\n if total_var:\n avg_var=np.mean(total_var)\n if repo_count and duplicate_for_the_repo:\n code_duplication=(sum(duplicate_for_the_repo)/repo_count)*100\n \n return {'repository_url': url, \n 'number of lines': repo_count, \n 'libraries': libraries,\n 'nesting factor': average_nesting_factor,\n 'code duplication': code_duplication,\n 'average parameters':average_param,\n 'average variables':avg_var}",
"def _get_run_info(self, path, creation_date):\n total = 0\n try:\n for entry in os.scandir(path):\n # Only evaluates size of files and not folders inside raw/proc\n if entry.is_file():\n # if it's a file, use stat() function\n total += entry.stat().st_size\n\n except NotADirectoryError:\n # if `path` isn't a directory, get the file size then\n total = os.path.getsize(path)\n except PermissionError:\n # if for whatever reason we can't open the folder, return 0\n return 0\n\n if os.path.isdir(path):\n validator = RunValidator(path)\n elif path.endswith(\".h5\"):\n validator = FileValidator(H5File(path).files[0])\n else:\n return 0\n\n try:\n validator.run_checks()\n except Exception:\n pass\n return total, str(ValidationError(validator.problems))",
"def test_case_3():\n print(\"*********Test_case_3***********\")\n result = find_files('.c', None)\n print(result)",
"def count_LOC(path):\n re_empty = re.compile(r\"[\\s]*(#|\\n|\\\"\\\"\\\")\")\n re_for = re.compile(r\"for.*in\")\n re_lambda = re.compile(r\"lambda\")\n re_if = re.compile(r\"if.*:\")\n re_def = re.compile(r\"def (?P<fname>\\w+)\\(\")\n\n total_LOC, indent_level = 0, 0\n cur_part = None\n parts = defaultdict(int)\n\n with open(path, 'r') as _file:\n for line in filter(lambda l : not re_empty.match(l), _file):\n\n extra = len( re_for.findall(line) ) - 1 + len( re_lambda.findall(line) ) - 1 + len( re_if.findall(line) ) -1\n\n if extra < 0: extra = 0\n\n total_LOC += 1 + extra\n if cur_part:\n parts[cur_part] += 1 + extra\n\n defs = re_def.search(line)\n if defs:\n cur_part = defs.groupdict()['fname']\n indent_level = first_non_whitespace(line)\n\n cur_indent = first_non_whitespace(line)\n if cur_indent < indent_level:\n cur_part = None\n indent_level = cur_indent\n\n return(total_LOC, parts)",
"def getBaseSrcFile(self) -> List[int]:\n ...",
"def checkSumHelper(arg, dirname, fnames):\n val = 0\n files = [name for name in fnames if os.path.splitext(name)[1] in EXTENSIONS]\n for file in files:\n absFile = os.path.join(dirname,file)\n try:\n stats = os.stat(absFile)\n except OSError,e:\n # This is to skip over temporary files or files\n # nosy doesn't have permission to access\n # print \"Nosy: skipping file %s with error %s\"%(absFile,e)\n continue\n val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]\n arg.append(val)\n return",
"def test_case_1():\n print(\"*********Test_case_1***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('.c', path)\n for file in result:\n print(file)",
"def test_case_4():\n print(\"*********Test_case_4***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('', path)\n for file in result:\n print(file)",
"def print_local_output_files_stats():\n print \"\\n\\nFILES CREATED:\"\n for filename in os.listdir('../output'):\n filesize = os.path.getsize('../output/' + filename)\n print str(filesize) + \"\\t\" + filename\n print \"\\n\"",
"def get_amount_of_data(directory: str):\n size = sum([os.path.getsize(os.path.join(directory, item)) for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(size)\n return size",
"def readfiles(self, dirname , search , notsearch = 'rgvar' , notdir = 'xyvwa'):\n print('We are in the following directory: %s looking for files that contain %s and not %s' %(dirname, search , notsearch))\n dirlist = os.listdir(dirname)\n for filep in dirlist:\n filep = os.path.join(dirname,filep) \n if os.path.islink(filep):\n pass\n elif os.path.isdir(filep):\n m = re.search(notdir , filep)\n if m is None:\n self.readfiles(filep , search, notsearch = notsearch, notdir = notdir )\n elif os.path.isfile(filep) and '.dat' in filep: \n nm = re.search(notsearch, filep)\n m = re.search(search , filep)\n #print m , nm\n if m is not None and nm is None:\n self.plotfiles.append(filep)\n else:\n pass",
"def my_root_listdir(root_dir):\n root_listdir = [\n images_dir\n for images_dir in os.listdir(root_dir)\n if not any(\n characters in images_dir for characters in [\".\", \"test\", \"train\", \"valid\"]\n )\n ]\n summ = 0\n for images_dir in root_listdir:\n summ += len(os.listdir(root_dir + \"/\" + images_dir)) / 2 - 2\n print(\"Sum of images in directories: \", int(summ))\n return root_listdir",
"def retrive_scanning_scheme(self, Nest_data_directory, file_keyword = 'PMT_0Zmax'):\r\n fileNameList = []\r\n# ImgSequenceNum = 0\r\n for file in os.listdir(Nest_data_directory):\r\n if file_keyword in file:\r\n fileNameList.append(file)\r\n \r\n RoundNumberList = []\r\n CoordinatesList = []\r\n for eachfilename in fileNameList:\r\n # Get how many rounds are there\r\n try:\r\n RoundNumberList.append(eachfilename[eachfilename.index('Round'):eachfilename.index('_Grid')])\r\n except:\r\n RoundNumberList.append(eachfilename[eachfilename.index('Round'):eachfilename.index('_Coord')])\r\n \r\n RoundNumberList = list(dict.fromkeys(RoundNumberList)) # Remove Duplicates\r\n \r\n CoordinatesList.append(eachfilename[eachfilename.index('Coord'):eachfilename.index('_PMT')])\r\n CoordinatesList = list(dict.fromkeys(CoordinatesList))\r\n \r\n# print(RoundNumberList, CoordinatesList, fileNameList)\r\n return RoundNumberList, CoordinatesList, fileNameList",
"def add_loc(self):\n self.loc = 0\n for t in self.thys:\n with open(t, 'r') as f:\n for l in f:\n if l.strip():\n self.loc += 1",
"def process_files(file_location, day):\n # construct file path\n file_dir = PREFIX+file_location\n file_pattern = file_dir+'lz_'+day+'*_raw.root'\n # print(file_pattern)\n file_list = glob.glob(file_pattern)\n print(\"There are %s MC files in the requested directory (%s).\" %(len(file_list), file_dir))\n file_names = []\n for f in file_list:\n file_name_only = f.split('/')\n file_names.append(file_name_only[-1])\n return file_names",
"def scan(self,project_dir):\n ftypes = [\".csv\", \".data\", \".xlsx\"]\n print(\"Scanning directory : \",project_dir)\n print(\"Searching for : \",ftypes)\n self.localfiles = {}\n for dirpath, dirnames, filenames in os.walk(project_dir, topdown=True):\n for filename in filenames:\n for ftype in ftypes:\n if ftype in filename:\n self.localfiles[filename] = {\n \"filename\": filename,\n \"filesize\": getsize(os.path.join(dirpath, filename)),\n \"abspath\": os.path.join(dirpath, filename),\n \"dirpath\": dirpath,\n \n }\n print(\"Found These: \",[file_name for file_name in self.localfiles.keys()])",
"def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths",
"def total_files(self):\n command = \"SELECT searched FROM options;\"\n return self.c.execute(command)",
"def get_checkpoint():\n\timport numpy as np\n\n\tcheckpoint = []\n\tfor directory in directories:\n\t\ttry: # try to find folder\n\t\t\tos.chdir('./'+directory)\n\t\texcept:\n\t\t\tcontinue\n\t\tcontents = os.listdir('./')\n\t\tif contents == []: # if folder is empty\n\t\t\tprint(\"No data for\", directory)\n\t\t\tos.chdir('..')\n\t\t\tcontinue\n\t\tcounter = []\n\t\tfor entry in contents:\n\t\t\tentry = entry.split('.')\n\t\t\tnum = entry[0][2:]\n\t\t\ttry: # excludes files that aren't of type x-y.jpg\n\t\t\t\tnum = int(num)\n\t\t\t\tcounter.append(num)\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\tcheckpoint.append(max(counter))\n\t\tos.chdir('..')\n\tcheckpoint = np.mean(checkpoint)\n\treturn checkpoint",
"def target_totalfiles(self):\n return self._cfg.get('totalfiles', None)",
"def test_case_2():\n print(\"*********Test_case_2***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files(None, path)\n print(result)"
] | [
"0.63941246",
"0.6148792",
"0.5896523",
"0.5792946",
"0.56846035",
"0.5674842",
"0.56738776",
"0.56557024",
"0.5595728",
"0.55465335",
"0.55265635",
"0.5509024",
"0.5490125",
"0.54562646",
"0.53724253",
"0.53600603",
"0.53305817",
"0.5296591",
"0.52905095",
"0.52354455",
"0.52294916",
"0.52220196",
"0.5198103",
"0.5195978",
"0.5186539",
"0.51656497",
"0.5163649",
"0.51358575",
"0.5120946",
"0.51164967"
] | 0.6992177 | 0 |
Get vertices dividing a 1d grid. | def get_1d_vertices(grid, cut_edges=False):
if len(grid.shape) > 1:
raise ValueError("grid must be 1d array.")
diff = np.diff(grid)
vert = np.zeros(grid.size+1)
# Interior vertices: halfway between points
vert[1:-1] = grid[0:-1] + diff/2
# Edge vertices: tight or reflect
if cut_edges:
vert[0] = grid[0]
vert[-1] = grid[-1]
else:
vert[0] = grid[0] - diff[0]/2
vert[-1] = grid[-1] + diff[-1]/2
return vert | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vertices(self):\n try:\n return self._vertices\n except:\n self._vertices = [list(x) for x in self.vertex_generator()]\n return self._vertices",
"def vertices(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._vertices",
"def vertices(self) -> list[Point]:\n first_polygon_index = self.rank - max(self.pdim - 1, 1) - 1\n new_shape = self.shape[:first_polygon_index] + (-1, self.shape[-1])\n array = self.array.reshape(new_shape)\n return list(distinct(Point(x, copy=False) for x in np.moveaxis(array, -2, 0)))",
"def vertices(self):\n return self._vertices",
"def get_vertices(self):\n return self.vertices",
"def get_vertices(self):\n return self.vertices",
"def vertices(self):\n return self.pointlist",
"def get_vertices(self):\n return self._vertices",
"def vertices(self):\n return list(self._graph)",
"def _vertices(self, point):\n vertex_0, vertex_1, vertex_2 = tuple(\n gs.take(point, indices=self.faces[:, i], axis=-2) for i in range(3)\n )\n if point.ndim == 3 and vertex_0.ndim == 2:\n vertex_0 = gs.expand_dims(vertex_0, axis=0)\n vertex_1 = gs.expand_dims(vertex_1, axis=0)\n vertex_2 = gs.expand_dims(vertex_2, axis=0)\n return vertex_0, vertex_1, vertex_2",
"def vertexes(self):\n theta = self.orientation\n shifts = np.array([np.cos(theta), np.sin(theta)]) * self.a\n return self.coords + (shifts[:, None] * [-1, 1]).T",
"def get_vertices(self, crs=None):\n if (crs is None) or (crs is self.crs):\n return np.array(self.vertices)\n else:\n vertices = [_reproject(v[:2], self.crs, crs)\n for v in self.vertices]\n return np.array(vertices)",
"def vertices(self):\n d = self.space_dimension()\n v = vector(ZZ, d)\n points = []\n for g in self.minimized_generators():\n for i in range(0,d):\n v[i] = g.coefficient(Variable(i))\n v_copy = copy.copy(v)\n v_copy.set_immutable()\n points.append(v_copy)\n return tuple(points)",
"def get_vertices(self):\n\n return self._vertices",
"def get_vertices(self, crs=None):\n if crs is None:\n vertices = []\n for poly_vertices in self.vertices:\n vertices.append([np.array(v) for v in poly_vertices])\n return vertices\n else:\n vertices = []\n for poly_vertices in self.vertices:\n poly = []\n for ring_vertices in poly_vertices:\n poly.append(np.array([_reproject(v[:2], self.crs, crs)\n for v in ring_vertices]))\n vertices.append(poly)\n return vertices",
"def get_vertices(self):\n vertices = []\n V = [[-self.base_vectors[:,n], self.base_vectors[:,n]] for n in range(self.base_vectors.shape[1])]\n combs = list(itertools.product(*V))\n for cb in combs:\n cb = np.sum(np.array(cb).T, axis=1, keepdims=True)\n vertices.append(self.base_vertices + cb)\n\n vertices = np.concatenate(vertices,axis=1)\n return vertices",
"def get_vertices(self, crs=None):\n if crs is None:\n return [np.array(v) for v in self.vertices]\n else:\n vertices = []\n for line in self.vertices:\n line_vertices = [_reproject(v[:2], self.crs, crs) for v in line]\n vertices.append(np.array(line_vertices))\n return vertices",
"def get_vertices(self):\n return self.vertList.keys()",
"def vertices(self):\n return self.keys()",
"def vertices(self):\n return list(self.__graph.values())",
"def vertices(self):\n return map(Vertex, self._top_exp.vertices())",
"def mesh(self):\n return numpy.meshgrid(*self.edges, indexing='ij')",
"def get_vertices(self):\n return list(self.vertices.keys())",
"def meshgrid(self):\n vecs = self.coord_vecs\n return np.meshgrid(*vecs, indexing='ij')",
"def grid_coords(self):\n return [(x, y) for y in range(self.height) for x in range(self.width)]",
"def get_vertices(self):\n return self.graph.keys()",
"def get_all_vertices(self):\r\n for vertex in self.__neighbours.keys():\r\n yield vertex",
"def vertices(self) -> list[Point]:\n a = Point(self.array[..., 0, :], copy=False)\n b = Point(self.array[..., 1, :], copy=False)\n return [a, b]",
"def _build_grid(self):\n n = self.params['n']\n\n x_min, x_max = min(self.node[:, 0]), max(self.node[:, 0])\n y_min, y_max = min(self.node[:, 1]), max(self.node[:, 1])\n xv = np.linspace(x_min, x_max, num=n, endpoint=True)\n yv = np.linspace(y_min, y_max, num=n, endpoint=True)\n xg, yg = np.meshgrid(xv, yv, sparse=False, indexing='xy')\n\n return xg, yg",
"def make_complete_graph(num_vertices):\n V = num_vertices\n K = V * (V - 1) // 2\n grid = np.zeros([3, K], np.int32)\n k = 0\n for v2 in range(V):\n for v1 in range(v2):\n grid[:, k] = [k, v1, v2]\n k += 1\n return grid"
] | [
"0.6591957",
"0.65517676",
"0.6523746",
"0.6520511",
"0.64833",
"0.64833",
"0.64218307",
"0.6383808",
"0.6376488",
"0.63350326",
"0.6328131",
"0.63177186",
"0.6316762",
"0.6278034",
"0.6251261",
"0.6179584",
"0.61629945",
"0.6152223",
"0.61260945",
"0.6122966",
"0.6108887",
"0.60676056",
"0.6065647",
"0.60630107",
"0.60521007",
"0.60490906",
"0.6042865",
"0.60328555",
"0.6027899",
"0.6008264"
] | 0.76482964 | 0 |
Compute padded image limits for x and y grids. | def pad_limits(xgrid, ygrid, xpad=0., ypad=0., square=None):
xmin, xmax = xgrid.min(), xgrid.max()
ymin, ymax = ygrid.min(), ygrid.max()
dx = xmax - xmin
dy = ymax - ymin
x0 = xmin - xpad*dx
x1 = xmax + xpad*dx
y0 = ymin - ypad*dy
y1 = ymax + ypad*dy
if square:
axis = square
ax_position = axis.get_position()
ax_height = ax_position.height * axis.figure.get_figheight()
ax_width = ax_position.width * axis.figure.get_figwidth()
ax_aspect = ax_height / ax_width
im_height = y1 - y0
im_width = x1 - x0
im_aspect = im_height / im_width
if (im_height/im_width) > (ax_height/ax_width):
# Image too tall
extra_w = im_height/ax_aspect - im_width
x0 -= extra_w / 2
x1 += extra_w / 2
else:
# Image too wide
extra_h = im_width*ax_aspect - im_height
y0 -= extra_h / 2
y1 += extra_h / 2
return [x0, x1, y0, y1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_image_bounds(pixel_meter_size, frame, beam_width_data, additional_pixel_padding_x=0, additional_pixel_padding_y=0):\n\n # Compute the projected locations of all samples so that we can get the extent\n all_bl = []\n all_br = []\n all_fr = []\n all_fl = []\n\n for beam_num in [0, frame.BeamCount / 2, frame.BeamCount - 1]:\n for bin_num in [0, frame.samplesperbeam - 1]:\n bl, br, fr, fl = get_box_for_sample(beam_num, bin_num, frame, beam_width_data)\n\n all_bl.append(bl)\n all_br.append(br)\n all_fr.append(fr)\n all_fl.append(fl)\n\n all_bl = np.array(all_bl)\n all_br = np.array(all_br)\n all_fr = np.array(all_fr)\n all_fl = np.array(all_fl)\n\n # Get the xdim extent\n min_back_left = np.min(all_bl[:,0])\n min_back_right = np.min(all_br[:,0])\n min_front_left = np.min(all_fl[:,0])\n min_front_right = np.min(all_fr[:,0])\n assert min_back_left < min_back_right\n assert min_back_left < min_front_left\n assert min_back_left < min_front_right\n\n max_back_left = np.max(all_bl[:,0])\n max_back_right = np.max(all_br[:,0])\n max_front_left = np.max(all_fl[:,0])\n max_front_right = np.max(all_fr[:,0])\n assert max_back_right > max_back_left\n assert max_back_right > max_front_left\n assert max_back_right > max_front_right\n\n xdim_extent = np.array([min_back_left, max_back_right])\n\n\n # Get the ydim extent\n min_back_left = np.min(all_bl[:,1])\n min_back_right = np.min(all_br[:,1])\n min_front_left = np.min(all_fl[:,1])\n min_front_right = np.min(all_fr[:,1])\n min_front = min(min_front_left, min_front_right)\n assert min_front < min_back_right\n assert min_front < min_back_left\n\n\n max_back_left = np.max(all_bl[:,1])\n max_back_right = np.max(all_br[:,1])\n max_front_left = np.max(all_fl[:,1])\n max_front_right = np.max(all_fr[:,1])\n max_back = max(max_back_left, max_back_right)\n assert max_back > max_front_right\n assert max_back > max_front_left\n\n ydim_extent = np.array([min_front, max_back])\n\n # Determine which meter location corresponds to our \"target center\"\n bl, br, fr, fl = get_box_for_sample(frame.BeamCount / 2, 0, frame, beam_width_data)\n target_center_x = (fl[0] + fr[0]) / 2.\n target_center_y = (bl[1] + fl[1]) / 2.\n\n # Determine the x dimension size and what this corresponds to in meters\n extra_padding_x = pixel_meter_size + pixel_meter_size * additional_pixel_padding_x\n\n # X Min\n xmin_len = target_center_x - xdim_extent[0]\n xp = xmin_len % pixel_meter_size\n xmin_padded = xdim_extent[0] - (extra_padding_x - xp)\n xmin_len = target_center_x - xmin_padded\n x_min_cells = np.abs(xmin_len / pixel_meter_size)\n x_min_meters = target_center_x - xmin_len\n assert x_min_meters <= xdim_extent[0]\n\n\n # X Max\n xmax_len = xdim_extent[1] - target_center_x\n xp = xmax_len % pixel_meter_size\n xmax_padded = xdim_extent[1] + (extra_padding_x - xp)\n xmax_len = xmax_padded - target_center_x\n x_max_cells = np.abs(xmax_len / pixel_meter_size)\n x_max_meters = target_center_x + xmax_len\n assert x_max_meters >= xdim_extent[1]\n\n\n # if we want a specific beam to be the in the middle of the image then we should take the max?\n xdim = int(x_min_cells + x_max_cells)\n x_meter_start = x_min_meters\n x_meter_stop = x_max_meters\n\n # Determine the y dimension size and what this corresponds to in meters\n extra_padding_y = pixel_meter_size + pixel_meter_size * additional_pixel_padding_y\n\n # Y Min\n ymin_len = target_center_y - ydim_extent[0]\n yp = ymin_len % pixel_meter_size\n ymin_padded = ydim_extent[0] - ( extra_padding_y - yp)\n ymin_len = target_center_y - 
ymin_padded\n y_min_cells = np.abs(ymin_len / pixel_meter_size)\n y_min_meters = target_center_y - ymin_len\n assert y_min_meters <= ydim_extent[0]\n\n # Y Max\n ymax_len = ydim_extent[1] - target_center_y\n yp = ymax_len % pixel_meter_size\n ymax_padded = ydim_extent[1] + (extra_padding_y - yp)\n ymax_len = ymax_padded - target_center_y\n y_max_cells = np.abs(ymax_len / pixel_meter_size)\n y_max_meters = target_center_y + ymax_len\n assert y_max_meters >= ydim_extent[1]\n\n ydim = int(y_min_cells + y_max_cells)\n y_meter_start = y_max_meters\n y_meter_stop = y_min_meters\n\n return xdim, ydim, x_meter_start, y_meter_start, x_meter_stop, y_meter_stop",
"def padding(self):\n if not self._pixels:\n return Bounds(0, 0, 0, 0)\n row_inked = tuple(self._1 in _row for _row in self._pixels)\n if not any(row_inked):\n return Bounds(self.width, self.height, 0, 0)\n bottom = row_inked[::-1].index(True)\n top = row_inked.index(True)\n col_inked = tuple(self._1 in _col for _col in zip(*self._pixels))\n left = col_inked.index(True)\n right = col_inked[::-1].index(True)\n return Bounds(left, bottom, right, top)",
"def _axes_limits(image_width, fractional_padding=0.5):\n # calculate widths and padding for each item\n overlay_width = image_width\n colorbar_width = int(0.05 * image_width)\n xy_width = image_width\n overlay_colorbar_padding_width = int(0.05 * image_width)\n colorbar_xy_padding_width = int(fractional_padding * image_width)\n\n # set limits based on item sizes\n left_lim = 0\n right_lim = overlay_width\n overlay_lim = (left_lim, right_lim)\n\n left_lim = right_lim + overlay_colorbar_padding_width\n right_lim = left_lim + colorbar_width\n colorbar_lim = (left_lim, right_lim)\n\n left_lim = right_lim + colorbar_xy_padding_width\n right_lim = left_lim + xy_width\n xy_lim = (left_lim, right_lim)\n\n return colorbar_lim, overlay_lim, xy_lim",
"def visualize_grid(Xs, ubound=255.0, padding=1):\n (N, H, W, C) = Xs.shape\n grid_size = int(ceil(sqrt(N)))\n grid_height = H * grid_size + padding * (grid_size - 1)\n grid_width = W * grid_size + padding * (grid_size - 1)\n grid = np.zeros((grid_height, grid_width, C))\n next_idx = 0\n y0, y1 = 0, H\n for y in range(grid_size):\n x0, x1 = 0, W\n for x in range(grid_size):\n if next_idx < N:\n img = Xs[next_idx]\n low, high = np.min(img), np.max(img)\n grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)\n # grid[y0:y1, x0:x1] = Xs[next_idx]\n next_idx += 1\n x0 += W + padding\n x1 += W + padding\n y0 += H + padding\n y1 += H + padding\n # grid_max = np.max(grid)\n # grid_min = np.min(grid)\n # grid = ubound * (grid - grid_min) / (grid_max - grid_min)\n return grid",
"def checkRange(x,y,w,h,maxW,maxH):\n if x < 0:\n x = 0\n if y < 0:\n y = 0\n if x + w >= maxW:\n w = maxW-x-1\n if y + h >= maxH:\n h = maxH-y-1\n return [x,y,w,h]",
"def __clip_bbox(min_y, min_x, max_y, max_x):\n min_y = tf.clip_by_value(min_y, 0.0, 1.0)\n min_x = tf.clip_by_value(min_x, 0.0, 1.0)\n max_y = tf.clip_by_value(max_y, 0.0, 1.0)\n max_x = tf.clip_by_value(max_x, 0.0, 1.0)\n return min_y, min_x, max_y, max_x",
"def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row",
"def _get_bounds(x, y, size):\n x = np.array(np.atleast_1d(x))\n y = np.array(np.atleast_1d(y))\n\n lower_x = np.rint(x - size[0]/2)\n lower_y = np.rint(y - size[1]/2)\n\n return np.stack((np.stack((lower_x, lower_x + size[0]), axis=1),\n np.stack((lower_y, lower_y + size[1]), axis=1)), axis=1).astype(int)",
"def crop(masks, boxes, padding: int = 1):\n h, w, n = masks.shape\n x1, x2 = sanitize_coordinates(boxes[:, 0:1:1], boxes[:, 2:3:1], w, padding, cast=False)\n y1, y2 = sanitize_coordinates(boxes[:, 1:2:1], boxes[:, 3:4:1], h, padding, cast=False)\n\n cast = P.Cast()\n broadcast_to = P.BroadcastTo((h, w, n))\n row = broadcast_to((P.range(Tensor(0, mindspore.int32),\n Tensor(w, mindspore.int32),\n Tensor(1, mindspore.int32)).view(1, -1, 1)))\n rows = cast(row, x1.dtype)\n col = broadcast_to((P.range(Tensor(0, mindspore.int32),\n Tensor(w, mindspore.int32),\n Tensor(1, mindspore.int32)).view(-1, 1, 1)))\n cols = cast(col, x2.dtype)\n\n\n masks_left = rows >= x1.view(1, 1, -1)\n masks_right = rows < x2.view(1, 1, -1)\n masks_left = P.Cast()(masks_left, mindspore.float16)\n masks_right = P.Cast()(masks_right, mindspore.float16)\n crop_mask = masks_left * masks_right\n masks_up = cols >= y1.view(1, 1, -1)\n masks_up = P.Cast()(masks_up, mindspore.float16)\n crop_mask *= masks_up\n masks_down = cols < y2.view(1, 1, -1)\n masks_down = P.Cast()(masks_down, mindspore.float16)\n crop_mask *= masks_down\n\n return masks * crop_mask",
"def image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode=\"cv2.BORDER_CONSTANT\"):\r\n #Convert position of cell from \"um\" to \"pixel index\"\r\n pos_x,pos_y = pos_x/pix,pos_y/pix \r\n\r\n for i in range(len(images)):\r\n image = images[i]\r\n \r\n #Compute the edge-coordinates that define the cropped image\r\n y1 = np.around(pos_y[i]-final_h/2.0) \r\n x1 = np.around(pos_x[i]-final_w/2.0) \r\n y2 = y1+final_h \r\n x2 = x1+final_w\r\n\r\n #Are these coordinates within the oringinal image?\r\n #If not, the image needs padding\r\n pad_top,pad_bottom,pad_left,pad_right = 0,0,0,0\r\n\r\n if y1<0:#Padding is required on top of image\r\n pad_top = int(abs(y1))\r\n y1 = 0 #set y1 to zero and pad pixels after cropping\r\n \r\n if y2>image.shape[0]:#Padding is required on bottom of image\r\n pad_bottom = int(y2-image.shape[0])\r\n y2 = image.shape[0]\r\n \r\n if x1<0:#Padding is required on left of image\r\n pad_left = int(abs(x1))\r\n x1 = 0\r\n \r\n if x2>image.shape[1]:#Padding is required on right of image\r\n pad_right = int(x2-image.shape[1])\r\n x2 = image.shape[1]\r\n \r\n #Crop the image\r\n temp = image[int(y1):int(y2),int(x1):int(x2)]\r\n\r\n if pad_top+pad_bottom+pad_left+pad_right>0:\r\n if padding_mode==\"Delete\":\r\n temp = np.zeros_like(temp)\r\n else:\r\n #Perform all padding operations in one go\r\n temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_mode))\r\n \r\n images[i] = temp\r\n \r\n return images",
"def boundary(self,image,i,j):\r\n if((j >=25- self.padding) and (j <=175+ self.padding ) and (i >= 425- self.padding) and (i <= 575+ self.padding)):\r\n image[self.maximum_size-i,j,:]=0,0,0\r\n self.image_p[i,j,:]=2\r\n #print(2)\r",
"def image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode=\"cv2.BORDER_CONSTANT\"):\r\n #Convert position of cell from \"um\" to \"pixel index\"\r\n pos_x = [pos_x_/pix for pos_x_ in pos_x]\r\n pos_y = [pos_y_/pix for pos_y_ in pos_y]\r\n padding_modes = [\"cv2.BORDER_CONSTANT\",\"cv2.BORDER_REFLECT\",\"cv2.BORDER_REFLECT_101\",\"cv2.BORDER_REPLICATE\",\"cv2.BORDER_WRAP\"]\r\n \r\n for i in range(len(images)):\r\n image = images[i]\r\n \r\n #Compute the edge-coordinates that define the cropped image\r\n y1 = np.around(pos_y[i]-final_h/2.0) \r\n x1 = np.around(pos_x[i]-final_w/2.0) \r\n y2 = y1+final_h \r\n x2 = x1+final_w\r\n\r\n #Are these coordinates within the oringinal image?\r\n #If not, the image needs padding\r\n pad_top,pad_bottom,pad_left,pad_right = 0,0,0,0\r\n\r\n if y1<0:#Padding is required on top of image\r\n pad_top = int(abs(y1))\r\n y1 = 0 #set y1 to zero and pad pixels after cropping\r\n \r\n if y2>image.shape[0]:#Padding is required on bottom of image\r\n pad_bottom = int(y2-image.shape[0])\r\n y2 = image.shape[0]\r\n \r\n if x1<0:#Padding is required on left of image\r\n pad_left = int(abs(x1))\r\n x1 = 0\r\n \r\n if x2>image.shape[1]:#Padding is required on right of image\r\n pad_right = int(x2-image.shape[1])\r\n x2 = image.shape[1]\r\n \r\n #Crop the image\r\n temp = image[int(y1):int(y2),int(x1):int(x2)]\r\n\r\n if pad_top+pad_bottom+pad_left+pad_right>0:\r\n if padding_mode.lower()==\"delete\":\r\n temp = np.zeros_like(temp)\r\n else:\r\n #Perform all padding operations in one go\r\n if padding_mode.lower()==\"alternate\":\r\n ind = rand_state.randint(low=0,high=len(padding_modes))\r\n padding_mode = padding_modes[ind]\r\n temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_modes[ind]))\r\n else:\r\n temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_mode))\r\n \r\n images[i] = temp\r\n \r\n return images",
"def __padding(self, image, boxes, height, width):\n temp = boxes[:, :4].astype(np.int)\n y1 = np.where(temp[:, 0] < 0)[0]\n if len(y1) > 0:\n temp[y1, 0] = 0\n x1 = np.where(temp[:, 1] < 0)[0]\n if len(x1) > 0:\n temp[x1, 0] = 0\n y2 = np.where(temp[:, 2] > image.shape[0] - 1)[0]\n if len(y2) > 0:\n temp[y2, 0] = image.shape[0] - 1\n x2 = np.where(temp[:, 3] > image.shape[1] - 1)[0]\n if len(x2) > 0:\n temp[x2, 0] = image.shape[1] - 1\n pad_top = np.abs(temp[:, 0] - boxes[:, 0]).astype(np.int)\n pad_left = np.abs(temp[:, 1] - boxes[:, 1]).astype(np.int)\n pad_bottom = np.abs(temp[:, 2] - boxes[:, 2]).astype(np.int)\n pad_right = np.abs(temp[:, 3] - boxes[:, 3]).astype(np.int)\n input_data = np.empty([boxes.shape[0], 3, height, width], dtype=np.float32)\n for i in range(boxes.shape[0]):\n crop_img = image[temp[i, 0]:temp[i, 2] + 1, temp[i, 1]:temp[i, 3] + 1, :]\n crop_img = cv2.copyMakeBorder(crop_img, pad_top[i], pad_bottom[i], \\\n pad_left[i], pad_right[i], cv2.BORDER_CONSTANT, value=0)\n if crop_img is None:\n continue\n crop_img = cv2.resize(crop_img, (width, height)).astype(np.float32)\n crop_img[:, :, 0] -= self.mean[0]\n crop_img[:, :, 1] -= self.mean[1]\n crop_img[:, :, 2] -= self.mean[2]\n crop_img *= self.scale_factor\n crop_img = np.transpose(crop_img, (2, 0, 1))\n input_data[i] = crop_img.copy()\n return input_data",
"def _get_padded_grid_(ax):\n ax_pad = np.zeros(ax.size + 2)\n ax_pad[1:-1] = ax\n ax_pad[0] = ax[0] - (ax[2] - ax[1])\n ax_pad[-1] = ax[-1] + (ax[2] - ax[1])\n\n return ax_pad",
"def crop_mask(mask, crop_offset=0.5):\n maxx, maxy, minx, miny = 0, 0, 0, 0\n for r in range(0, mask.shape[0]):\n if np.min(mask[r]) < 255:\n minx = int(r + mask.shape[0] * (crop_offset / 100))\n break\n\n for r in range(mask.shape[0] - 1, 0, -1):\n if np.min(mask[r]) < 255:\n maxx = int(r - mask.shape[0] * (crop_offset / 100))\n break\n\n for c in range(0, mask.shape[1]):\n if np.min(mask[:, c]) < 255:\n miny = int(c + mask.shape[1] * (crop_offset / 100))\n break\n\n for c in range(mask.shape[1] - 1, 0, -1):\n if np.min(mask[:, c]) < 255:\n maxy = int(c - mask.shape[1] * (crop_offset / 100))\n break\n\n return (maxx, maxy, minx, miny)",
"def _grid_around_star(self, x0, y0, data):\n lenx, leny = data.shape\n xmin, xmax = max(x0 - self._box / 2, 0), min(x0 + self._box / 2 + 1, lenx - 1)\n ymin, ymax = max(y0 - self._box / 2, 0), min(y0 + self._box / 2 + 1, leny - 1)\n return np.mgrid[int(xmin) : int(xmax), int(ymin) : int(ymax)]",
"def boundary1(self,image,i,j):\r\n if((j >=375- self.padding) and (j <=625+ self.padding ) and (i >= 425- self.padding) and (i <= 575+ self.padding)):\r\n image[self.maximum_size-i,j,:]=0,0,0\r\n self.image_p[i,j,:]=2",
"def adjustCoordByBorders(self, x, y):\n (x_offset, y_offset) = (0, 0)\n if (127 < x < (255+4)):\n x_offset -= 4\n \n if (31 < y < (63+4*1)):\n y_offset -= 4\n elif ( (63+4*1) < y < (95+4*2) ):\n y_offset -= 4*2\n elif ( (95+4*2) < y < (127+4*3) ):\n y_offset -= 4*3\n elif ( (127+4*3) < y < (159+4*4) ):\n y_offset -= 4*4\n elif ( (159+4*4) < y < (191+4*5) ):\n y_offset -= 4*5\n elif ( (191+4*5) < y < (223+4*6) ):\n y_offset -= 4*6\n elif ( (223+4*6) < y < (255+4*7) ):\n y_offset -= 4*7\n return x_offset, y_offset",
"def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8",
"def _clip_boxes(boxes, im_shape):\n # x1 >= 0\n boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)\n # y1 >= 0\n boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)\n # x2 < im_shape[1]\n boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)\n # y2 < im_shape[0]\n boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)\n return boxes",
"def _clip_boxes(boxes, im_shape):\n # x1 >= 0\n boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)\n # y1 >= 0\n boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)\n # x2 < im_shape[1]\n boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)\n # y2 < im_shape[0]\n boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)\n return boxes",
"def _compute_equal_axes_ranges(x_min, x_max, y_min, y_max):\n\n x_axis_min, x_axis_max, y_axis_min, y_axis_max = x_min, x_max, y_min, y_max\n x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)\n if x_range > y_range:\n y_center = (y_max + y_min) / 2\n y_axis_max = y_center + x_range / 2\n y_axis_min = y_center - x_range / 2\n else:\n x_center = (x_max + x_min) / 2\n x_axis_max = x_center + y_range / 2\n x_axis_min = x_center - y_range / 2\n\n return x_axis_min, x_axis_max, y_axis_min, y_axis_max",
"def clip_grid(grid, xr, yr, extra_m=5000):\n\n min_x = np.min(xr)\n min_y = np.min(yr)\n max_x = np.max(xr)\n max_y = np.max(yr)\n\n mask_x = np.logical_and(grid.x['data'] > min_x - extra_m,\n grid.x['data'] < max_x + extra_m)\n mask_y = np.logical_and(grid.y['data'] > min_y - extra_m,\n grid.y['data'] < max_y + extra_m)\n\n grid.x['data'] = grid.x['data'][mask_x]\n grid.y['data'] = grid.y['data'][mask_y]\n for f in grid.fields.keys():\n nz = len(grid.fields[f]['data']) # Nb of z levels\n grid.fields[f]['data'] = grid.fields[f]['data'][np.ix_(range(nz),\n mask_y, mask_x)]\n grid.nx = len(grid.x['data'])\n grid.ny = len(grid.y['data'])\n return grid",
"def visualize_grid(Xs, ubound=255.0, padding=1):\n pixel_sz = 2\n (H, W, C, N) = Xs.shape\n\n Xs_resize = np.zeros((H*pixel_sz, W*pixel_sz, C, N))\n Xs = (ubound*(Xs-np.min(Xs))/(np.max(Xs)-np.min(Xs))).astype('uint8')\n\n for c in range(C):\n for n in range(N):\n Xs_resize[:,:,c,n] = imresize(Xs[:,:,c,n], 200, interp='nearest')\n Xs = Xs_resize\n\n (H, W, C, N) = Xs.shape\n low, high = np.min(Xs), np.max(Xs)\n\n if C==1 or C==3:\n grid_size_H = int(ceil(sqrt(N)))\n grid_size_W = int(ceil(sqrt(N)))\n else:\n grid_size_H = N\n grid_size_W = C\n\n count = 0\n grid_height = H * grid_size_H + padding * (grid_size_H-1)\n grid_width = W * grid_size_W + padding * (grid_size_W-1)\n grid = np.zeros((grid_height, grid_width, C))\n y0, y1 = 0, H\n for y in range(grid_size_H):\n x0, x1 = 0, W\n for x in range(grid_size_W):\n if C==1 or C==3:\n img = Xs[:,:,:,count]\n count += 1\n else:\n img = np.expand_dims(Xs[:,:,x,y], axis=-1)\n\n grid[y0:y1, x0:x1, :] = ubound * (img - low) / (high - low)\n x0 += W + padding\n x1 += W + padding\n\n y0 += H + padding\n y1 += H + padding\n\n if C!=3:\n grid = grid[:,:,0]\n return grid",
"def _fix_span(x, y, xmin, xmax):\n if x.ndim != 1:\n return x, y\n\n # Roll in same direction if some points on right-edge extend\n # more than 360 above min longitude; *they* should be on left side\n lonroll = np.where(x > xmin + 360)[0] # tuple of ids\n if lonroll.size: # non-empty\n roll = x.size - lonroll.min()\n x = np.roll(x, roll)\n y = np.roll(y, roll, axis=-1)\n x[:roll] -= 360 # make monotonic\n\n # Set NaN where data not in range xmin, xmax. Must be done\n # for regional smaller projections or get weird side-effects due\n # to having valid data way outside of the map boundaries\n y = y.copy()\n if x.size - 1 == y.shape[-1]: # test western/eastern grid cell edges\n y[..., (x[1:] < xmin) | (x[:-1] > xmax)] = np.nan\n elif x.size == y.shape[-1]: # test the centers and pad by one for safety\n where = np.where((x < xmin) | (x > xmax))[0]\n y[..., where[1:-1]] = np.nan\n\n return x, y",
"def calculate_min_max_tiles(self):",
"def _cell_bounds_xy(self, x, y, dx = None):\n\t\tif dx is None:\n\t\t\tlev = bhpix.get_pixel_level(x, y)\n\t\t\tdx = bhpix.pix_size(lev)\n\t\t\t##dx = bhpix.pix_size(self.level)\n\n\t\tbounds = Polygon.Shapes.Rectangle(dx)\n\t\tbounds.shift(x - 0.5*dx, y - 0.5*dx);\n\n\t\tif fabs(fabs(x) - fabs(y)) == 0.5:\n\t\t\t# If it's a \"halfpixel\", return a triangle\n\t\t\t# by clipping agains the sky\n\t\t\tbounds &= bn.ALLSKY\n\n\t\treturn bounds",
"def _filter_img_boxes(boxes, im_info):\n padding = 50\n w_min = -padding\n w_max = im_info[1] + padding\n h_min = -padding\n h_max = im_info[0] + padding\n keep = np.where((w_min <= boxes[:,0]) & (boxes[:,2] <= w_max) & (h_min <= boxes[:,1]) &\n (boxes[:,3] <= h_max))[0]\n return keep",
"def sanitize_coordinates(_x1, _x2, img_size: int, padding: int = 0, cast: bool = True):\n _x1 = _x1 * img_size\n _x2 = _x2 * img_size\n if cast:\n _x1 = _x1.long()\n _x2 = _x2.long()\n coordinates_min = P.Minimum()\n coordinates_max = P.Maximum()\n x1 = coordinates_min(_x1, _x2)\n x2 = coordinates_max(_x1, _x2)\n\n select = P.Select()\n zeroslike = P.ZerosLike()\n oneslike = P.OnesLike()\n min_tensor = zeroslike(x1 - padding)\n x1 = select(min_tensor > x1 - padding, min_tensor, x1 - padding)\n\n max_tensor = oneslike(x2 + padding) * img_size\n x2 = select(x2 + padding > max_tensor, max_tensor, x2 + padding)\n\n\n return x1, x2",
"def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right"
] | [
"0.6500194",
"0.6199433",
"0.6105291",
"0.60281",
"0.59161913",
"0.5894251",
"0.585068",
"0.58396924",
"0.5810349",
"0.5791982",
"0.57880354",
"0.5763434",
"0.57571024",
"0.57165116",
"0.57111406",
"0.5668063",
"0.56606424",
"0.565698",
"0.56563175",
"0.5641682",
"0.5641682",
"0.5638964",
"0.56364",
"0.56123",
"0.55897206",
"0.55895275",
"0.5585547",
"0.55576754",
"0.55565083",
"0.5540652"
] | 0.7511422 | 0 |
Generate an object formatter for links. | def link(text, link_func):
def object_formatter(v, c, m, p):
"""Format object view link."""
return Markup('<a href="{0}">{1}</a>'.format(
link_func(m), text))
return object_formatter | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def object_formatter(v, c, m, p):\n return Markup('<a href=\"{0}\">{1}</a>'.format(\n link_func(m), text))",
"def __repr__(self):\n if self.rest:\n rest_repr = ', ' + repr(self.rest)\n else:\n rest_repr = ''\n return 'Link({0}{1})'.format(self.first, rest_repr)",
"def linkify(obj, link_text=''):\n try:\n lst = []\n # if obj is not a list, convert it into a list\n if not getattr(obj, '__iter__', False):\n obj = [obj]\n for item in obj:\n if hasattr(item, 'child'):\n item = item.child\n if link_text == '':\n l_text = unicode(item)\n else:\n try:\n link_text = link_text.encode('ascii')\n l_text = getattr(item, link_text, link_text)\n except UnicodeEncodeError:\n l_text = link_text\n if not (isinstance(item, Content) and\n isinstance(l_text, SafeText)):\n l_text = filter.force_escape(l_text)\n format_args = (item.get_absolute_url(), l_text)\n lst.append(mark_safe('<a href=\\'%s\\'>%s</a>' % format_args))\n\n # nonlists obj's should be returned as nonlists\n return lst[0] if len(lst) == 1 else lst\n except:\n return ''",
"def fmt(e):\n name = str(e.label.first() if hasattr(e, 'label') and e.label else e)\n if re.match(r'^[a-z]+://', name):\n return link.format(name=name, url=name)\n if hasattr(e, 'label') and e.label:\n name = e.label.first()\n url = name if re.match(r'^[a-z]+://', name) else '#' + name\n return link.format(name=name, url=url)\n elif re.match(r'^[a-z]+://', str(e)):\n return link.format(name=e, url=e)\n else:\n return str(e).replace('owl.', 'owl:')",
"def __repr__(self):\n ## return str(self.first) + \" -> \" + repr(self.rest)\n if self.rest is Link.empty:\n rest_str = \"\"\n else:\n rest_str = \", \" + repr(self.rest)\n return \"Link({0}{1})\".format(self.first, rest_str)",
"def __str__(self):\n\t\treturn '{0} ({1})'.format (self.name, self.link)",
"def format(self, obj):\n pass",
"def format(self, obj):\n pass",
"def __repr__(self):\n if self.rest is Link.empty:\n rest = ''\n else:\n rest = ', ' + repr(self.rest)\n return 'Link({0}{1})'.format(self.first, rest)",
"def format_link_segment(value):\n format_type = json_api_settings.FORMAT_RELATED_LINKS\n return format_value(value, format_type)",
"def href(obj):\n if isinstance(obj, Filing):\n return reverse('filing', args=(obj.region, obj.name, obj.period_name))\n else:\n raise ValueError('cannot build a URL for {}.{} objects'.format(\n type(obj).__module__, type(obj).__name__))",
"def __str__(self):\n return '<a href=\"%s\" class=\"%s\" %s>%s</a>' % (self.url, self.cssclass, self.options, self.text)",
"def to_html(self) -> str:\n return f'''\n <a href=\"{self.link}\"> ({self.source_name}, {self.timestamp.strftime('%Y')}) </a>\n '''",
"def linkified_description(self):\n links = []\n def linkify(matchobj, links=links):\n if '|' in matchobj.group(1):\n url = matchobj.group(1).split('|')\n link = format_html('<a href=\"{0}\" target=\"_blank\">{1}</a>', url[0], url[1])\n else:\n link = format_html('<a href=\"{0}\" target=\"_blank\">{1}</a>', self.url, matchobj.group(1))\n links.append(link)\n return '{%d}' % (len(links) - 1)\n\n fmt = re.sub(r'\\[\\[([^\\]]+)\\]\\]', linkify, self.description)\n return format_html(fmt, *links)",
"def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/collection/{}'.format(obj.id)\n )",
"def linkify(field_name):\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify",
"def linkify(field_name):\n\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify",
"def _format_obj(cls, **kwargs):\n def doc_rebuilder(obj):\n if kwargs.pop('_VOID_',False):\n return ''\n try:\n doc = getattr(obj,'__doc__')\n assert doc\n except:\n return ''\n else:\n return doc.format(**kwargs) # str(doc).format(**kwargs)\n return doc_rebuilder",
"def generate_link(resources):\n\n links = \"\"\n for i, resource in enumerate(resources):\n link = \"<\" + resource[\"path\"] + \">\"\n if \"parameters\" in resource:\n for parameter in resource[\"parameters\"]:\n link += \";\" + str(parameter) + \"=\" + str(resource[\"parameters\"][parameter])\n links += link\n if i != len(resources) - 1:\n links += \",\"\n return links",
"def pretty(self, **kwargs):\r\n raise NotImplementedError",
"def gen_links(text):\n return []",
"def deriveLinkfromObject(obj, scale=1, parent_link=True, parent_objects=True,\n reparent_children=True, nameformat='', scaleByBoundingBox=False):\n log('Deriving link from ' + nUtils.getObjectName(obj), level=\"INFO\")\n # create armature/bone\n bUtils.toggleLayer('link', True)\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.armature_add()\n newlink = bpy.context.active_object\n newlink.name = obj.name + \"_link\"\n newlink.matrix_world = obj.matrix_world\n newlink.phobostype = 'link'\n if scaleByBoundingBox:\n bound_box = (\n max([c[0] for c in obj.bound_box]),\n max([c[1] for c in obj.bound_box]),\n max([c[2] for c in obj.bound_box]),\n )\n newlink.scale = [max(bound_box)*scale] * 3\n else:\n newlink.scale = [scale] * 3\n if obj.parent is not None and parent_link:\n eUtils.parentObjectsTo(newlink, obj.parent)\n if parent_objects:\n eUtils.parentObjectsTo(obj, newlink)\n if reparent_children:\n eUtils.parentObjectsTo(list(obj.children), newlink)\n if bpy.context.scene.phoboswireframesettings.links:\n newlink.display_type = \"WIRE\"\n return newlink",
"def getLink(self):",
"def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output",
"def format(self):\n ...",
"def format_element(bfo, links=\"no\", category=\"yes\", mirrors=\"yes\"):\n\n arxiv=get_arxiv(bfo, category=\"no\")\n\n if len(arxiv) == 0:\n return\n\n out = ''\n if links == 'yes':\n arxiv_ref = arxiv[0] # Take only first one\n out += '''\n<a href=\"http://arXiv.org/abs/%(ref)s\">Abstract</a> and\n<a href=\"http://arXiv.org/ps/%(ref)s\">Postscript</a>\n and <a href=\"http://arXiv.org/pdf/%(ref)s\">PDF</a> from arXiv.org'''% \\\n {'ref': arxiv_ref}\n\n if mirrors.lower()=='yes':\n out+='''\n (mirrors:\n<a href=\"http://au.arXiv.org/abs/%(ref)s\">au</a>\n\n<a href=\"http://br.arXiv.org/%(ref)s\">br</a>\n<a href=\"http://cn.arXiv.org/abs/%(ref)s\">cn</a>\n<a href=\"http://de.arXiv.org/abs/%(ref)s\">de</a>\n<a href=\"http://es.arXiv.org/abs/%(ref)s\">es</a>\n<a href=\"http://fr.arXiv.org/abs/%(ref)s\">fr</a>\n<a href=\"http://il.arXiv.org/abs/%(ref)s\">il</a>\n<a href=\"http://in.arXiv.org/abs/%(ref)s\">in</a>\n<a href=\"http://it.arXiv.org/abs/%(ref)s\">it</a>\n<a href=\"http://jp.arXiv.org/abs/%(ref)s\">jp</a>\n<a href=\"http://kr.arXiv.org/abs/%(ref)s\">kr</a>\n<a href=\"http://ru.arXiv.org/abs/%(ref)s\">ru</a>\n<a href=\"http://tw.arXiv.org/abs/%(ref)s\">tw</a>\n<a href=\"http://uk.arXiv.org/abs/%(ref)s\">uk</a>\n<a href=\"http://aps.arXiv.org/abs/%(ref)s\">aps</a>\n<a href=\"http://lanl.arXiv.org/abs/%(ref)s\">lanl</a>)''' % \\\n {'ref': arxiv_ref}\n\n\n else: # print only value\n out = ', '.join(get_arxiv(bfo,category))\n\n return out",
"def __str__(self, printODData = False):\n networkStr = \"Link\\tFlow\\tCost\\n\"\n for ij in sorted(self.link, key=lambda ij : self.link[ij].sortKey):\n networkStr += \"%s\\t%f\\t%f\\n\" % (ij, self.link[ij].flow, self.link[ij].cost)\n if printODData == True:\n networkStr += \"\\n\"\n networkStr += \"OD pair\\tDemand\\tLeastCost\\n\"\n for ODpair in self.ODpair:\n networkStr += \"%s\\t%f\\t%f\\n\" % (ODpair, self.ODpair[ODpair].demand, self.ODpair[ODpair].leastCost)\n return networkStr",
"def detail_link(db_obj, text=None):\n\n def build_link(obj):\n name = str(obj) if text is None else text\n return _make_link(obj.detail_url(), name)\n\n return mark_safe(', '.join(map(build_link, as_list(db_obj))))",
"def undo_format_link_segment(value):\n\n if json_api_settings.FORMAT_RELATED_LINKS:\n return format_value(value, \"underscore\")\n\n return value",
"def format(obj): # pylint: disable=W0622\n# print '>>', obj\n if hasattr(obj, 'format'):\n return obj.format()\n return \"%s\" % obj"
] | [
"0.784745",
"0.6504912",
"0.64552146",
"0.6426217",
"0.62232155",
"0.6217698",
"0.60861593",
"0.60861593",
"0.60610193",
"0.60496616",
"0.5940354",
"0.59321237",
"0.5914688",
"0.58717525",
"0.5870953",
"0.5845969",
"0.5824419",
"0.58027077",
"0.57641745",
"0.57560444",
"0.57327914",
"0.5715841",
"0.5698075",
"0.5693841",
"0.5693164",
"0.5693031",
"0.56775624",
"0.5668995",
"0.5656696",
"0.5642389"
] | 0.72780514 | 1 |
Format object view link. | def object_formatter(v, c, m, p):
return Markup('<a href="{0}">{1}</a>'.format(
link_func(m), text)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def link(text, link_func):\n def object_formatter(v, c, m, p):\n \"\"\"Format object view link.\"\"\"\n return Markup('<a href=\"{0}\">{1}</a>'.format(\n link_func(m), text))\n return object_formatter",
"def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/collection/{}'.format(obj.id)\n )",
"def object_view_with_links(obj, request):\n _view = _object_view(obj, request)\n obj_link = obj_ui_link = \"\"\n # UI link to the real business object referenced as topic\n if isinstance(obj, Posting):\n obj_ui_link = request.link(obj, app=get_root(request).child(\"activitystream\"))\n else:\n try:\n obj_link = request.link(obj, app=_get_collection_app(request))\n except morepath.error.LinkError:\n pass\n obj_ui_link = get_ui_link(request, obj) or \"\"\n _view.update({\n \"object_id\": obj_link,\n \"object_ui_link\": obj_ui_link})\n return _view",
"def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/media/{}'.format(obj.id)\n )",
"def _change_link(self, obj, display_text=None):\n if not obj:\n return '?'\n fragments = [obj._meta.app_label, obj._meta.model_name, 'change']\n change_url = reverse(\"admin:{}\".format('_'.join(fragments)),\n args=(obj.id,))\n display_text = display_text or unicode(obj)\n return format_html(\"<a href={}>{}</a>\", change_url, display_text)",
"def href(obj):\n if isinstance(obj, Filing):\n return reverse('filing', args=(obj.region, obj.name, obj.period_name))\n else:\n raise ValueError('cannot build a URL for {}.{} objects'.format(\n type(obj).__module__, type(obj).__name__))",
"def view_link(self, obj):\n if obj.cwr:\n url = reverse(\n 'admin:music_publisher_ackimport_change', args=(obj.id,))\n url += '?preview=true'\n return mark_safe(\n '<a href=\"{}\" target=\"_blank\">View CWR</a>'.format(url))",
"def pybb_link(object, anchor=''):\n\n url = hasattr(object, 'get_absolute_url') and object.get_absolute_url() or None\n #noinspection PyRedeclaration\n anchor = anchor or smart_text(object)\n return mark_safe('<a href=\"%s\">%s</a>' % (url, escape(anchor)))",
"def viewurilink(uri) :\n\tname = schema.uri_to_name(uri)\n\tif name :\n\t\turl = '/view/name/' + quote(name)\n\telif uri[:7] == \"http://\" :\n\t\turl = '/view/uri/' + uri[7:]\n\telse :\n\t\turl = '/view/uri?id=' + uri\n\t\n\treturn '<a href=\"%s\">%s</a>' % (url, name or n.shorten(uri))",
"def detail_link(db_obj, text=None):\n\n def build_link(obj):\n name = str(obj) if text is None else text\n return _make_link(obj.detail_url(), name)\n\n return mark_safe(', '.join(map(build_link, as_list(db_obj))))",
"def view_bed_link(unused1, unused2, model, unused3):\n del unused1, unused2, unused3\n return (\n Markup(\n u\"<a href='%s'>%s</a>\"\n % (url_for(\"bed.index_view\", search=model.bed.name), model.bed.name)\n )\n if model.bed\n else u\"\"\n )",
"def linkify(field_name):\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify",
"def view_link(self, obj):\n if obj.cwr:\n url = reverse(\n 'admin:music_publisher_cwrexport_change', args=(obj.id,))\n url += '?preview=true'\n return mark_safe(\n '<a href=\"{}\" target=\"_blank\">View CWR</a>'.format(url))",
"def linkify(field_name):\n\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify",
"def item_link(self, obj):\n if obj.item is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_mediaitem_change', args=(obj.item.pk,)),\n obj.item.title if obj.item.title != '' else '[Untitled]'\n )",
"def __str__(self):\n\t\treturn '{0} ({1})'.format (self.name, self.link)",
"def format(self, obj):\n pass",
"def format(self, obj):\n pass",
"def fmt(e):\n name = str(e.label.first() if hasattr(e, 'label') and e.label else e)\n if re.match(r'^[a-z]+://', name):\n return link.format(name=name, url=name)\n if hasattr(e, 'label') and e.label:\n name = e.label.first()\n url = name if re.match(r'^[a-z]+://', name) else '#' + name\n return link.format(name=name, url=url)\n elif re.match(r'^[a-z]+://', str(e)):\n return link.format(name=e, url=e)\n else:\n return str(e).replace('owl.', 'owl:')",
"def get_object_view_url(self, nuxeo_id):\n parts = urlparse.urlsplit(self.nx.conf[\"api\"])\n url = \"{}://{}/Nuxeo/nxdoc/default/{}/view_documents\".format(parts.scheme, parts.netloc, nuxeo_id) \n return url",
"def getLink(self):",
"def cook(self, obj, request, field_name):\n view_url = ''\n edit_url = ''\n \n if hasattr(obj, 'get_absolute_url'):\n view_url = obj.get_absolute_url();\n if request.user.has_perm('%s.change_%s' %(obj._meta.app_label, obj._meta.model_name)):\n\t\t\tedit_url = reverse('admin:%s_%s_change' %(obj._meta.app_label, obj._meta.model_name), args=[obj.id])\n\t\t\n result = {'text': unicode(obj),\n 'view_url': view_url,\n 'edit_url': edit_url\n }\n return result",
"def deriveLinkfromObject(obj, scale=1, parent_link=True, parent_objects=True,\n reparent_children=True, nameformat='', scaleByBoundingBox=False):\n log('Deriving link from ' + nUtils.getObjectName(obj), level=\"INFO\")\n # create armature/bone\n bUtils.toggleLayer('link', True)\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.armature_add()\n newlink = bpy.context.active_object\n newlink.name = obj.name + \"_link\"\n newlink.matrix_world = obj.matrix_world\n newlink.phobostype = 'link'\n if scaleByBoundingBox:\n bound_box = (\n max([c[0] for c in obj.bound_box]),\n max([c[1] for c in obj.bound_box]),\n max([c[2] for c in obj.bound_box]),\n )\n newlink.scale = [max(bound_box)*scale] * 3\n else:\n newlink.scale = [scale] * 3\n if obj.parent is not None and parent_link:\n eUtils.parentObjectsTo(newlink, obj.parent)\n if parent_objects:\n eUtils.parentObjectsTo(obj, newlink)\n if reparent_children:\n eUtils.parentObjectsTo(list(obj.children), newlink)\n if bpy.context.scene.phoboswireframesettings.links:\n newlink.display_type = \"WIRE\"\n return newlink",
"def view_family_link(unused1, unused2, model, unused3):\n del unused1, unused2, unused3\n return (\n Markup(\n u\"<a href='%s'>%s</a>\"\n % (url_for(\"family.index_view\", search=model.family.internal_id), model.family,)\n )\n if model.family\n else u\"\"\n )",
"def render_url(self, object_id):\r\n return reverse(\"%s:insert_%s_%s_render\" % (\r\n self.admin_site.name,\r\n self.model._meta.app_label,\r\n self.model._meta.module_name\r\n ), args=(object_id,))",
"def build_url_long(self, obj):\n if obj.slug:\n url = self.request.build_absolute_uri(reverse('build_repo', args=(obj.slug,)))\n return '<a href=\"%s\" target=\"_blank\">%s<a>' % (url, url)\n else:\n return ''",
"def view_invoice_link(unused1, unused2, model, unused3):\n del unused1, unused2, unused3\n return (\n Markup(\n u\"<a href='%s'>%s</a>\"\n % (\n url_for(\"invoice.index_view\", search=model.invoice.id),\n model.invoice.invoiced_at.date()\n if model.invoice.invoiced_at\n else \"In progress\",\n )\n )\n if model.invoice\n else u\"\"\n )",
"def command_view(arguments):\n global current_mode, current_name\n current_mode = Mode.links\n current_name = arguments[0]\n return 'Now viewing entity \"' + current_name + '\"'",
"def linkify(obj, link_text=''):\n try:\n lst = []\n # if obj is not a list, convert it into a list\n if not getattr(obj, '__iter__', False):\n obj = [obj]\n for item in obj:\n if hasattr(item, 'child'):\n item = item.child\n if link_text == '':\n l_text = unicode(item)\n else:\n try:\n link_text = link_text.encode('ascii')\n l_text = getattr(item, link_text, link_text)\n except UnicodeEncodeError:\n l_text = link_text\n if not (isinstance(item, Content) and\n isinstance(l_text, SafeText)):\n l_text = filter.force_escape(l_text)\n format_args = (item.get_absolute_url(), l_text)\n lst.append(mark_safe('<a href=\\'%s\\'>%s</a>' % format_args))\n\n # nonlists obj's should be returned as nonlists\n return lst[0] if len(lst) == 1 else lst\n except:\n return ''",
"def renderView(self):\n html = \"\"\n if self.lms.lms == \"other\":\n html += \"_______________________________<br/>\"\n url = \"http://%s\" % self.lms.otherUrl\n html += \"<br/><b>%s </b>\" % self.lms.otherLabel\n html += '<a href=\"%s\">%s</a>' % (url, url) \n return html"
] | [
"0.73224676",
"0.7216829",
"0.7124374",
"0.68019325",
"0.65847546",
"0.64726907",
"0.64016116",
"0.63936055",
"0.6361755",
"0.635381",
"0.63412184",
"0.63111347",
"0.6248648",
"0.6244252",
"0.61723745",
"0.61185104",
"0.6079688",
"0.6079688",
"0.60349536",
"0.6026926",
"0.6017183",
"0.6002368",
"0.59222513",
"0.5901884",
"0.5883708",
"0.5872645",
"0.58527756",
"0.5842337",
"0.58284605",
"0.58029336"
] | 0.75110674 | 0 |
Returns a list of the currently connected players (on the MC server). First tries to hit the cache to see if this has been checked recently. If there is no cache entry, queries the Minecraft server's zombiepygman API to get the list of currently connected players. | def _get_connected_player_list(self):
if not zpgapi.is_zgp_api_enabled():
# API is not configured, skip this.
return []
cache_key = 'api_connected_players'
cache_val = cache.get(cache_key)
if cache_val != None:
return cache_val
api = zpgapi.get_zpg_api_iface()
try:
api_response = api.cmd_list_connected()
cache_val = api_response['player_list']
except urllib2.URLError:
# Error with zombiepygman.
# This will get cached, but that's OK. It will prevent request
# pileup on the gunicorn workers.
cache_val = []
cache.set(cache_key, cache_val, 60)
return cache_val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_players(self):\n return self.server.status().players.online",
"def get_players():\n nfl_players = redis_cache('nfl_players_key', NFL_Player_2015.query.all)\n return nfl_players",
"def players(self):\n return self.currents.player",
"def get_players(self):\n\n # Append the current player to the list and return it\n players_list = list(self._players.queue)\n players_list.append(self._current_player)\n return players_list",
"def getPlayers(self):\n\t\tself.server.playerMutex.lock()\n\t\tplayers = [ (player[0], player[1][3]) for player in self.server.players.items() ]\n\t\tself.server.playerMutex.unlock()\n\t\treturn players",
"def get_players(self):\r\n return self.players.values()",
"def players(self):\n return self._get(\"players\")",
"def ready_players(self):\n return self.players.filter_by(sitting_out=False).join(players_active).all()",
"def active_players(self):\n return self.players.join(players_active).all()",
"def _player_list(self):\n game = self.ctrl.game\n return game.players[self.i_to_player_id(0)], game.players[self.i_to_player_id(1)]",
"def current_players(self):\n return self.previous_event.current_players",
"def get_players():\n return [Mpris_Player(item)\n for item in Mpris_Utils.get_session().list_names()\n if re.match(Mpris_Interfaces.MEDIA_PLAYER, item) > 0]",
"def get_all_game_players(self):\n return GamePlayer.objects.filter(game=self)",
"def get_all_players(self):\n\n self._logger.debug(\"Getting player list\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT first_name, last_name, nickname, time FROM player \\\n ORDER BY time DESC\")\n players = cursor.fetchall()\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return players",
"def getPlayers(self):\n return iter(self.players)",
"def get_online_list(self) -> list:\n return self._get_json(self._URLS['GetOnlineList'])[1:]",
"def get_player_list():\r\n return list(\r\n pymongo.MongoClient('mongodb://localhost:27017/')['wows']['na_player_list'].find( # !!!!!!!!!!!!!!!!!!!!!!!!!\r\n {'scraped': False}, {'_id': 0, 'player_id': 1, 'player_name': 1, 'clan': 1}\r\n )\r\n )",
"async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output",
"def redis_client_list(self):\n def func(server):\n return server.server.client_list()\n self.__run_redis_cmd(func)",
"def getPlayerList(self):\n return(self.playerList)",
"def get_active_players(self, season):\n try:\n cursor = self.conn.cursor()\n command = '''\n SELECT Player\n FROM InLeague\n WHERE League IN (SELECT L_ID\n FROM League\n WHERE Season = ?)\n '''\n cursor.execute(command, (season,))\n players = []\n for p in cursor.fetchall():\n players.append(p[0])\n return players\n except BaseException as e:\n self.log.log_error('Fehler beim laden der aktiven Spieler', e)\n raise e",
"def _get_live_games(self):\n response = requests.get(self._get_score_url())\n if response.status_code == 200:\n return [g for g in response.json()['games'] if g['status']['state'] == self.desired_game_state]",
"def getPlayers(self):\n players = []\n for pgp in self.sandboxplayergroupplayer_set.filter(quit=False):\n players.append(pgp.player)\n return players",
"async def fetch_games(self):\n return await self.http.get_game_list()",
"def players(self):\n return self._get_by_class(Player)",
"def current_wifi_clients(self) -> list:\n self._parse_clients_info()\n return self._current_wifi_clients",
"def players(self) -> List[Player]:\n return [self.white_player, self.black_player]",
"def getConnectedUsers(self):\n\n\t\treturn self.connectedUsers",
"def players(self):\n return Player.objects.filter(team=self)",
"def list_available_clients(self):\n connected_clients = self.all_clients.keys()\n return connected_clients"
] | [
"0.7143071",
"0.68768233",
"0.6365127",
"0.635414",
"0.62231076",
"0.60774004",
"0.6067745",
"0.60203606",
"0.6001943",
"0.5917321",
"0.5900113",
"0.58974326",
"0.5869177",
"0.5867893",
"0.5851105",
"0.58382463",
"0.5821852",
"0.57409716",
"0.5719336",
"0.57017183",
"0.5654622",
"0.5652596",
"0.5646992",
"0.5596401",
"0.557703",
"0.5576145",
"0.5571599",
"0.5567744",
"0.5566548",
"0.55665255"
] | 0.82015663 | 0 |
Ensure the value of 'done' is set to False when creating an item | def test_done_default_value_is_False(self):
item = Item(name = "A test item")
self.assertEqual(item.name, "A test item")
self.assertFalse(item.done) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_done_value_can_be_set_to_True(self):\n item = Item(name = \"A test item\", done = True)\n self.assertEqual(item.name, \"A test item\")\n self.assertTrue(item.done)",
"def test_create(self):\n Todo = self.env[\"todo.task\"]\n task = Todo.create({'name': 'Test Task'})\n self.assertItemsEqual(task.is_done, False)",
"def test_mark_completed(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO1\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is False\n\n self.client.get(reverse('todo_mark_completed', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is True",
"def _is_done(self):\n pass",
"def is_done():\n return False",
"def action_set_done(self):\n self.ensure_one()\n self.write({\"state\": \"done\"})\n self.credit_control_line_ids.write({\"state\": \"done\"})\n return True",
"def create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item",
"def test_mark_incompleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=True, title=\"Test TODO2\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is True\n\n self.client.get(reverse('todo_mark_incompleted', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is False",
"def add_item(todo_list, todo_new_item):\n check = True\n try:\n todo_list.append(todo_new_item)\n except todo_list:\n print(\"Could not add new item to todo list\")\n check = False\n\n return check",
"def create(self):\n return (True == self.client.put(self.name).getBodyData(\"ok\"))",
"def settle_self(self):\n self.state = 'completed'\n self.save()\n self.safe_post()",
"def test_create_item_good(test_client, item):\n\n response = test_client.post(BASE_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 201\n assert data['item']['name'] == item['name']\n assert data['item']['value'] == item['value']\n assert data['item']['id'] > 0",
"def take_item(self, item):\r\n if len(self.items) <= 2:\r\n self.items.append(item)\r\n if self.got_both():\r\n self.working = True",
"def done(self):\n return False",
"def create_item(self, user: User, **kwargs) -> None:",
"def _isDone(self) -> bool:\n pass",
"def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))",
"def test_create_item_missing_value(test_client, item_without_value):\n\n response = test_client.post(BASE_URL,\n data=json.dumps(item_without_value),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 400\n assert data['error'] == app.BAD_REQUEST",
"def action_done(self):\n if not self.date_done:\n self.date_done = fields.Datetime.now()\n if self.state_rapel == '1':\n self.generate_rapel()\n self.state = 'done'",
"def done(self) -> bool:",
"def is_item_complete(self, item):\n return (item.get('id') and\n item.get('name') and\n 'description' in item and\n 'image' in item)",
"def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")",
"def complete_todo(self, todo: Todo):\n todo.completed = True\n self.todo_client.put_todo(todo)",
"def create_work_item(self):",
"def test_vault_create_new_vault_item(self):\n pass",
"def test_create_a_todo(self):\n # hit the API endpoint\n response = self.make_a_request(\n kind=\"post\",\n version=\"v1\",\n data=self.valid_data\n )\n self.assertEqual(response.data, self.valid_data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # test with invalid data\n response = self.make_a_request(\n kind=\"post\",\n version=\"v1\",\n data=self.invalid_data\n )\n self.assertEqual(\n response.data[\"message\"],\n \"TODO item requires state, due_date and text\"\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_add_item_using_post(self):\n pass",
"def pending(self):\n self.state = Step.State.PENDING",
"def mark_as_done(self):\n self.status = \"DONE\"",
"def mark_as_done(self):\n\n done = self.in_progress_scroll_cell.get()\n if done is None:\n self.master.show_error_popup('No Item', 'There is no item in the list to mark as done')\n return\n self.in_progress_scroll_cell.remove_selected_item()\n self.done_scroll_cell.add_item(done)"
] | [
"0.73595536",
"0.69080955",
"0.6256873",
"0.6184523",
"0.61320096",
"0.60282576",
"0.6025874",
"0.59891725",
"0.58720154",
"0.5870808",
"0.58685875",
"0.5864126",
"0.5850043",
"0.5843042",
"0.580223",
"0.5788047",
"0.5772935",
"0.5769292",
"0.5767891",
"0.5756782",
"0.57303107",
"0.5727828",
"0.5699209",
"0.5697862",
"0.56792533",
"0.5639623",
"0.56324",
"0.56297165",
"0.5627014",
"0.5625253"
] | 0.7237571 | 1 |
Ensure the value of 'done' is True when it is explicitly set to True when creating an item | def test_done_value_can_be_set_to_True(self):
item = Item(name = "A test item", done = True)
self.assertEqual(item.name, "A test item")
self.assertTrue(item.done) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_done_default_value_is_False(self):\n item = Item(name = \"A test item\")\n self.assertEqual(item.name, \"A test item\")\n self.assertFalse(item.done)",
"def test_create(self):\n Todo = self.env[\"todo.task\"]\n task = Todo.create({'name': 'Test Task'})\n self.assertItemsEqual(task.is_done, False)",
"def _is_done(self):\n pass",
"def action_set_done(self):\n self.ensure_one()\n self.write({\"state\": \"done\"})\n self.credit_control_line_ids.write({\"state\": \"done\"})\n return True",
"def is_done():\n return False",
"def test_mark_completed(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO1\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is False\n\n self.client.get(reverse('todo_mark_completed', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is True",
"def done(self) -> bool:",
"def done(self):\n return False",
"def _isDone(self) -> bool:\n pass",
"def mark_as_done(self):\n self.status = \"DONE\"",
"def action_done(self):\n if not self.date_done:\n self.date_done = fields.Datetime.now()\n if self.state_rapel == '1':\n self.generate_rapel()\n self.state = 'done'",
"def test_mark_incompleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=True, title=\"Test TODO2\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is True\n\n self.client.get(reverse('todo_mark_incompleted', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is False",
"def settle_self(self):\n self.state = 'completed'\n self.save()\n self.safe_post()",
"def create(self):\n return (True == self.client.put(self.name).getBodyData(\"ok\"))",
"def is_item_complete(self, item):\n return (item.get('id') and\n item.get('name') and\n 'description' in item and\n 'image' in item)",
"def done(self):\n self.status = 'completed'\n self.end = datetime.datetime.now()\n self.save()",
"def complete_todo(self, todo: Todo):\n todo.completed = True\n self.todo_client.put_todo(todo)",
"def done(self):\n return self._info['status'] == 'DONE'",
"def is_done(self):\n return self._done",
"def test_completed(self):\n return False",
"def take_item(self, item):\r\n if len(self.items) <= 2:\r\n self.items.append(item)\r\n if self.got_both():\r\n self.working = True",
"def _isDone(self):\n return self.steps >= self.max_steps or len(self.food_ids) <= 0",
"def add_item(todo_list, todo_new_item):\n check = True\n try:\n todo_list.append(todo_new_item)\n except todo_list:\n print(\"Could not add new item to todo list\")\n check = False\n\n return check",
"def test_create_item_good(test_client, item):\n\n response = test_client.post(BASE_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 201\n assert data['item']['name'] == item['name']\n assert data['item']['value'] == item['value']\n assert data['item']['id'] > 0",
"def get_isDone(self):\n pass",
"def done(self) -> bool:\n return pulumi.get(self, \"done\")",
"def force_done(self):\n\n if self.can_done():\n return self.done()\n else:\n # we can not set that quest to done regularly, so we force it\n # nobody gets any experience and we might need a special notification for this\n self.quest.done = True\n self.quest.save()\n signals.quest_done.send(None, quest=self.quest)",
"def create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item",
"def can_mark_as_done(self):\n if (not self.event_store.done) and \\\n ((not self.file_submission_required) or self.event_store.has_file_submission) and \\\n (not self.contains_questions):\n return True\n return False",
"def mark_as_done(self):\n\n done = self.in_progress_scroll_cell.get()\n if done is None:\n self.master.show_error_popup('No Item', 'There is no item in the list to mark as done')\n return\n self.in_progress_scroll_cell.remove_selected_item()\n self.done_scroll_cell.add_item(done)"
] | [
"0.7198816",
"0.6800873",
"0.65567577",
"0.6417597",
"0.6363705",
"0.6359525",
"0.6172014",
"0.6137984",
"0.61083496",
"0.6047177",
"0.60278106",
"0.6007678",
"0.59839237",
"0.59442586",
"0.590649",
"0.5906427",
"0.5846286",
"0.5799823",
"0.5794618",
"0.5780171",
"0.57660437",
"0.576509",
"0.57642",
"0.57324827",
"0.5731095",
"0.57280755",
"0.5707831",
"0.57019037",
"0.56942606",
"0.56815094"
] | 0.76738805 | 0 |
Ensure the string value of the object is equal to the item name | def test_object_name_is_equal_to_item_name(self):
item = Item(name = "A test item")
self.assertEqual(str(item), "A test item") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_str(self):\n item = self.item\n\n self.assertEqual(str(item), self.item_raw['name'])",
"def _valid_object_with_name(ui_object):\n return ui_object.obj_name",
"def test_name(self):\n self.assertTrue(type(x.name) == str)",
"def test_values_single(self):\n input_item = self.item_class(name=\"foo\")\n il = ItemLoader(item=input_item)\n self.assertEqual(il._values.get(\"name\"), [\"foo\"])",
"def test_name3(self):\n new = self.value()\n self.assertEqual(type(new.name), str)",
"def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()",
"def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)",
"def set_name(self, item_name):\r\n self.name = item_name",
"def test_printing_shoppping_item_returns_name(create_shopping_item):\n item = create_shopping_item\n assert item.__str__() == 'shopping item one'",
"def __getitem__(self, item):\n return self._object_names[item]",
"def test_from_name(self, testdata: TestData) -> None:\n for record in testdata['observation_type']:\n assert ObservationType.from_name(record['name']).name == record['name']",
"def validate(self, name):\n return name in self.dict",
"def test_asset_name():\n\n invalid = {}\n inventory_ = copy.deepcopy(self._inventory)\n inventory_[\"assets\"].append(invalid)\n\n for name in (\"mixedCaseOk\",\n \"lowercaseok\",\n \"underscore_ok\"):\n invalid[\"name\"] = name\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n for name in (\"spaces not ok\",\n \"special~characters$not^ok\",\n \"dash-not-ok\"):\n invalid[\"name\"] = name\n\n assert_raises(\n schema.ValidationError,\n inventory.save,\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )",
"def _check_name(self):\n\t\tpass",
"def test_name_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.name = 'bar'\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_set_value_not_str(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.assertRaises(TypeError, lambda: self.helper.set_value([\"Hello\", \"World!\"]))",
"def need_name(dictionary, raise_error=True):\r\n return key_checker(['name'])(dictionary, raise_error)",
"def test_correct_upload_item(upload_items: List[JSONDict]) -> None:\n validated = UploadItem(**upload_items[0])\n assert validated.dict() == upload_items[0]",
"def test_property_name(self):\n \n name = self.location.name\n\n self.assertIsInstance(name, str)\n self.assertRaises(DataObjectError, \n setattr(self, \"name\", \"Bogus Location name\")\n )",
"def exists(self, obj):\n\t\tif obj.get('name') and obj.get('type'):\n\t\t\treturn self.db.sql(\"select name from `%s` where name=%s\" % \\\n\t\t\t \t(obj['type'],'%s'), obj['name'])",
"def test_should_name_field(self):\n self.assertIn(\"name\", self.fields)",
"def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)",
"def getName(cls, itemValue):\n for name, value in cls.iterate():\n if itemValue == value:\n return name\n\n raise ValueError('Value {0} not found in {1}'.format(itemValue, cls.__name__))",
"def check_attributes(self):\n self.assertEqual(type(self.amenity_1.name), str)",
"def test_keep_single_value(self):\n input_item = self.item_class(name=\"foo\")\n il = ItemLoader(item=input_item)\n loaded_item = il.load_item()\n self.assertIsInstance(loaded_item, self.item_class)\n self.assertEqual(ItemAdapter(loaded_item).asdict(), {\"name\": [\"foo\"]})",
"def _check_key_name(cls, name):\n return (isinstance(name, basestring) and\n re.match('^[A-Za-z][A-Za-z0-9_]*$', name) and\n not hasattr(cls, name))",
"def test_name(self):\n place = Place()\n self.assertTrue(hasattr(place, \"name\"))\n self.assertEqual(type(place.name), str)\n self.assertEqual(place.name, \"\")",
"def check_all_objects_have_names(self):\n for entity in crest.get_all_crest_objects(self.model):\n assert entity._name is not None, f\"Object {entity} has no name\"",
"def test_strings(self):\n\n for cls in [IndependentMoney, Beneficiary, CommitteeBenefactor,\n OtherBenefactor, PersonBenefactor, Benefactor,\n PartyBenefactor, Committee]:\n if cls.objects.all().count() == 0: # bad :(\n try:\n obj = cls()\n except:\n continue\n else:\n obj = cls.objects.all()[0]\n\n self.assertNotIn('Object', str(obj), cls.__name__)\n self.assertNotIn('Object', unicode(obj), cls.__name__)\n\n self.assertNotEqual('', str(obj), cls.__name__)\n self.assertNotEqual('', unicode(obj), cls.__name__)",
"def get_item_name(self, i):\n for item in self.items:\n if item['id'] == i:\n return item['localized_name']\n return 'Unknown Item'"
] | [
"0.72171146",
"0.6880246",
"0.6773977",
"0.6383563",
"0.6352",
"0.6319917",
"0.62903845",
"0.6150476",
"0.60850054",
"0.60359854",
"0.60222614",
"0.5974848",
"0.5964395",
"0.59447414",
"0.5942534",
"0.59195846",
"0.59191364",
"0.5898834",
"0.58966243",
"0.5865687",
"0.5844299",
"0.5837067",
"0.58190876",
"0.5812556",
"0.58083355",
"0.58058465",
"0.57844996",
"0.57691634",
"0.5719298",
"0.5715222"
] | 0.7843017 | 0 |
Create a postvalidator function that makes sure the value of this item is a key in the sibling dictionary 'sib_name'. Raises a ValueError if not. This generally assumes siblings[sib_name] is a required CategoryElement. | def is_sib_key(sib_name):
def is_sib_key_val(siblings, value):
if value not in siblings[sib_name].keys():
raise ValueError(
"Must be a key of {}, but got {}"
.format(sib_name, value))
return value
return is_sib_key_val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(self):\n for search_tag_name in self.get_search_tag_names():\n search_tag_obj = Tag(search_tag_name)\n for search_tag_value in self.get_search_tag_values(search_tag_name):\n for new_tag_name in self.get_new_tag_names(search_tag_name, search_tag_value):\n new_tag_obj = Tag(new_tag_name)\n new_tag_value = self.get_new_tag_value(search_tag_name, search_tag_value, new_tag_name)\n if new_tag_obj.repeatable:\n if not isinstance(new_tag_value, list):\n raise KeyError('%s needs a list'%(new_tag_name))\n else:\n if isinstance(new_tag_value, list):\n raise KeyError('%s needs a scalar value'%(new_tag_name))",
"def check_items_slugs(cls, slugs, registry):\n for m in registry.items():\n for i in m[1]['items'].items():\n for slug in slugs:\n try:\n item = i[1]['_class'].objects.get(slug=slug)\n raise ItemAttributeChoicesSlugsDuplicateItemInstanceSlug(cls, item)\n except ObjectDoesNotExist:\n pass",
"def validate_children(self, source, **kwargs):\n # TODO cache this loaded data keyed on a hashed version of kwargs\n children = self._load_json(\"children\", source, **kwargs)\n self._validate_against_schema(\"children\", children)\n\n strand = getattr(self, \"children\", [])\n\n # Loop the children and accumulate values so we have an O(1) check\n children_keys = {}\n for child in children:\n children_keys[child[\"key\"]] = children_keys.get(child[\"key\"], 0) + 1\n\n # Check there is at least one child for each item described in the strand\n # TODO add max, min num specs to the strand schema and check here\n for item in strand:\n strand_key = item[\"key\"]\n if children_keys.get(strand_key, 0) <= 0:\n raise exceptions.InvalidValuesContents(f\"No children found matching the key {strand_key}\")\n\n # Loop the strand and add unique keys to dict so we have an O(1) check\n strand_keys = {}\n for item in strand:\n strand_keys[item[\"key\"]] = True\n\n # Check that each child has a key which is described in the strand\n for child in children:\n child_key = child[\"key\"]\n if not strand_keys.get(child_key, False):\n raise exceptions.InvalidValuesContents(\n f\"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine.\"\n )\n\n # TODO Additional validation that the children match what is set as required in the Twine\n return children",
"def _validate(self, value, name):\n validated = self._validate_impl(value, name)\n return self._validate_post(value, name, validated)",
"def check_categories_slugs(cls, slugs):\n CategoryModel = apps.get_model(settings.DJCAT_CATEGORY_MODEL)\n for node in CategoryModel.objects.all():\n if node.slug in slugs:\n raise ItemAttributeChoicesSlugsDuplicateWithcCategory(cls, node)",
"def check_dict_alg(dic, validator, entry_list, messages, whole_validator, current_elem):\n for node in validator:\n new_list = dc(entry_list)\n node_value = validator[node]\n if node != 'isReference':\n if not ('isReference' in node_value and len(entry_list) == 0):\n if is_operator(node):\n handle_operator(\n node, dic, validator, new_list, messages, whole_validator, current_elem\n )\n elif is_leaf(node_value):\n new_list.append(node)\n check_leaf(node_value, dic, new_list, messages, current_elem)\n else:\n new_list.append(node)\n check_dict_alg(\n dic, node_value, new_list, messages, whole_validator, current_elem\n )",
"def GetSubkeyByName(self, name):",
"def grandparent_splitter(fn, valid_name=\"valid\", train_name=\"train\"):\n gp = fn.parent.parent.name\n if gp == valid_name:\n return True\n elif gp == train_name:\n return False\n return",
"def hasSiblings():",
"def validate_unique_taxon_slugs(cls, values):\n if 'attributes' in values:\n # count occurrence of each taxon slug in attributes\n attributes: List[FdqModelAttribute] = values['attributes']\n taxon_slugs = cls._get_available_attrs_taxon_slugs(attributes)\n\n taxon_slugs_counter = Counter(taxon_slugs)\n\n multiple_taxon_slugs = [\n taxon_slug for taxon_slug, occurrence in taxon_slugs_counter.items() if occurrence > 1\n ]\n if len(multiple_taxon_slugs):\n raise ValueError('Following fields are mapped more than once - ' + ','.join(multiple_taxon_slugs))\n\n return values",
"def test_split_nested_class_from_key_lambda(self):\n part1, part2 = class_dependency.split_nested_class_from_key(\n 'pkg.name.class$$Lambda$1')\n self.assertEqual(part1, 'pkg.name.class')\n self.assertEqual(part2, '$Lambda$1')",
"def validate(self, source_value):\n errors = defaultdict(list)\n\n for field in self.get_mapping().fields:\n value = get_attribute(source_value, field.name)\n try:\n field.is_valid(value)\n except ValidationError as e:\n errors[field.name].append(e.message)\n\n if errors:\n raise ValidationError(errors)\n else:\n return super(Nested, self).validate(source_value)",
"def OnRenameAccept(self, item, value):\r\n\r\n le = TreeEvent(wxEVT_TREE_END_LABEL_EDIT, self.GetId())\r\n le._item = item\r\n le.SetEventObject(self)\r\n le._label = value\r\n le._editCancelled = False\r\n\r\n return not self.GetEventHandler().ProcessEvent(le) or le.IsAllowed()",
"def validate(self, item):\n attempt, pkg_analyzer, journal_and_issue_data = item[:3]\n j_publisher_name = journal_and_issue_data.get('journal', {}).get('publisher_name', None)\n if j_publisher_name:\n data = pkg_analyzer.xml\n xml_publisher_name = data.findtext('.//journal-meta/publisher/publisher-name')\n\n if xml_publisher_name:\n if self._normalize_data(xml_publisher_name) == self._normalize_data(j_publisher_name):\n r = [models.Status.ok, 'Valid publisher name: ' + xml_publisher_name]\n else:\n r = [models.Status.error, 'Mismatched data: %s. Expected: %s' % (xml_publisher_name, j_publisher_name)]\n else:\n r = [models.Status.error, 'Missing data: publisher name']\n else:\n r = [models.Status.error, 'Missing data: publisher name, in scieloapi']\n return r",
"def post_validated(self, struct, item, value):\n return value",
"def _duplicate_child_allowed_check(self):\n\n for rule in self.options[\n 'parent_allows_duplicate_child']:\n if self.lineage_test(rule):\n return True\n return False",
"def validateName(self, info):\n for name, childInfo in info.devices.iteritems():\n if name != childInfo.name:\n raise ConfigurationNameMismatchError(name, childInfo.name)\n self.validateName(childInfo)",
"def _validate_post(self, value, name, result):\n return result",
"def validate_insert(self, s, internal=True):\n super(FieldSet, self).validate_insert(s, internal) # mandatory check\n if s and s not in [d[0] for d in self.details]:\n valid = []\n for k,v in self.details:\n valid.append(\"%s=%s\" % (k, v))\n raise FilemanError(\"\"\"Value [%s] is not valid. must be one of: %s\"\"\" % (s, \", \".join(valid)))",
"def _validate(self, instance, value):",
"def _validate_impl(self, value, name):\n raise NotImplementedError()",
"def check_attr_key(cls, registry):\n for m in registry.items():\n for i in m[1]['items'].items():\n for a in i[1]['attrs'].items():\n if a[1]['key'] == cls.attr_key:\n raise ItemAttributeKeyDuplicate(a[1]['class'], cls, cls.attr_key)",
"def __getitem__(self, item):\n if self.child_keys is None:\n self.child_keys = sorted(self.children.keys(), key=str.lower)\n return self.children[self.child_keys[item]]",
"def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n for child in paramInput.subparts:\n if child.getName() == \"state\":\n outcome = child.parameterValues[\"outcome\"]\n value = child.value\n self.mapping[outcome] = value\n try:\n float(outcome)\n self.isFloat = True\n except:\n self.isFloat = False\n if outcome in self.values:\n self.raiseAnError(IOError,'Categorical distribution has identical outcomes')\n else:\n self.values.add(float(outcome) if self.isFloat else outcome)\n else:\n self.raiseAnError(IOError,'Invalid xml node for Categorical distribution; only \"state\" is allowed')\n self.initializeDistribution()\n self.upperBoundUsed = True\n self.lowerBoundUsed = True",
"def handle_operator(node, dic, validator, entry_list, messages, whole_validator, current_elem):\n if node == '$reference':\n new_list = dc(entry_list)\n new_list.append(validator[node])\n check_dict_alg(\n dic, whole_validator[validator[node]], new_list, messages, whole_validator, current_elem\n )\n elif node == '$forElem':\n traversed_dic = traverse_dict(dic, entry_list)\n if traversed_dic is not None:\n for elem in traversed_dic:\n new_list = dc(entry_list)\n new_list.append(elem)\n check_dict_alg(\n dic, validator['$forElem'], new_list, messages, whole_validator, elem\n )\n else:\n add_message(messages, current_elem, \"Error in traversing dict!\")\n elif node.startswith('$selection__'):\n select_type = node.split('__')[1]\n select_dic = traverse_dict(dic, entry_list)\n if select_type in select_dic:\n select = select_dic[select_type]\n rest_validator = validator[node][select]\n check_dict_alg(dic, rest_validator, entry_list, messages, whole_validator, current_elem)\n else:\n add_message(\n messages, current_elem, \"Could not find \" + select_type + \" in \" + str(entry_list)\n )\n elif node.startswith('$exists__'):\n # TODO handle it somehow...\n pass",
"def test_basic_singleton_key_error(self):\n\n schema = {\n ('root', str): {\n ('sample node', str, 'sample'): ('node', str, r'[a-z]*')\n }\n }\n data = {'root': {'not sample': 'node'}}\n\n ERRORS = lws.return_errors()\n expected_schema = {\n ('root', 'root'): [('sample node', ERRORS['key'])]\n }\n expected_data = {\n ('root', 'root'): [('not sample', ERRORS['key'])]\n }\n\n assert dict(lws.validate_schema(schema, data)) == expected_schema\n assert dict(lws.validate_data(schema, data)) == expected_data",
"def _check_nested(self, key, self_val, nested):\n nested_val = getattr(nested, key)\n assert self_val == nested_val, \\\n \"selector['{}']='{}' in '{}' doesn't match header['{}']='{}' in nested file '{}'.\".format(\n key, self_val, self.filename, key, nested_val, nested.filename)",
"def test_process_label_in_node(self):\n tree = Node(children=[\n Node(\"Defining secret phrase.\", label=['AB', 'a']),\n Node(\"Has secret phrase. Then some other content\", \n label=['AB', 'b'])\n ], label=['AB'])\n t = Terms(tree)\n t.scoped_terms = {\n ('AB',): [Ref(\"secret phrase\", \"AB-a\", (9,22))]\n }\n # Term is defined in the first child\n self.assertEqual([], t.process(tree.children[0]))\n self.assertEqual(1, len(t.process(tree.children[1])))",
"def process_item(self, item, spider):\n session = self.Session()\n product = Product()\n subcategory = Subcategory()\n category = Category()\n product.name = item[\"title\"]\n product.source = item[\"source\"]\n if 'rate' in item:\n product.rate = item[\"rate\"]\n if 'safety' in item:\n product.safety = item[\"safety\"]\n if 'quality' in item:\n product.quality = item[\"quality\"]\n subcategory.name = item[\"subcategory\"]\n category.name = item[\"category\"]\n\n # Check for product duplicate\n exist_product = session.query(Product).filter_by(name = product.name).first()\n if exist_product is not None:\n exist_product.rate = product.rate\n exist_product.safety = product.safety\n exist_product.quality = product.quality\n exist_product.source = product.source\n else:\n # Check for subcategory duplicate\n exist_subcategory = session.query(Subcategory).filter_by(name = subcategory.name).first()\n if exist_subcategory is not None:\n exist_subcategory.products.append(product)\n else:\n subcategory.products.append(product)\n # Check for category duplicate\n exist_category = session.query(Category).filter_by(name = category.name).first()\n if exist_category is not None:\n exist_category.subcategories.append(subcategory)\n else:\n category.subcategories.append(subcategory)\n \n try:\n session.add(product)\n except:\n session.rollback()\n raise\n\n try:\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n session.close()\n\n return item",
"def compare(self, subnode) -> bool:\n\t\t# OK the node if it has a different name.\n\t\tif subnode.name != self.name:\n\t\t\treturn True\n\t\t# Alter self if incorrect type\n\t\tself._get_true_type(subnode.get_typestring())\n\t\t# Add filenames\n\t\tif subnode.name == \"File\":\n\t\t\tself.filenames.update(subnode.filenames)"
] | [
"0.45828247",
"0.4572428",
"0.45380762",
"0.44724888",
"0.42696497",
"0.42584473",
"0.42416134",
"0.41420826",
"0.41245428",
"0.41224957",
"0.4097658",
"0.4080648",
"0.40750405",
"0.40694186",
"0.40613383",
"0.4061312",
"0.40393326",
"0.40243196",
"0.3988627",
"0.3969611",
"0.39599988",
"0.3955689",
"0.39445496",
"0.39434463",
"0.39084497",
"0.3879124",
"0.3877231",
"0.38762397",
"0.3867846",
"0.38664564"
] | 0.62593156 | 0 |
Get the requirements file line. | def get_line(self):
# type: () -> str
line = "{}=={}".format(self.name, self.version)
if self.type != RequirementType.LATEST_VERSION:
line += ' # ' + TEMPLATES[self.type]
if self.type == RequirementType.NOT_LATEST_VERSION:
line = line.replace(r'(\S*)', self.error_version)
return line + '\n' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_line(self, path, line):\n\t\tlines = self.find_source(path)\n\t\tif lines == None:\n\t\t\treturn None\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn lines[line - 1]\n\t\t\texcept IndexError:\n\t\t\t\treturn None",
"def find_requirements():\n with open(\"requirements.txt\", 'r') as f:\n return f.read().splitlines()",
"def _get_relevant_line(self):\n # () -> (Phi.Line)\n line_name = self._get_line_name()\n print(\"looking for \"+str(line_name))\n return Phi.findLine(line_name)",
"def GetLine(line):\r\n pass",
"def parse_requirements(requirements_file='requirements.txt'):\n lines = []\n with open(requirements_file) as reqs:\n for _ in reqs:\n line = _.split('#')[0]\n if line.strip():\n lines.append(line)\n return lines",
"def get_requirements():\n with open('requirements.txt') as fd:\n lines = fd.read().splitlines()\n requires, links = [], []\n for line in lines:\n if line.startswith('git+'):\n links.append(line)\n elif line:\n requires.append(line)\n return requires, links",
"def _get_dependencies(requirements_file: Path) -> List[str]:\n lines = requirements_file.read_text().strip().split('\\n')\n return [line for line in lines if not line.startswith('#')]",
"def get_requirements(req):\n\n install_requires = []\n with open(req) as f:\n for line in f:\n if not line.startswith(\"#\"):\n install_requires.append(line.strip())\n return install_requires",
"def get_requirements():\n\n with open('requirements.txt', 'r') as f:\n requirements = f.readlines()\n requires = []\n for require in requirements:\n if require.startswith(\"#\") or require.startswith(\"\\n\"):\n continue\n else:\n requires.append(require.replace(\"\\n\", \"\"))\n return requires",
"def findRequirements():\n return [\n line.strip()\n for line in open(\"requirements.txt\").readlines()\n if not line.startswith(\"#\")\n ]",
"def get_requirements_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-13]\n req_path = os.path.join(root, 'requirements.txt')\n\n return req_path",
"def get_readme_line(self, test_name, line_match):\n return self.get_output_line(test_name, line_match, \"README\")",
"def get_requirement_info():\n links, requirements = [], []\n info = {'dependency_links': links, 'install_requires': requirements}\n requirements_path = 'requirements.txt'\n\n if not os.path.isfile(requirements_path):\n print('requirements.txt not found. Did you forget it?')\n return info\n\n reqs = filter(None, map(str.strip, open(requirements_path)))\n for line in reqs:\n if is_http(line):\n i = line.find('#egg=')\n if i == -1:\n raise SetupError('Missing \\'#egg=\\' in requirement link.')\n links.append(line[:i])\n requirements.append(line[i+5:])\n else:\n requirements.append(line)\n return info",
"def parse_requirement(req_text):\n req_text = req_text.strip()\n if not req_text:\n return None\n if req_text[0] == \"#\":\n return None\n return pkg_resources.Requirement.parse(req_text)",
"def read_requirements(filepath):\n with open(filepath, 'r') as fd:\n return fd.read().split('\\n')",
"def parse_requirements(fn):\n with open(fn) as f:\n rv = []\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n rv.append(line)\n return rv",
"def first_line(self):\n with open(self.file_path) as file:\n return file.readline()",
"def read_requirements():\r\n reqs_path = os.path.join('.', 'requirements.txt')\r\n with open(reqs_path, 'r') as f:\r\n requirements = [line.rstrip() for line in f]\r\n return requirements",
"def test_req_file_parse_egginfo_end_of_line_with_url(tmpdir):\n with open(tmpdir.join(\"req1.txt\"), \"w\") as fp:\n fp.write(\"https://example.com/foo.tar.gz#egg=wat\")\n\n finder = PackageFinder([], [], session=PipSession())\n reqs = list(parse_requirements(tmpdir.join(\"req1.txt\"), finder,\n session=PipSession()))\n\n assert len(reqs) == 1\n assert reqs[0].name == \"wat\"",
"def get_requirements():\n name = 'pypeit/requirements.txt'\n\n requirements_file = os.path.join(os.path.dirname(__file__), name)\n install_requires = [line.strip().replace('==', '>=') for line in open(requirements_file)\n if not line.strip().startswith('#') and line.strip() != '']\n return install_requires",
"def read_requirements():\n reqs_path = path.join('.', 'requirements.txt')\n with open(reqs_path, 'r') as f:\n requirements = [line.rstrip() for line in f]\n return requirements",
"def read_requirements():\n with open('requirements.txt') as f:\n requirements = f.readlines()\n return [element.strip() for element in requirements]",
"def get_version(rel_path: str) -> str:\n for line in read(rel_path).splitlines():\n if line.startswith(\"VERSION\"):\n delim = '\"' if '\"' in line else \"'\"\n return line.split(delim)[1]\n raise RuntimeError(\"Unable to find version string.\")",
"def _path_and_line(self):\n path, line = (re.match(r'-r (.*) \\(line (\\d+)\\)$',\n self._req.comes_from).groups())\n return path, int(line)",
"def getLine(self):\n\t\tif len(self._completeLines) > 0:\n\t\t\treturn self._completeLines.pop(0)\n\t\telse:\n\t\t\treturn None",
"def readline(self) -> Optional[str]:",
"def _update_properties_file(self, lines, filename):\n found_version_line = False\n if filename.endswith('cogent-requirements.txt'):\n for lineno, line in enumerate(lines):\n if 'packages/source/c/cogent' in line:\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n http_base = lines[lineno].rsplit('/',1)[0]\n lines[lineno] = '%s/PyCogent-%s.tgz\\n' % (http_base, self.Version)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)",
"def get_input(line):\n tex_input_filename_re = r\"\"\"{[^}]*\"\"\"\n m = re.search(tex_input_filename_re, line)\n return m.group()[1:]",
"def parse_requirements_txt():\n root = os.path.dirname(os.path.abspath(__file__))\n\n requirements = []\n dependencies = []\n\n with open(os.path.join(root, 'requirements.txt'), 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if not line or line.startswith('#'):\n continue\n\n egg = re.match('git\\+.*#egg=(.*)$', line)\n if egg is not None:\n egg = egg.groups()[0]\n requirements.append(egg)\n dependencies.append(line)\n else:\n requirements.append(line)\n\n return requirements, dependencies",
"def parse_requirements_file(filename):\n with open(filename) as input_file:\n return input_file.read().splitlines()"
] | [
"0.6740572",
"0.6701391",
"0.65815175",
"0.64647526",
"0.6415879",
"0.6412485",
"0.6349819",
"0.63256943",
"0.63117",
"0.6291996",
"0.62328476",
"0.6201596",
"0.6177119",
"0.6160064",
"0.6157574",
"0.615133",
"0.6138522",
"0.6128042",
"0.6090447",
"0.6074949",
"0.6072613",
"0.6064727",
"0.6062027",
"0.6029496",
"0.6026504",
"0.5988322",
"0.59870327",
"0.59679586",
"0.59501535",
"0.5935482"
] | 0.6781809 | 0 |
Split line on text and comment | def split_line(self, line):
# type: (str) -> tuple
parts = [s.strip() for s in line.split('#', 1)]
package = parts[0]
comment = parts[1] if len(parts) >= 2 else ''
return package, comment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_comment(cls, code):\r\n if '#' not in code: return code\r\n #: Remove comments only (leave quoted strings as they are)\r\n subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)\r\n return re.sub(cls.re_pytokens, subf, code)",
"def to_multi_line_comment(text: str) -> str:\n pass",
"def standalone_comment_split(\n line: Line, features: Collection[Feature], mode: Mode\n) -> Iterator[Line]:\n if not line.contains_standalone_comments(0):\n raise CannotSplit(\"Line does not have any standalone comments\")\n\n current_line = Line(\n mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets\n )\n\n def append_to_line(leaf: Leaf) -> Iterator[Line]:\n \"\"\"Append `leaf` to current line or to new line if appending impossible.\"\"\"\n nonlocal current_line\n try:\n current_line.append_safe(leaf, preformatted=True)\n except ValueError:\n yield current_line\n\n current_line = Line(\n line.mode, depth=line.depth, inside_brackets=line.inside_brackets\n )\n current_line.append(leaf)\n\n for leaf in line.leaves:\n yield from append_to_line(leaf)\n\n for comment_after in line.comments_after(leaf):\n yield from append_to_line(comment_after)\n\n if current_line:\n yield current_line",
"def DropComment(text):\n grp = re.compile(r'/\\*[^/]*\\*/').split(text)\n result = string.join(grp);\n grp = re.compile(r'//.*').split(result);\n result = string.join(grp);\n #result = string.join(result.split('\\n')) #remove the line break\n return(' '+result);",
"def to_single_line_comment(text: str) -> str:\n pass",
"def splitBodyLines(cls, text):\n\n def remove_comments(line):\n \"\"\"\n Returns the given line stripped of any comments.\n \"\"\"\n hashPos = line.find('#')\n return line[:hashPos] if hashPos >= 0 else line\n\n # Remove comments, strip whitespace, and return only non-blank lines\n lines = map(str.strip, map(remove_comments, text.splitlines()))\n return [l for l in lines if l]",
"def splitdefines(txt):\n pre = []\n c = []\n for line in txt.split(\"\\n\"):\n if line.startswith(\"#\"):\n pre.append(line)\n else:\n c.append(line)\n return pre, c",
"def get_title_block(txt):\n res = \"\"\n in_title = False\n for line in txt.splitlines():\n if line.startswith(\"#!\"):\n in_title = True\n res += clean_comment(line)\n continue\n if in_title:\n if line.startswith(\"#\"):\n res += clean_comment(line)\n else:\n break\n return res",
"def line_split(self, line):\n parts = []\n part = None\n quote = None\n for c in line:\n if part is None and not self.is_space(c):\n quote = c if self.is_quote(c) else None\n part = c if quote is None else \"\"\n elif part is not None and quote is None and not self.is_space(c):\n part += c\n elif part is not None and quote is not None:\n if c != quote:\n part += c\n else:\n parts.append(part)\n part = None\n quote = None\n elif part is not None and quote is None and self.is_space(c):\n parts.append(part)\n part = None\n quote = None\n if part is not None:\n parts.append(part)\n return parts",
"def block_comments(code):\n block = list()\n for line in code:\n if bool(line.strip()): # If line is not empty\n if line.strip()[0] == '!': # If the first character of the string is the start of a comment it adds it\n block.append(identify_comment(line))\n elif bool(line.strip()): # If the first character of the string is not the start of a comment or its not empty it exits\n break\n return block",
"def parse_space_in_comment(comment):\n max_spaces_dict = {}\n for line in comment:\n if (not line.strip()) or line.find(\" \") == -1:\n # empty line or line do not have spaces in it.\n continue\n max_spaces_dict[line] = max(len(list(v)) for k, v in groupby(line) if k == \" \")\n\n sep = [(line.index(\" \" * count) + count) for line, count in max_spaces_dict.items()]\n sep.sort()\n count_dict = {len(list(v)):k for k, v in groupby(sep)}\n\n if max(count_dict.keys()) < 3:\n return {}, comment\n\n comment_dict = {}\n # more than 3 lines following the same pattern, extract from it.\n sep_position = count_dict[max(count_dict.keys())] - 1\n debug(\"found boundary: %s\" % sep_position)\n\n def line_match_pattern(line, position, prev_line=None, next_line=None, recursive=True):\n \"\"\"\n for a line to match a pattern, its next line or its prev line must\n also match the pattern. Notice that the function would call itself\n to see if its next/prev line matches the pattern. So we used a flag\n to stop it from going deeper into the loop.\n \"\"\"\n if line.strip() and len(line) <= position + 1:\n return False\n if not (line[position] == \" \" and line[position+1] != \" \"):\n # The line itself must match the pattern.\n return False\n if (prev_line is None) and (next_line is None) and recursive:\n print(\"##### Bad way to call this function. ####\")\n return False\n\n if not recursive:\n # If we do not go deeper, then the current line just match the pattern.\n return True\n\n if prev_line and prev_line.strip() and not (line_match_pattern(prev_line, position, recursive=False)):\n return False\n\n if next_line and next_line.strip() and not (line_match_pattern(next_line, position, recursive=False)):\n return False\n\n return True\n\n comment_copy = copy(comment)\n for index, line in enumerate(comment_copy):\n if (not line.strip()) or line.find(\" \") == -1 or len(line) < sep_position:\n # empty line, or line has no space, or line to short.\n continue\n if index == 0:\n if line_match_pattern(line, sep_position, next_line=comment_copy[1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n else:\n debug(\"First line, but it does not match\")\n continue\n elif index == len(comment_copy)-1:\n if line_match_pattern(line, sep_position, prev_line=comment_copy[-1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n else:\n debug(\"last line, but it does not match\")\n continue\n elif line_match_pattern(line, sep_position, prev_line=comment_copy[index-1], next_line=comment_copy[index+1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n return comment_dict, comment",
"def _parse_comment(i, doc):\n\n if doc[i].strip() != \"/**\":\n raise ParseFailure(i, \"Expected beginning of block comment\")\n\n e = i + 1\n while e < len(doc) and doc[e].strip() != \"*/\":\n e += 1\n\n return e + 1, [x.rstrip() for x in doc[i + 1: e]]",
"def test_remove_single_line_comments_annotation():\n\n\tinput_ = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t//comment\n\t\t\t\t//@Test //comment\n\t\t\t\t//comment\n\t\t\t\tline3 \"\"\"\n\n\texpect = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t\n\t\t\t\t//@Test //comment\n\t\t\t\t\n\t\t\t\tline3 \"\"\"\n\n\tassert aunit.remove_single_line_comments(input_) == expect",
"def splitLine(text):\r\n sp = text.split(\" \")\r\n try:\r\n a = sp[0]\r\n b = \" \".join(sp[1:])\r\n except:\r\n a = text\r\n b = \"\"\r\n return a, b",
"def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]",
"def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]",
"def _parse_comments(reader):\n regex = r'\\s*(#|\\/{2}).*$'\n regex_inline = r'(:?(?:\\s)*([A-Za-z\\d\\.{}]*)|((?<=\\\").*\\\"),?)(?:\\s)*(((#|(\\/{2})).*)|)$'\n\n pipe = []\n for line in reader:\n if re.search(regex, line):\n if re.search(r'^' + regex, line, re.IGNORECASE): continue\n elif re.search(regex_inline, line):\n pipe.append(re.sub(regex_inline, r'\\1', line))\n else:\n pipe.append(line)\n return \"\\n\".join(pipe)",
"def Split_to_Lines(self):\r\n\r\n line = []\r\n word = \"\"\r\n comment = False\r\n String = False\r\n for i in range(0, len(self.Code)):\r\n if self.Code[i] == '\\n':\r\n if word != '':\r\n if (String is True) and (word[0] != word[len(word) - 1]):\r\n return False\r\n line.append(word)\r\n if len(line) != 0:\r\n self.Code_Lines.append(line)\r\n if len(line) >= 2:\r\n if line[0] == \"end\":\r\n break\r\n word = \"\"\r\n line = []\r\n comment = False\r\n String = False\r\n elif not comment:\r\n if self.Code[i] == ' ':\r\n if not String:\r\n if word != \"\" and word != '':\r\n line.append(str(word))\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n else:\r\n if self.Code[i] == '\"':\r\n if not String:\r\n if word != \"\":\r\n if word != '':\r\n line.append(word)\r\n word = '\"'\r\n String = True\r\n elif word[0] == self.Code[i]:\r\n String = False\r\n word += self.Code[i]\r\n if word != '':\r\n line.append(word)\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n elif self.Code[i] == '\\'':\r\n if not String:\r\n if word != \"\":\r\n if word != '':\r\n line.append(word)\r\n word = '\\''\r\n String = True\r\n elif word[0] == self.Code[i]:\r\n String = False\r\n word += self.Code[i]\r\n if word != '':\r\n line.append(word)\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n else:\r\n if String:\r\n word += self.Code[i]\r\n else:\r\n if self.Code[i] == ';':\r\n comment = True\r\n\r\n elif self.Code[i] in self.Special_Symbols:\r\n if word != '':\r\n line.append(word)\r\n line.append(self.Code[i])\r\n word = \"\"\r\n else:\r\n line.append(self.Code[i])\r\n\r\n else:\r\n word += self.Code[i].lower()\r\n\r\n return self.Code_Lines",
"def clean_comments(self):\n new_lines = list()\n for line in self.lines:\n if ((not line.startswith(\"//\")) & (not line.isspace()) &\n (not line.startswith(\"/*\") & (not line.startswith(\"*/\")))):\n line = Parser.strip_line(line)\n new_lines.append(line)\n self.lines = new_lines",
"def clean_comment(line):\n if line.startswith(\"#!\"):\n line = line[2:]\n else:\n line = line[1:]\n if line.startswith(\" \"):\n line = line[1:]\n if not line.endswith('\\n'):\n line += '\\n'\n return line",
"def getHTMLComments(self, text):\n return self.doSpecial(text, '<!--', '-->', self.fParseHTMLComments)",
"def test_remove_single_line_comments_noannotation():\n\n\tinput_ = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t//comment\n\t\t\t\tline3 \"\"\"\n\n\texpect = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t\n\t\t\t\tline3 \"\"\"\n\n\tassert aunit.remove_single_line_comments(input_) == expect",
"def split_pun(comment):\n abb_regex = get_abbreviations_regex()\n \n comment = re.sub(r'([\\W_]+)',r' \\1 ',comment,flags=re.IGNORECASE)\n \n comment = re.sub(r' \\' ','\\'',comment,flags=re.IGNORECASE)\n \n comment = re.sub(r' \\. ','.',comment,flags=re.IGNORECASE)\n \n comment = re.sub(abb_regex,r\" . \",comment,flags=re.IGNORECASE)\n \n comment = re.sub(r'\\s{2,}',\" \",comment,flags=re.IGNORECASE)\n \n return comment",
"def listFromLines(lines):\n reComment = re.compile('#.*')\n temp = [reComment.sub('',x).strip() for x in lines.split('\\n')]\n temp = [x for x in temp if x]\n return temp",
"def _split_line( self, data_list, line_num, text ):\n\t\t# if blank line or context separator, just add it to the output list\n\t\tif not line_num:\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# if line text doesn't need wrapping, just add it to the output list\n\t\tsize = len( text )\n\t\tmax_len = self._wrapcolumn\n\t\tif ( size <= max_len ) or ( ( size - ( text.count( '\\0' ) * 3 ) ) <= max_len ):\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# scan text looking for the wrap point, keeping track if the wrap\n\t\t# point is inside markers\n\t\ti = 0\n\t\tn = 0\n\t\tmark = ''\n\t\twhile n < max_len and i < size:\n\t\t\tif text[i] == '\\0':\n\t\t\t\ti += 1\n\t\t\t\tmark = text[i]\n\t\t\t\ti += 1\n\t\t\telif text[i] == '\\1':\n\t\t\t\ti += 1\n\t\t\t\tmark = ''\n\t\t\telse:\n\t\t\t\ti += 1\n\t\t\t\tn += 1\n\n\t\t# wrap point is inside text, break it up into separate lines\n\t\tline1 = text[:i]\n\t\tline2 = text[i:]\n\n\t\t# if wrap point is inside markers, place end marker at end of first\n\t\t# line and start marker at beginning of second line because each\n\t\t# line will have its own table tag markup around it.\n\t\tif mark:\n\t\t\tline1 += '\\1'\n\t\t\tline2 = '\\0' + mark + line2\n\n\t\t# tack on first line onto the output list\n\t\tdata_list.append( ( line_num, line1 ) )\n\n\t\t# use this routine again to wrap the remaining text\n\t\tself._split_line( data_list, '>', line2 )",
"def splitlines(self) -> List[String]:\n pass",
"def separate_comments(self):\n if not hasattr(self, 'cleaned_html'):\n self.cleaned_html = self.clean_html()\n \n self.separated_comments = self.cleaned_html.split(self.post_splitter)\n return self.separated_comments",
"def Comment(self, comment):\n self.script.append(\"\")\n for i in comment.split(\"\\n\"):\n self.script.append(\"# \" + i)\n self.script.append(\"\")",
"def _split_lines(self, lines, separator_marker):\n result = []\n current_group = []\n for line in lines:\n if re.match(rf'[^\\S\\n]*{separator_marker}\\w+(\\(.*\\))?:', line):\n if current_group:\n result.append(current_group)\n current_group = []\n current_group.append(line)\n if current_group:\n result.append(current_group)\n return result",
"def lines_to_blocks(text):\n n_sep = text.count('\\n\\n')\n n_lines = text.count('\\n')\n #approximate ratio of double newlines vs single newline: 40\n if int(n_sep/n_lines*100) > 40:\n text = re.sub('\\n\\n', '\\n',text)\n #try to split it up with topic indicators such as numbers or bullet points\n text = re.sub(r'[0-9]+[.]', '\\n',text)\n text = re.sub('•', '\\n',text)\n return text"
] | [
"0.6692685",
"0.66081315",
"0.65925145",
"0.6545137",
"0.6362264",
"0.6226867",
"0.62236226",
"0.6117526",
"0.6067819",
"0.60001516",
"0.59967846",
"0.5966611",
"0.59597796",
"0.5954498",
"0.59440887",
"0.59440887",
"0.5927107",
"0.5925445",
"0.59013635",
"0.5877957",
"0.5875666",
"0.58529496",
"0.58512974",
"0.58387405",
"0.58302045",
"0.58242655",
"0.58213",
"0.5811949",
"0.5803566",
"0.5784433"
] | 0.6687648 | 1 |
Calculate cosine distance between two vectors | def findCosineDistance(vector1, vector2):
vec1 = vector1.flatten()
vec2 = vector2.flatten()
a = np.dot(vec1.T, vec2)
b = np.dot(vec1.T, vec1)
c = np.dot(vec2.T, vec2)
return 1 - (a / (np.sqrt(b) * np.sqrt(c))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cosine_distance(u, v):\n #print u,v\n return numpy.dot(u, v) / (math.sqrt(numpy.dot(u, u)) * math.sqrt(numpy.dot(v, v)))",
"def cosine_distance(u, v):\n return numpy.dot(u, v) / (math.sqrt(numpy.dot(u, u)) * math.sqrt(numpy.dot(v, v)))",
"def cosine_similarity(v1: Vector, v2: Vector) -> float:\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))",
"def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))",
"def cosine(vector_1, vector_2):\n\n def _norm(_v):\n return np.sqrt(sum([x ** 2 for x in _v.values()]))\n\n numerator = dot_product(vector_1, vector_2)\n denominator = _norm(vector_1) * _norm(vector_2)\n if denominator == 0:\n return -1\n return numerator / denominator",
"def cosine_dist(d1, d2):\n suma=0\n for x in d1:\n if x in d2:\n suma+=(d1[x]*d2[x])\n sqrt1=0\n sqrt2=0\n for i in d1:\n sqrt1+=math.pow(d1[i],2)\n for i in d2:\n sqrt2+=math.pow(d2[i],2)\n return 1-suma/(math.sqrt(sqrt1)*math.sqrt(sqrt2))",
"def cosine_distance(a, b, axis=1):\n a_norm = np.dot(a,a)**.5\n b_norm = np.sum(b**2, axis=axis)**.5\n return np.dot(b,a)/(a_norm*b_norm)",
"def get_cosine(vec1, vec2):\n OPS = get_current_ops()\n v1 = OPS.to_numpy(OPS.asarray(vec1))\n v2 = OPS.to_numpy(OPS.asarray(vec2))\n return numpy.dot(v1, v2) / (numpy.linalg.norm(v1) * numpy.linalg.norm(v2))",
"def cosine_distance(point1, point2):\n cos_dist = 0\n length_point1 = norm(point1)\n length_point2 = norm(point2)\n cos_dist = 1 - (dot_product(point1, point2)/(length_point1 * length_point2))\n return cos_dist",
"def cosine_distance(x1, x2):\n x1 = tf.cast(x1, dtype=tf.float32)\n x2 = tf.cast(x2, dtype=tf.float32)\n\n # dot product between rows of `x_1` and rows of `x_2`\n # \"ij,ij->i\" := output[i] = sum_j x1[i, j] * x2[i, j]\n cos_thetas = tf.linalg.einsum(\"ij,ij->i\", x1, x2)\n cos_distances = 1 - cos_thetas\n\n # deal with numerical inaccuracies setting small negatives to zero\n cos_distances = tf.maximum(cos_distances, 0.0)\n\n return cos_distances",
"def cosine_similarity(vec1, vec2) -> float:\n return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))",
"def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))",
"def cosine_similarity(a, b):\n cs = dot_product(a, b)/(norm(a) * norm(b))\n return cs",
"def compute_cosine_sim(vec1, vec2):\r\n\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.dot(vec1, vec2)/(norm(vec1) * norm(vec2))",
"def cosine_distance(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return 1 - (w12 / (w1 * w2).clamp(min=eps)).squeeze()",
"def cosine_similarity(vector_x, vector_y):\n if(len(vector_x)!=len(vector_y)):\n raise Exception('Vectors must be the same dimensions')\n \n return 1-np.dot(vector_x,vector_y)/(np.linalg.norm(vector_x)*np.linalg.norm(vector_y))",
"def tf_cosine_distance(self, a, b):\n normalize_a = tf.nn.l2_normalize(a, -1)\n normalize_b = tf.nn.l2_normalize(b, -1)\n cos_similarity = tf.reduce_sum(\n tf.multiply(normalize_a, normalize_b), axis=-1, keep_dims=True\n )\n return (1.0 - cos_similarity) / 2.0",
"def cos(\r\n vec1: torch.FloatTensor, vec2: torch.FloatTensor, dim: int = -1\r\n) -> torch.FloatTensor:\r\n return torch.sum(vec1 * vec2, dim=dim) / (\r\n vec1.norm(dim=dim) * vec2.norm(dim=dim) + EPS\r\n )",
"def cosine_collection_distance(x1, x2):\n x1 = tf.cast(x1, dtype=tf.float32)\n x2 = tf.cast(x2, dtype=tf.float32)\n\n # dot product between rows of `x1` and columns of `x2` transpose\n cos_thetas = tf.linalg.matmul(x1, x2, transpose_b=True)\n pairwise_distances = 1 - cos_thetas\n\n # deal with numerical inaccuracies setting small negatives to zero\n pairwise_distances = tf.maximum(pairwise_distances, 0.0)\n\n return pairwise_distances",
"def cos_sim(v1: Union[np.ndarray, np.iterable, int, float], v2: Union[np.ndarray, np.iterable, int, float]) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))",
"def cos_sim(v1, v2):\r\n return np.inner(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))",
"def cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n # return cosine_similarity(v1, v2)[0][0]",
"def cos_sim(vec1, vec2):\n if len(vec1) != len(vec2):\n print 'dimension does not agree.'\n numerator_sum = 0 \n for i in range(len(vec1)):\n numerator_sum = numerator_sum + vec1[i]*vec2[i]\n \n denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)\n \n return numerator_sum/denom",
"def cosine_distance(A, B):\n\n A = A / T.sqrt(T.sum(A ** 2, axis=1)).reshape((-1, 1))\n B = B / T.sqrt(T.sum(B ** 2, axis=1)).reshape((-1, 1))\n D = T.dot(A, T.transpose(B))\n\n return 1 - D",
"def __cos_sim(self, v1, v2):\n if np.count_nonzero(v1) == 0 or np.count_nonzero(v2) == 0:\n # whenever at least one of the vectors is all zeros, spatial.distance.cosine will fail by returning nan\n ret = 0\n else:\n ret = 1 - spatial.distance.cosine(v1, v2)\n return ret",
"def compute_cosine_sim(vec1, vec2):\n numer = np.dot(vec1.reshape((300,)), vec2.reshape((300,)))\n denom = np.sqrt(np.sum(np.square(vec1.reshape(300, )))) * np.sqrt(\n np.sum(np.square(vec2.reshape(300, ))))\n\n similarity = numer / denom\n\n return similarity",
"def cosine_similarity(self, x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))",
"def cosine_sim(a: np.ndarray, \n b: np.ndarray \n ) -> float:\n return (\n 1 + a.dot(b) / \n (np.linalg.norm(a)*np.linalg.norm(b))\n ) / 2",
"def cosine_similarity(v1, v2):\n sim = np.sum(v1*v2)/np.sqrt(np.sum(v1**2))/np.sqrt(np.sum(v2**2))\n return sim",
"def vector_cosine_angle(vec_1:tuple, vec_2:tuple)->float:\n if is_zero_vector(vec_1) or is_zero_vector(vec_2):\n return None\n return dot_product(vec_1, vec_2) / (magnitude(vec_1) * magnitude(vec_2))"
] | [
"0.8144758",
"0.806103",
"0.7972851",
"0.79563916",
"0.7951864",
"0.77495205",
"0.7739488",
"0.7737394",
"0.7714112",
"0.7667365",
"0.76573527",
"0.7630341",
"0.7616587",
"0.76105106",
"0.76048666",
"0.7574436",
"0.7551911",
"0.7540636",
"0.7515504",
"0.7509719",
"0.7507556",
"0.7460115",
"0.7421564",
"0.74071646",
"0.7367841",
"0.73649263",
"0.7355743",
"0.7353771",
"0.73448557",
"0.72659564"
] | 0.8303037 | 0 |
Add index operation with name to the operations given. | def add_index_operation(self, name, operations):
if name not in self._index_operations:
self._add_io(name, operations)
else:
raise AttributeError("An index operation with the name {} was already taken".format(name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_index(self, name, func):\n assert name not in self.indices\n info_name = 'index:%s:%s' % (self.info['name'], name)\n info = self.store._get_info(info_name, index_for=self.info['name'])\n index = Index(self, info, func)\n self.indices[name] = index\n if IndexKeyBuilder:\n self._index_keys = IndexKeyBuilder(self.indices.values()).build\n return index",
"def _apply_index_op(db, op):\n if 'createIndexes' not in op['o']:\n return\n o = op['o']\n coll_name = o['createIndexes']\n key = list(o['key'].items())\n name = o['name']\n return db[coll_name].create_index(key, name=name)",
"def add_operation(self, op):\n\n self.operations[op.name] = op",
"def __call__(self, op):\n self._handle_renameCollection(op)\n if self.regex.match(op['ns']):\n ns = self.regex.sub(self.new_ns, op['ns']).rstrip(\".\")\n logging.debug(\"renaming %s to %s\", op['ns'], ns)\n op['ns'] = ns\n if op['ns'].endswith('.system.indexes'):\n # index operation; update ns in the op also.\n self(op['o'])\n self._handle_create(op)",
"def addOp(self, op):\n self.operations << op",
"def AddOperation(self, op):\n self._operations.append(op)",
"def register_operation(self, name, result, args, kwargs):\r\n if not isinstance(result, autodiff.tensor.Tensor):\r\n result = autodiff.tensor.Tensor(result, graph=self)\r\n args = [x if isinstance(x, autodiff.tensor.Tensor) \r\n else autodiff.tensor.Tensor(x, graph=self) for x in args]\r\n self.operation_map[result.id] = Operation(name, result, args, kwargs)",
"def add_target_and_index(self, name, sig, signode):\n key = normalize_object_name(name)\n if key in self.state.document.ids:\n return\n\n signode['names'].append(name)\n signode['ids'].append(key)\n signode['first'] = not self.names\n self.indexnode['entries'].append(\n ('single', 'JSON Objects; {}'.format(name), key, '', None))",
"def add_op(self, op):\n self._operations.append(op)",
"def _register_operation(self, **operation):\n name = operation[\"name\"]\n if name in self.operations:\n raise ValueError(\"operation name already registered: {}\".format(name))\n self.operations[name] = _Operation({**operation, \"resource\": self})",
"def operation(self, name):\n\n try:\n return self.operations[name]\n except KeyError:\n return self.operation_not_found(name)",
"def add(self, name, index = None):\n if index is None:\n while self.indexDict.has_key(self.count):\n self.count += 1\n index = self.count\n self.fieldDict[name] = index\n self.indexDict[index] = name",
"def add(self, **kwargs) -> None:\n self.append(Operation(**kwargs))",
"def add_impala_operation(op, name, database):\n udf.add_impala_operation(op, name, database)",
"def invoke(self, op):\n for rename in self:\n rename(op)",
"def addop(name, fields, args=None, alias=False):\n\n namespace = {\"fields\": fields, \"alias\": alias}\n\n if args is not None:\n namespace[\"args\"] = args\n\n # Dynamically create the \"name\" object\n type(name, (mn_pinky,), namespace)",
"def add_repair_operator(\n self, op: _OperatorType, name: Optional[str] = None\n ):\n logger.debug(f\"Adding repair operator {op.__name__}.\")\n self._r_ops[name if name else op.__name__] = op",
"def register_op(op_name, **kwargs):\n _DEFAULT_SCOPE[TargetRegistry].register_op(op_name, **kwargs)\n return",
"def instantiate_indexor(prefix, width):\n stdlib = py_ast.Stdlib()\n name = py_ast.CompVar(NAME_SCHEME[\"index name\"].format(prefix=prefix))\n add_name = py_ast.CompVar(f\"{prefix}_add\")\n cells = [\n py_ast.Cell(name, stdlib.register(width)),\n py_ast.Cell(add_name, stdlib.op(\"add\", width, signed=False)),\n ]\n\n init_name = py_ast.CompVar(NAME_SCHEME[\"index init\"].format(prefix=prefix))\n init_group = py_ast.Group(\n init_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 2 ** width - 1), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(init_name, \"done\")\n ),\n ],\n )\n\n upd_name = py_ast.CompVar(NAME_SCHEME[\"index update\"].format(prefix=prefix))\n upd_group = py_ast.Group(\n upd_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 1), py_ast.CompPort(add_name, \"left\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"out\"), py_ast.CompPort(add_name, \"right\")\n ),\n py_ast.Connect(\n py_ast.CompPort(add_name, \"out\"), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(upd_name, \"done\")\n ),\n ],\n )\n\n return (cells, [init_group, upd_group])",
"def _add_default_op(op_name):\n _add_op(\"__%s__\"%op_name, getattr(operator, op_name))",
"def set_operation_name(self, operation_name):\n return self",
"def _add_default_ops(op_name):\n _add_default_op(op_name)\n _add_default_reverse_op(op_name)",
"def AddIndex(self, target):\n if \"w\" not in self.mode:\n raise IOError(\"FileStoreImage %s is not in write mode.\", self.urn)\n predicate = (\"index:target:%s\" % target).lower()\n data_store.DB.MultiSet(self.urn, {predicate: target}, token=self.token,\n replace=True, sync=False)",
"def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))",
"def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))",
"def getOperationByName(self, name):\n for item in self.operations:\n if item.name == name:\n return item\n raise KeyError, \"No operation named %s\" % name",
"def document_add(index_name, doc_type, doc, doc_id=None):\n resp = es.index(index=index_name, doc_type=doc_type, body=doc, id=doc_id)\n print(resp)",
"def add_operations_from(self, obj):\n\n for name in dir(obj):\n op = getattr(obj, name)\n if isinstance(op, Operation):\n self.add_operation(op)",
"def apply(db, op):\n dbname = op['ns'].split('.')[0] or \"admin\"\n _db = db[dbname]\n return _get_index_handler(db)(_db, op) or _apply_regular(_db, op)",
"def set_operation_name(self, operation_name: str) -> 'Span':\n with self.update_lock:\n self.operation_name = operation_name\n return self"
] | [
"0.68858343",
"0.6698154",
"0.64274466",
"0.6417586",
"0.63482445",
"0.6007961",
"0.5947242",
"0.5861968",
"0.5853543",
"0.57812166",
"0.57532567",
"0.57404816",
"0.57012236",
"0.5667653",
"0.5642528",
"0.5634883",
"0.56331265",
"0.5619371",
"0.55785316",
"0.5549118",
"0.55464095",
"0.55158126",
"0.5503876",
"0.5477135",
"0.5477135",
"0.54508245",
"0.5447577",
"0.54331696",
"0.53988826",
"0.5366012"
] | 0.85664165 | 0 |
Return the offset of the param inside this parameterized object. This does not need to account for shaped parameters, as it basically just sums up the parameter sizes which come before param. | def _offset_for(self, param):
if param.has_parent():
p = param._parent_._get_original(param)
if p in self.parameters:
return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)
return self._offset_for(param._parent_) + param._parent_._offset_for(param)
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def offset(self):\n return self.__offset",
"def offset(self):\n return self.__offset",
"def get_offset(self):\n return self.offset",
"def wm_offset(self):\n return self.get_par(\"offset\")",
"def offset(self):\n return self._offset",
"def offset(self):\n return self._offset",
"def offset(self):\n return self._offset",
"def offset(self):\n return self._offset",
"def offset(self):\n return self._offset",
"def offset(self):\n return self._offset",
"def offset(self):\n return self._offset",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def offset(self):\n\n return self._offset",
"def get_pos(self, mode, param, param_idx):\n\n if mode == 0:\n return param[param_idx]\n elif mode == 1:\n return self.ptr + param_idx + 1\n elif mode == 2:\n return self.r + param[param_idx]",
"def offset(self) -> Tuple[int, int]:\n return (self.ioffset[0].to_pixels(self.parent.width),\n self.ioffset[1].to_pixels(self.parent.height))",
"def axis_offset(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"axis_offset\")",
"def axis_offset(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"axis_offset\")",
"def axis_offset(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"axis_offset\")",
"def offset(self):\n return self.query.offset",
"def offset(self):\r\n return self._get_instantiation()[3]",
"def get_field_relative_offset(self, field_name):\n return self.__field_offsets__[field_name]",
"def get_input_offset(self):\n return ELFLING_PADDING + len(self.__data) - 4",
"def min_offset(self):\n return self.offset",
"def offset(self):\n return _PositionD(self._dx, self._dy)",
"def get_offset(self):\r\n offset = self.offset\r\n\r\n if 'offset' in self.request_data:\r\n offset = self.request_data['offset']\r\n\r\n try:\r\n offset = int(offset)\r\n except ValueError:\r\n raise BadRequest(\"Invalid offset '%s' provided. Please provide an integer.\" % offset)\r\n\r\n if offset < 0:\r\n raise BadRequest(\"Invalid offset '%s' provided. Please provide a positive integer >= 0.\" % offset)\r\n\r\n return offset",
"def parameter_index(self):\n return self._parameter_index",
"def GetOffset(self, *args, **kwargs):\n pass",
"def offset(self):\n try:\n return self._annotations[EventData.PROP_OFFSET].decode('UTF-8')\n except (KeyError, AttributeError):\n return None"
] | [
"0.65088356",
"0.65088356",
"0.6396925",
"0.63015145",
"0.6291252",
"0.6291252",
"0.6291252",
"0.6291252",
"0.6291252",
"0.6291252",
"0.6291252",
"0.62532693",
"0.62532693",
"0.62532693",
"0.62496376",
"0.62265295",
"0.61739165",
"0.61539537",
"0.61539537",
"0.61539537",
"0.61262417",
"0.6096628",
"0.60391563",
"0.6031847",
"0.6002478",
"0.59846246",
"0.59754264",
"0.5955708",
"0.5952924",
"0.59426564"
] | 0.79988617 | 1 |
Get the raveled index for a param that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work. | def _raveled_index_for(self, param):
from ..param import ParamConcatenation
if isinstance(param, ParamConcatenation):
return np.hstack((self._raveled_index_for(p) for p in param.params))
return param._raveled_index() + self._offset_for(param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _raveled_index_for(self, param):\n from .param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)",
"def _raveled_index_for_transformed(self, param):\n ravi = self._raveled_index_for(param)\n if self._has_fixes():\n fixes = self._fixes_\n ### Transformed indices, handling the offsets of previous fixes\n transformed = (np.r_[:self.size] - (~fixes).cumsum())\n return transformed[ravi[fixes[ravi]]]\n else:\n return ravi",
"def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx",
"def ravel_index(x, dims):\n i = 0\n for dim, j in zip(dims, x):\n i *= dim\n i += j\n return i",
"def get_param_indexes(self):\n self.debug.start_function('get_param_indexes')\n\n for i, key in enumerate(self.mcmc_version.param_keys):\n self.param_idxs[key] = i\n for i, key in enumerate(self.mcmc_version.interp_keys):\n self.interp_idxs[key] = i\n\n self.debug.end_function()",
"def _get_array_index(array_path):\n\n if not array_path.startswith('@'):\n raise XJPathError('Array index must start from @ symbol.')\n array_path = array_path[1:]\n if array_path == 'last':\n return -1\n if array_path == 'first':\n return 0\n if array_path.isdigit() or (array_path.startswith('-')\n and array_path[1:].isdigit()):\n return int(array_path)\n else:\n raise XJPathError('Unknown index reference', (array_path,))",
"def _raveled_index(self):\n return np.r_[:self.size]",
"def _raveled_index(self):\n return np.r_[:self.size]",
"def tree_idx(tree,j1,J1,J2):\n j = j1\n for k in np.arange(J1+1,J2+1,1):\n j = tree[k]['IDX'][j]\n \n j2 = j\n return j2",
"def get_indexed_param(self):\n switcher_index = self.input_param(\"switch_index\").value \n indexed_param = self.input_param(\"index_%s\" % switcher_index)\n if indexed_param is None:\n raise Exception(\"Switch index value for %s is out of bouned.\" % self)\n return indexed_param",
"def pndindex(*args):\r\n return np.ndindex(*args)",
"def _get_chunk_indexer(self, array):\n if self.data.num_chunks == 1:\n return np.broadcast_to(0, len(array))\n return np.digitize(array, self.offsets[1:])",
"def getbaraidx(self,idx_,sub_,weights_):\n maxnum_ = self.getbaraidxinfo((idx_))\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n num_ = ctypes.c_int64()\n _sub_minlength = (maxnum_)\n if (maxnum_) > 0 and sub_ is not None and len(sub_) != (maxnum_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnum_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int64) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int64))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _weights_minlength = (maxnum_)\n if (maxnum_) > 0 and weights_ is not None and len(weights_) != (maxnum_):\n raise ValueError(\"Array argument weights is not long enough: Is %d, expected %d\" % (len(weights_),(maxnum_)))\n if isinstance(weights_,numpy.ndarray) and not weights_.flags.writeable:\n raise ValueError(\"Argument weights must be writable\")\n if weights_ is None:\n raise ValueError(\"Argument weights may not be None\")\n if isinstance(weights_, numpy.ndarray) and weights_.dtype is numpy.dtype(numpy.float64) and weights_.flags.contiguous:\n _weights_copyarray = False\n _weights_tmp = ctypes.cast(weights_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif weights_ is not None:\n _weights_copyarray = True\n _weights_np_tmp = numpy.zeros(len(weights_),numpy.dtype(numpy.float64))\n _weights_np_tmp[:] = weights_\n assert _weights_np_tmp.flags.contiguous\n _weights_tmp = ctypes.cast(_weights_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _weights_copyarray = False\n _weights_tmp = None\n \n res = __library__.MSK_XX_getbaraidx(self.__nativep,idx_,maxnum_,ctypes.byref(i_),ctypes.byref(j_),ctypes.byref(num_),_sub_tmp,_weights_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n num_ = num_.value\n _num_return_value = num_\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _weights_copyarray:\n weights_[:] = _weights_np_tmp\n return (_i_return_value,_j_return_value,_num_return_value)",
"def pndindex(*args):\n return np.ndindex(*args)",
"def getbaraidxij(self,idx_):\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbaraidxij(self.__nativep,idx_,ctypes.byref(i_),ctypes.byref(j_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n return (_i_return_value,_j_return_value)",
"def getind(self,start,end,blk):\n\n if blk is None:\n # Return all blocks\n blk = np.arange(self.ind[start].size)\n\n ind=np.array([])\n for k,val in enumerate(blk):\n ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))\n return ind.astype(int)",
"def get_index(band_nums,chan_num):\n ch_index=np.searchsorted(band_nums,chan_num)\n return int(ch_index)",
"def get_array_index_permutations(param):\n indices = list()\n\n try:\n for d in reversed(param.get(\"dimensions\")):\n i = list()\n for x in range(0, d.get(\"len\")):\n i.append(x)\n indices.append(i)\n\n array_dereferences = list(itertools.product(*indices))\n return array_dereferences\n\n except TypeError:\n return list()",
"def get_index_param(self, list_of_parameters_and_redshift, multiple_redshift=False):\n idx = pd.IndexSlice\n if multiple_redshift:\n ind = idx[self.data_type,list_of_parameters_and_redshift[0]] # first value is the redshift\n else :\n ind = idx[self.data_type,:]\n for i in range (self.num_parameters):\n if multiple_redshift:\n ind += idx[:,list_of_parameters_and_redshift[i+1]] # first value is the redshift\n else : \n ind += idx[:,list_of_parameters_and_redshift[i]] \n return ind",
"def get_ray_index_for_grid_point(ray, grid_idx, n_depth_pts):\n if ray.mu < 0:\n return (grid_idx)\n else:\n return (n_depth_pts - (grid_idx + 1))",
"def ravel_indices(shape, *args):\n new_positions = []\n for arg in args:\n new_positions.append(np.ravel_multi_index(arg, shape))\n return new_positions",
"def return_inds(arr, target):\n\n # Convert list to numpy array\n arr = np.array(arr)\n # Determine all possible combinations, excluding combinations of the same number\n arr_combs = list(combinations(arr, 2))\n \n # Determine the sum of each combination\n sum_arr = np.array(list((map(sum, arr_combs)))) \n \n # Determine the index where the sum is equal to our target\n vals = arr_combs[np.where(sum_arr == target)[0][0]]\n \n # Determine the two indices\n ind_1 = np.where(arr == vals[0])[0][0]\n ind_2 = np.where(arr == vals[1])[0][0]\n\n return ind_1, ind_2",
"def getbaraidxij(self,idx_): # 3\n res,resargs = self.__obj.getbaraidxij(idx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _i_return_value,_j_return_value = resargs\n return _i_return_value,_j_return_value",
"def get_grid_index_for_ray_point(ray, ray_idx, n_depth_pts):\n if ray.mu < 0:\n return (ray_idx)\n else:\n return (n_depth_pts - ray_idx - 1)",
"def _offset_for(self, param):\n if param.has_parent():\n p = param._parent_._get_original(param)\n if p in self.parameters:\n return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)\n return self._offset_for(param._parent_) + param._parent_._offset_for(param)\n return 0",
"def _offset_for(self, param):\n if param.has_parent():\n p = param._parent_._get_original(param)\n if p in self.parameters:\n return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)\n return self._offset_for(param._parent_) + param._parent_._offset_for(param)\n return 0",
"def _to_flat_index(self, idx_in):\n idx_in = tuple([np.array(z, ndmin=1, copy=False) for z in idx_in])\n msk = np.all(np.stack([t < n for t, n in zip(idx_in, self.shape)]), axis=0)\n idx = np.ravel_multi_index(\n tuple([t[msk] for t in idx_in]), self.shape, mode=\"wrap\"\n )\n\n return idx, msk",
"def one_dim_index(self, i, j):\n return int(i + j * self.nx)",
"def mainIndices(self):\n return self.i1, self.i2",
"def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0"
] | [
"0.71273017",
"0.6010891",
"0.59065056",
"0.5804188",
"0.58011395",
"0.57862777",
"0.57440937",
"0.57440937",
"0.57353306",
"0.56626016",
"0.56598043",
"0.5625494",
"0.5600237",
"0.5588847",
"0.5567432",
"0.55587304",
"0.55522966",
"0.554781",
"0.55181646",
"0.5507306",
"0.5506176",
"0.549819",
"0.5472546",
"0.5460384",
"0.54576814",
"0.54576814",
"0.54547673",
"0.54545677",
"0.5453246",
"0.5439349"
] | 0.71871 | 0 |
Helper preventing copied code. This adds the given what (transformation, prior, etc.) to the parameter index operations which; reconstrained are the reconstrained indices, and a warning is printed when reconstraining parameters if warning is True. | def _add_to_index_operations(self, which, reconstrained, what, warning):
if warning and reconstrained.size > 0:
# TODO: figure out which parameters have changed and only print those
print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
index = self._raveled_index()
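        # Register `what` on every raveled index of this parameter and return those indices to the caller.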
which.add(what, index)
return index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def warn_inplace(exc, nav, repl_pairs, local_opt):\r\n if isinstance(exc, InconsistencyError):\r\n return\r\n return NavigatorOptimizer.warn(exc, nav, repl_pairs, local_opt)",
"def ensure_default_constraints(self,warn=False):\n positive_strings = ['variance','lengthscale', 'precision']\n for s in positive_strings:\n for i in self.grep_param_names(s):\n if not (i in self.all_constrained_indices()):\n name = self._get_param_names()[i]\n self.constrain_positive(name)\n if warn:\n print \"Warning! constraining %s postive\"%name",
"def _optimise(self):\n pass",
"def reset_parameters(self, p: Dict[str, ArrayType]):\n super().reset_parameters(p)\n if self.method == \"trust-constr\":\n if self.opt.nk:\n self._constraints[\"k\"].A = csc_matrix(self.opt.M(self.p).toarray())\n self._constraints[\"k\"].lb = -self.opt.c(self.p).toarray().flatten()\n if self.opt.na:\n eq = -self.opt.b(self.p).toarray().flatten()\n self._constraints[\"a\"].A = csc_matrix(self.opt.A(self.p).toarray())\n self._constraints[\"a\"].lb = eq\n self._constraints[\"a\"].ub = eq\n if self._constraints:\n self.minimize_input[\"constraints\"] = list(self._constraints.values())",
"def __adjust(self, *args):\n return \"adjust\"",
"def optimize_parameters(self):\n pass",
"def optimize_parameters(self):\n pass",
"def optimize_parameters(self):\n pass",
"def _parameters_changed_notification(self, me, which=None):\n self._optimizer_copy_transformed = False # tells the optimizer array to update on next request\n self.parameters_changed()",
"def test_wrong_parameters(self):\n with self.assertWarns(RuntimeWarning):\n Parameters(1, mu=3, lambda_=2)",
"def accept_optimize():\n pass",
"def _check_inputs(node, storage_map, r_vals, dr_vals, active_nodes,\r\n clobber_dr_vals=True,\r\n perform=None, warn_input_not_reused=True):\r\n destroyed_idx_list = []\r\n destroy_map = getattr(node.op, 'destroy_map', {})\r\n for o_pos, i_pos_list in destroy_map.iteritems():\r\n destroyed_idx_list.extend(i_pos_list)\r\n destroyed_res_list = [node.inputs[i] for i in destroyed_idx_list]\r\n\r\n actually_inplace_outputs = []\r\n dmap = getattr(node.op, 'destroy_map', {})\r\n for oo, ii in dmap.iteritems():\r\n out_var = storage_map[node.outputs[oo]][0]\r\n in_var = storage_map[node.inputs[ii[0]]][0]\r\n if _may_share_memory(out_var, in_var):\r\n actually_inplace_outputs.append(node.outputs[oo])\r\n\r\n if warn_input_not_reused and destroyed_res_list:\r\n if isinstance(node.op, OutputGuard):\r\n # The point of OutputGuard is to be declared as destructive\r\n # while not destroying anything\r\n continue\r\n if out_var is not in_var:\r\n _logger.warning(\"Optimization Warning: input idx %d marked \"\r\n \"as destroyed was not changed for node '%s'\",\r\n ii[0], str(node))\r\n\r\n vmap = getattr(node.op, 'view_map', {})\r\n for oo, ii in vmap.iteritems():\r\n out_var = storage_map[node.outputs[oo]][0]\r\n in_var = storage_map[node.inputs[ii[0]]][0]\r\n if _may_share_memory(out_var, in_var):\r\n actually_inplace_outputs.append(node.outputs[oo])\r\n\r\n if warn_input_not_reused:\r\n # We don't try to optimize simple scalar and empty ndarray,\r\n # as this is not worth our time. This happen at least in\r\n # Subtensor when the output is a scalar But this depend on\r\n # the version of numpy!\r\n if getattr(out_var, 'size', 2) <= 1:\r\n continue\r\n if isinstance(node.op, OutputGuard):\r\n # This class is not in the final graph.\r\n continue\r\n if not _may_share_memory(out_var, in_var):\r\n _logger.warning(\"Optimization Warning: input idx %d marked \"\r\n \"as viewed but new memory allocated by node '%s'\",\r\n ii[0], str(node))\r\n\r\n for r_idx, r in enumerate(node.inputs):\r\n if not r.type.values_eq(r_vals[r], storage_map[r][0]):\r\n # some input node 'r' got changed by running the node\r\n # this may or may not be ok...\r\n if r in destroyed_res_list:\r\n # ok, we expected r to be destroyed\r\n if node in active_nodes:\r\n if dr_vals.get(r, (0, node))[1] is not node:\r\n # bad: there should only be one active node that destroys any variable\r\n raise Exception('failure in topological ordering')\r\n if clobber_dr_vals:\r\n dr_vals[r] = (storage_map[r][0], node) #no copy, this is the last use of this variable\r\n storage_map[r][0] = None #make sure that dr_vals[r] doens't get used again\r\n else:\r\n raise BadDestroyMap(node, r_idx, r_vals[r],\r\n storage_map[r][0], perform)\r\n\r\n return actually_inplace_outputs",
"def propose_optimize():\n pass",
"def _discretize(self, constraints_object):\n pass",
"def _constraints_other(self):\n pass",
"def addConstraint(constraint, problem):\n problem += constraint",
"def check_invalid_args_general(config):\n # Not mathematically correct, but might be required if prior is not\n # appropriate.\n if hasattr(config, 'kl_scale') and config.kl_scale != 1.0:\n warnings.warn('Prior matching term will be scaled by %f.'\n % config.kl_scale)\n\n if hasattr(config, 'store_final_model') and \\\n hasattr(config, 'train_from_scratch') and \\\n config.store_final_model and config.train_from_scratch:\n warnings.warn('Note, when training from scratch, the final model is ' +\n 'only trained on the last task!')",
"def ensure_default_constraints(self):\r\n positive_strings = ['variance', 'lengthscale', 'precision', 'decay', 'kappa']\r\n # param_names = self._get_param_names()\r\n currently_constrained = self.all_constrained_indices()\r\n to_make_positive = []\r\n for s in positive_strings:\r\n for i in self.grep_param_names(\".*\" + s):\r\n if not (i in currently_constrained):\r\n to_make_positive.append(i)\r\n if len(to_make_positive):\r\n self.constrain_positive(np.asarray(to_make_positive))",
"def change_priorities(self,idxs, errors): \n for i in range(len(idxs)):\n self.update(idxs[i] , errors[i])",
"def __relational_restriction_incorrect_parameter_vs_parameter(self):\n strTestName = 'Parameter higher or equal to a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, the parameter 2 must be lower or equal to 3*iRefParameter-4\n RxCSObject.paramAddMan('iParameter2', 'Int parameter')\n RxCSObject.paramType('iParameter2', int)\n RxCSObject.paramHE('iParameter2', 'iRefParameter1', mul=3, add=-4) # In English, iParameter must be higher than 4\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.iParameter2 = 4\n\n self.__parametersCheck_error(RxCSObject, RelationalError, strTestName)",
"def __call__(self, p, q, verbosity=1, warn=True):\n if self.exactly_zero: return 0.0 # shortcut for trivial case\n if self.weight == 0:\n return _np.sum(_np.abs(q - p)) / 2\n\n #Set parameter values\n self.P.value[:] = p[:]\n self.Q.value[:] = q[:]\n\n treg_factor_ok = False\n self.Treg_factor.value = self.initial_treg_factor\n while not treg_factor_ok:\n\n obj1 = self._obj(self.t_params)\n if REBUILD:\n self._rebuild_problem()\n else:\n self._build_problem()\n\n self.prob.solve(solver=remove_kicked(self.solver), verbose=(verbosity > 1),\n **default_cvxpy_args(self.solver))\n\n failed = self.T.value is None # or self.resid_tvd.value is None\n\n if not failed: # sanity check\n t_chk = self.build_transfer_mx(self.T_params.value)\n assert(_np.linalg.norm(_np.abs(self.T.value) - t_chk) < 1e-6)\n\n self.warning_msg = None\n if failed:\n if self.solver == \"SCS\":\n #raise ValueError(\"ResidualTVD: Convex optimizer failure\")\n for eps in [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]:\n if REBUILD:\n self._rebuild_problem()\n else:\n self._build_problem()\n self.prob.solve(solver=remove_kicked(self.solver), verbose=(verbosity > 1), eps=eps)\n failed = self.T.value is None # or self.resid_tvd.value is None\n\n if not failed:\n t_chk = self.build_transfer_mx(self.T_params.value)\n assert(_np.linalg.norm(self.T.value - t_chk) < 1e-6)\n\n if eps > 1e-4:\n self.warning_msg = (\"ResidualTVD: Needed to increase eps to %g.\"\n \" The resulting ResidualTVD values are less precise.\") % eps\n if warn: print(self.warning_msg)\n break\n else:\n raise ValueError(\"ResidualTVD: Convex optimizer failure\")\n else:\n raise ValueError(\"ResidualTVD: Convex optimizer failure\")\n\n #check that Treg_factor term doesn't dominate\n\n # Update: just leave this alone, since norm-penalty doesn't get reported - TODO later\n treg_factor_ok = True\n\n # ------------------------------------------------------------------\n #EXPERIMENTAL algorithms for updating Treg_factor ------------------\n # ------------------------------------------------------------------\n\n #resid_tvd = self._obj(self.T_params.value)\n #if resid_tvd > 10 * self.Treg_factor.value * _np.linalg.norm(self.T_params.value, 1):\n # Treg_factor_ok = True\n #else:\n # self.Treg_factor.value = resid_tvd / 10 # self.Treg_factor.value / 10\n\n #obj2 = self._obj(self.T_params.value)\n #if obj2 < obj1:\n # Treg_factor_ok = True\n #else:\n # #maybe penalty term dominated - reduce norm(tparams) penalty term\n # self.T_params.value[:] = self.t_params[:] #REVERT\n # self.T.value[:, :] = _np.sum([self.t_params[ind] * self.t_basis[ind]\n # for ind in range(self.dim)], axis=0) + _np.eye(self.n) # REVERT\n # self.Treg_factor.value = self.Treg_factor.value / 10\n # if self.Treg_factor.value > 1e-7:\n # print(\"REDUCING treg factor to: \", self.Treg_factor.value)\n # else:\n # Treg_factor_ok = True # give up!\n\n if self.Treg_factor.value != self.initial_treg_factor:\n if verbosity > 0: print(\"NOTE: Treg_factor was reduced to %g.\" % self.Treg_factor.value)\n #_warnings.warn((\"Initial Treg_factor (%g) was too large, and was reduced to %g.\"\n # \" Consider reducing the initial value to avoid repeating calculations.\")\n # % (self.initial_treg_factor, self.Treg_factor.value))\n\n obj2 = self._obj(self.T_params.value)\n if obj2 <= obj1:\n self.t_params[:] = self.T_params.value[:]\n else:\n print_revert_msg(\"ResidualTVD failed to reduce objective function (%g > %g)\", (obj2, obj1), verbosity)\n self.T_params.value[:] = self.t_params[:]\n self.T.value[:, :] = 
self.build_transfer_mx(self.t_params)\n\n return self._obj(self.t_params) # not self.obj.value b/c that has additional norm regularization",
"def test_creation_incorrect_change_hardbounds():\n with pytest.raises(ValueError) as __:\n value = 1\n int_a = param.Integer(value=value, hardbounds=[0, 10])\n int_a.hardbounds = [0, 10, 20]",
"def constraints(self, x):\n pass",
"def test_creation_incorrect_change_softbounds():\n with pytest.raises(ValueError) as __:\n value = 1\n int_a = param.Integer(value=value, softbounds=[0, 10])\n int_a.softbounds = [0, 10, 20]",
"def warning(self, *args, **kwargs):",
"def change_priorities(self,idxs,errors):\n #print(\"Indecies \",idxs)\n for i,idx in enumerate(idxs):\n self.update(idx, errors[i])",
"def constraints(self):\n ...",
"def __relational_restriction_correct_parameter_vs_parameter(self):\n strTestName = 'Parameter lower or equal to a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, the parameter 2 must be lower or equal to 3*iRefParameter-4\n RxCSObject.paramAddMan('iParameter2', 'Int parameter')\n RxCSObject.paramType('iParameter2', int)\n RxCSObject.paramLE('iParameter2', 'iRefParameter1', mul=3, add=-4) # In English, iParameter must be higher than 4\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.iParameter2 = 5\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def add_constraint(self, constraint, problem):\n problem += constraint",
"def _perturbInPlaceHard(self):\n die"
] | [
"0.6421251",
"0.57638377",
"0.53960013",
"0.5364344",
"0.52853006",
"0.52772886",
"0.52772886",
"0.52772886",
"0.52637357",
"0.52537453",
"0.52390754",
"0.5176815",
"0.51558375",
"0.514963",
"0.51430506",
"0.5140084",
"0.5106313",
"0.5041951",
"0.49778527",
"0.4977233",
"0.4967542",
"0.49671486",
"0.49668357",
"0.49666795",
"0.4944968",
"0.4941914",
"0.4939274",
"0.49331748",
"0.49250418",
"0.49204156"
] | 0.71344423 | 1 |
Helper preventing copied code. Remove the given transforms (transform, prior, etc.) from the param index operations which. | def _remove_from_index_operations(self, which, transforms):
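        # An empty transforms argument means: strip every property currently registered in which.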
if len(transforms) == 0:
transforms = which.properties()
removed = np.empty((0,), dtype=int)
for t in list(transforms):
unconstrained = which.remove(t, self._raveled_index())
removed = np.union1d(removed, unconstrained)
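            # Dropping the fixed "transform" must also unfix those indices at the highest parent,
            # so the cached fixes stay consistent across the hierarchy.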
if t is __fixed__:
self._highest_parent_._set_unfixed(self, unconstrained)
return removed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _remove_operator(self, operator):",
"def remove_extra_index_from_context_actions(context_action_dict):\n keys_to_keep = {'initial_value', 'replacement_value'}\n for question in context_action_dict:\n for obj_dct in context_action_dict[question]:\n total_keys = set(obj_dct.keys())\n keys_to_remove = total_keys - keys_to_keep\n for key in keys_to_remove:\n obj_dct.pop(key)\n return context_action_dict",
"def neg_inplace(a):",
"def remove_unused_args(args, thnn_args):\n def clean_name(name):\n name = name[:name.index('[')] if '[' in name else name\n if name.endswith('_'):\n name = name[:-1]\n return name\n uses = set([clean_name(arg['name']) for arg in thnn_args])\n uses.add('output_mask')\n args = [arg for arg in args if arg['name'] in uses]\n for arg in args:\n if 'default' in arg:\n del arg['default']\n return args",
"def exclude(self, *args, **kwargs):",
"def removeInputCopies(self):\n for p in self.assoc.parlist:\n if int(p['group']) == 1:\n _img = p['image'].datafile\n shutil.move(p['orig_filename'],_img)",
"def remove(self):\n self.inp.inputs.discard(self)\n self.out.outputs.discard(self)",
"def inverse_transform(self, X, copy=...):\n ...",
"def removed(self, comp):\n\t\tpass",
"def remove(func):",
"def composes_inplace_with(self):\n pass",
"def fast_inplace_check(inputs):\r\n fgraph = inputs[0].fgraph\r\n protected_inputs = [f.protected for f in fgraph._features if isinstance(f,theano.compile.function_module.Supervisor)]\r\n protected_inputs = sum(protected_inputs,[])#flatten the list\r\n protected_inputs.extend(fgraph.outputs)\r\n\r\n inputs = [i for i in inputs if\r\n not isinstance(i,graph.Constant)\r\n and not fgraph.destroyers(i)\r\n and i not in protected_inputs]\r\n return inputs",
"def _tf_remove_noise_op(self):\n remove_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n remove_noise_ops.append(tf1.assign_add(var, -noise))\n ret = tf.group(*tuple(remove_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()",
"def sub_inplace(a, b):",
"def _revert(self):\n if self.kwargs.get(\"collect\"):\n remove_exported_collect_data(self.kwargs[\"collect\"])",
"def remove_parameters(self):\n self.parameters = []",
"def _op_inplace(self, op: str, other: t.Any) -> te.Self:\n if hasattr(self.__members__, op):\n if isinstance(other, InspectableSet):\n other = other.__members__\n if getattr(self.__members__, op)(other) is NotImplemented:\n return NotImplemented\n return self\n return NotImplemented",
"def ignore(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def _untransform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover",
"def remove_action(self, action_index):\n self.pipeline.drop(action_index, inplace=True)",
"def discard(self):\n for f in self.featureNames:\n self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']\n return",
"def remove_incompatible_operations(pipelines):\n\n def find_duplicates(pipelines):\n for idx in range(len(pipelines)):\n for idx_ in range(idx + 1, len(pipelines)):\n if pipelines[idx] == pipelines[idx_]:\n return idx\n return -1\n\n\n def _remove_illegal_combination(pipelines, combination):\n illegal_pipes = []\n pipelines_ = []\n for idx, pipeline in enumerate(pipelines):\n combination_ = list(set.intersection(set(pipeline.keys()), set(combination)))\n actives = [pipeline[key] != None for key in pipeline if key in combination_]\n\n if sum(actives) > 1:\n illegal_pipes.append(idx) # Store the index of bad combination\n for param in combination_: # Generate substituting legal combinations\n if pipeline[param] != None: # we need to make new pipe\n pipeline_ = pipeline.copy()\n for param_ in combination_: # Set ALL conflicting parameters to None\n pipeline_[param_] = None\n pipeline_[param] = pipeline[param] # Set current parameter back to original value\n pipelines_.append(pipeline_)\n\n new_pipelines = [i for j, i in enumerate(pipelines) if j not in illegal_pipes]\n # new_pipelines.extend(pipelines_)\n return new_pipelines, pipelines_\n\n illegal_combinations = [['BASELINE', 'MSC', 'EMSC', 'RNV', 'SNV', 'LSNV'],\n ['SMOOTH', 'SAVGOL']]\n\n for combination in illegal_combinations:\n pipelines, new_pipes = _remove_illegal_combination(pipelines, combination)\n\n pipelines.extend(new_pipes)\n pipelines_set = {json.dumps(pipeline, sort_keys=True) for pipeline in pipelines}\n pipelines = [json.loads(item) for item in pipelines_set]\n\n\n return pipelines",
"def _RemoveFromCloneList(self, clone, attrNamesToClone):\n attrNamesToClone = super(EquationUnit, self)._RemoveFromCloneList(clone, attrNamesToClone)\n \n dontClone = [\"_Funcs\", \"_FuncsDefs\"]\n \n for name in dontClone:\n if name in attrNamesToClone:\n attrNamesToClone.remove(name)\n \n return attrNamesToClone",
"def get_other_params(step):\n params = copy.copy(step.get('parameters', {}))\n for to_remove in ['input', 'inputs', 'output', 'outputs', 'src_output', 'tgt_output']:\n if to_remove in params:\n del params[to_remove]\n return params",
"def remove_ops(self):\n return self._remove_ops",
"def _removeOutOfRangeTransformer(self, working_stats, params):\n\n choices = [int(choice) for choice, subsets in working_stats.iteritems()\n if [value for value in subsets if value > 0]]\n\n min_choice = min(choices)\n max_choice = max(choices)\n\n for choice in working_stats.keys():\n if int(choice) < min_choice or int(choice) > max_choice:\n del working_stats[choice]\n\n return working_stats",
"def _prune_parameter_by_idx(self,\n scope,\n params,\n pruned_idx,\n pruned_axis,\n place,\n lazy=False,\n only_graph=False,\n param_shape_backup=None,\n param_backup=None):\n if params[0].name() in self.pruned_list[pruned_axis]:\n return\n for param in params:\n assert isinstance(param, VarWrapper)\n param_t = scope.find_var(param.name()).get_tensor()\n if param_backup is not None and (param.name() not in param_backup):\n param_backup[param.name()] = copy.deepcopy(np.array(param_t))\n pruned_param = self.pruner.prune_tensor(\n np.array(param_t), pruned_idx, pruned_axis, lazy=lazy)\n if not only_graph:\n param_t.set(pruned_param, place)\n ori_shape = param.shape()\n\n if param_shape_backup is not None and (\n param.name() not in param_shape_backup):\n param_shape_backup[param.name()] = copy.deepcopy(param.shape())\n new_shape = list(param.shape())\n new_shape[pruned_axis] = pruned_param.shape[pruned_axis]\n param.set_shape(new_shape)\n _logger.debug(\n '|----------------------------------------+----+------------------------------+------------------------------|'\n )\n _logger.debug('|{:^40}|{:^4}|{:^30}|{:^30}|'.format(\n str(param.name()),\n str(pruned_axis), str(ori_shape), str(param.shape())))\n self.pruned_list[pruned_axis].append(param.name())",
"def removeAutoSaveFilter(filter):",
"def __delitem__(self, i):\n # An element of a policy function can't be deleted",
"def or__inplace(a,b):"
] | [
"0.5864179",
"0.57993364",
"0.5767263",
"0.5759645",
"0.57012475",
"0.5697558",
"0.5676129",
"0.56178457",
"0.559176",
"0.55809194",
"0.5571276",
"0.5533166",
"0.5462391",
"0.5455604",
"0.54388994",
"0.54100037",
"0.539974",
"0.5399218",
"0.53672886",
"0.53319484",
"0.53278744",
"0.53207713",
"0.5289455",
"0.52678233",
"0.52660394",
"0.5256599",
"0.5255282",
"0.51993924",
"0.5191167",
"0.51896566"
] | 0.65478945 | 1 |
Emit a JSON representation of a given row | def format(self, row):
return json.dumps(row.print_fields) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(self, row: Optional[Any] = None):\n self.fout.write('{}\\n'.format(json.dumps(row, cls=self.encoder)))",
"def row_to_json(row: sqlite3.Row) -> str:\n d = {}\n for key in row.keys():\n d[key] = row[key]\n\n return json.dumps(d)",
"def __data_row_to_json(self, row):\n raw_data = {}\n raw_data[\"body\"] = row.body\n raw_data[\"score_hidden\"] = row.score_hidden\n raw_data[\"archived\"] = row.archived\n raw_data[\"name\"] = row.name\n raw_data[\"author\"] = row.author\n raw_data[\"author_flair_text\"] = row.author_flair_text\n raw_data[\"downs\"] = row.downs\n raw_data[\"created_utc\"] = row.created_utc\n raw_data[\"subreddit_id\"] = row.subreddit_id\n raw_data[\"link_id\"] = row.link_id\n raw_data[\"parent_id\"] = row.parent_id\n raw_data[\"score\"] = row.score\n raw_data[\"retrieved_on\"] = row.retrieved_on\n raw_data[\"controversiality\"] = row.controversiality\n raw_data[\"gilded\"] = row.gilded\n raw_data[\"id\"] = row.id\n raw_data[\"subreddit\"] = row.subreddit\n raw_data[\"ups\"] = row.ups\n raw_data[\"distinguished\"] = row.distinguished\n raw_data[\"author_flair_css_class\"] = row.author_flair_css_class\n\n return json.dumps(raw_data)",
"def write_rows(self, rows: Union[List[dict], dict]):\n rows = listify(rows)\n flat = self.get_arg_value(\"json_flat\")\n\n indent = None if flat else 2\n prefix = \" \" * indent if indent else \"\"\n\n for row in rows:\n if self._first_row:\n pre = \"\" if flat else \"\\n\"\n else:\n pre = \"\\n\" if flat else \",\\n\"\n\n self._first_row = False\n self._fd.write(pre)\n\n value = json.dumps(row, indent=indent)\n value = textwrap.indent(value, prefix=prefix) if indent else value\n self._fd.write(value)\n del value, row",
"def to_json_line(bq_row):\n row = dict()\n for key in bq_row:\n row[key] = bq_row[key]\n\n # default=str converts non JSON serializable objects to str eg datetime.datetime\n row_json = json.dumps(row, default=str)\n return row_json.encode('utf-8')",
"def convert_to_json(self, rows):\n\t\tjson_list = []\n\t\tfor row in rows:\n\t\t\tjson_record = {}\n\t\t\tjson_record[\"movie_id\"] = row[0]\n\t\t\tjson_record[\"title\"] = change_title(row[1])\n\t\t\tjson_record[\"genres\"] = row[2][:5]\n\t\t\tjson_record[\"imdb_id\"] = row[3]\n\t\t\tjson_record[\"tmdb_id\"] = row[4]\n\t\t\tjson_record[\"rating\"] = row[5]\n\t\t\tjson_record[\"number_of_ratings\"] = row[6]\n\t\t\tjson_record[\"weighted_rating\"] = row[7]\n\t\t\tjson_record[\"release_year\"] = row[8]\n\t\t\tjson_record[\"img_path\"] = row[9]\n\t\t\tjson_record[\"description\"] = row[10]\n\t\t\tjson_record[\"director\"] = row[11]\n\t\t\tjson_record[\"length\"] = row[12]\n\t\t\tjson_list.append(json_record)\n\t\treturn json.dumps(json_list, indent = 4)",
"def row_list_to_json(rows: List[sqlite3.Row]) -> str:\n l = []\n for row in rows:\n l.append(row_to_json(row))\n\n return json.dumps(l)",
"def format_row(self, row):\n raise NotImplementedError()",
"def _json_export(self, exppath):\n # TODO: Settle on JSON format for colortable\n pass",
"def write_row(self, data):\n raise NotImplementedError()",
"def toJSON(self, file_path=str) -> None:\n try:\n return(exportJSON([value[0] for value in self.table.items()], file_path))\n except Exception as error:\n print(f\"Error: self.toJSON({file_path}) -> {error}\")",
"def kvp_writer_udf(row, fm_config):\n\n # get handler, that includes defaults\n xml2kvp_defaults = XML2kvp(**fm_config)\n\n # convert XML to kvp\n xml2kvp_handler = XML2kvp.xml_to_kvp(\n row.document, return_handler=True, handler=xml2kvp_defaults)\n\n # loop through and convert lists/tuples to multivalue_delim\n for k, v in xml2kvp_handler.kvp_dict.items():\n if type(v) in [list, tuple]:\n xml2kvp_handler.kvp_dict[k] = xml2kvp_handler.multivalue_delim.join(\n v)\n\n # mixin other row attributes to kvp_dict\n xml2kvp_handler.kvp_dict.update({\n 'record_id': row.record_id,\n 'combine_id': row.combine_id\n })\n\n # return JSON line\n return json.dumps(xml2kvp_handler.kvp_dict)",
"def gen_json(self, show_headers=True, show_tags=True, use_objects=False):\n is_first = True\n yield \"[\\n\"\n if use_objects:\n for row in self:\n if is_first:\n is_first = False\n yield json.dumps(row.dictionary, sort_keys=True, indent=2)\n else:\n yield \",\\n\" + json.dumps(row.dictionary, sort_keys=True, indent=2)\n else:\n for raw in self.gen_raw(show_headers, show_tags):\n if is_first:\n is_first = False\n yield json.dumps(raw)\n else:\n yield \",\\n\" + json.dumps(raw)\n yield \"\\n]\\n\"",
"def to_json(self):\n\t\treturn self._dataframe.reset_index().to_json(orient=\"records\")",
"def _jsonify(self):\n return self.experiment_record.to_ddb_record()",
"def json(self) -> CellJson:\n\n return {\"id\": self.id, \"content\": self.content, \"data\": self.data}",
"def format(self, table):\n #return table.data.to_json()\n data = _replace_nans(table.as_array().tolist())\n if hasattr(data, \"strip\") or \\\n (not hasattr(data, \"__getitem__\") and \\\n not hasattr(data, \"__iter__\")):\n # data is not a list/tuple => wrap it\n data = [ data ]\n v = {\n 'offset': table.offset,\n 'data': data,\n 'headers': table.headers,\n 'types': table.types,\n }\n if table.sizes is not None:\n v[\"sizes\"] = table.sizes\n return json.dumps(v, cls=ExtEncoder)",
"def write_row(row: dict):\n row = {k: format_float(v) for k, v in row.items()}\n writer.writerow(row)\n csvfile.flush()",
"def _serialize_row(self, data):\n if isinstance(data, str):\n return data\n\n if isinstance(data, np.ndarray):\n data = np.ndarray.flatten(data)\n\n if hasattr(data, \"__len__\"):\n if len(data) == 0:\n raise ValueError(\"Cannot serialize empty array\")\n csv_buffer = io.StringIO()\n csv_writer = csv.writer(csv_buffer, delimiter=\",\")\n csv_writer.writerow(data)\n return csv_buffer.getvalue().rstrip(\"\\r\\n\")\n\n raise ValueError(\"Unable to handle input format: %s\" % type(data))",
"def write(self, row):\n bytes = struct.pack(self.pack_format, *row)\n self.f.write(bytes)",
"def encode_record(record):\n return json.dumps(record)",
"def to_json(self, orient=\"columns\", double_precision=10,\n force_ascii=True):\n return dumps(self, orient=orient, double_precision=double_precision,\n ensure_ascii=force_ascii)",
"def dumps(row):\n return cPickle.dumps(row)",
"def to_json_string(self):\n return json.dumps(self.to_dict(), indent = 2, sort_keys = True) + \"\\n\"",
"def to_json(self, record: Mapping[str, Any]) -> str:\n return self.json_lib.dumps(record, cls=ObjectEncoder)",
"def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"",
"def _set_outputrow(self, v):\n super(Row, self).__setattr__(\"__output__\", v)",
"def json(self, update=False):\n return json.dumps(self.export(update=update), indent=4)",
"def process_row(self, row: Union[List[dict], dict]) -> List[dict]:\n rows = listify(row)\n rows = self.do_pre_row(rows=rows)\n row_return = [{\"internal_axon_id\": row[\"internal_axon_id\"]} for row in rows]\n rows = self.do_row(rows=rows)\n self.write_rows(rows=rows)\n del rows, row\n return row_return",
"def as_json(self):"
] | [
"0.745525",
"0.7241681",
"0.72320414",
"0.6897235",
"0.68846",
"0.6736316",
"0.65954936",
"0.65526325",
"0.6160118",
"0.61541754",
"0.6141491",
"0.61156374",
"0.6115557",
"0.6070374",
"0.60693794",
"0.6057097",
"0.5938166",
"0.5934188",
"0.58886075",
"0.58313084",
"0.5826782",
"0.5772088",
"0.5771293",
"0.5747949",
"0.57186973",
"0.5710404",
"0.57039976",
"0.57014555",
"0.56853765",
"0.5676864"
] | 0.7718322 | 0 |
Creates a dictionary of nodes listed by curie id from answers 1 and 2 | def make_node_dict(self):
if self.input1 is None or self.input2 is None:
raise Exception("Missing input: please run the populate() method first")
self.node_dict1 = {}
for node in self.input1['knowledge_graph']['nodes']:
self.node_dict1[node['id']] = node
self.node_dict2 = {}
for node in self.input2['knowledge_graph']['nodes']:
self.node_dict2[node['id']] = node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nodes_by_id(ntwrk, nodeid):\r\n return {k: v for el in ntwrk\r\n for k, v in el.items() if k == nodeid}",
"def node_diff(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n if self.node_dict1 is None or self.node_dict2 is None:\n self.make_node_dict()\n # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2\n g1={}\n g2={}\n # Set to keep track of the union of all curie ids\n curie_set = set()\n for curie in self.node_dict1.keys():\n g1[curie] = {}\n # intersection is only in the g1 dictionary\n g1[curie]['intersection'] = set()\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g1[curie]['node'] = set()\n curie_set.add(curie)\n for curie in self.node_dict2.keys():\n g2[curie] = {}\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g2[curie]['node'] = set()\n curie_set.add(curie)\n node_names1 = []\n node_names2 = []\n\n # extract all node ids (i.e. \"n0\",\"n1\",ect...)\n if len(self.input1['question_graph']['nodes'])>0:\n if 'id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']]\n elif 'node_id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']]\n if len(self.input2['question_graph']['nodes'])>0:\n if 'id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']]\n elif 'node_id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']]\n \n # initialize the result dictonary\n diff_dict = {}\n diff_dict[\"-1|-1\"] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # initialize node id tuple keys\n for id1 in node_names1:\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # iterate through answers\n for answer1 in self.input1['answers']:\n for answer2 in self.input2['answers']:\n for id1 in answer1['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer1['node_bindings'][id1], str):\n bindings1 = [answer1['node_bindings'][id1]]\n elif isinstance(answer1['node_bindings'][id1], list):\n bindings1 = answer1['node_bindings'][id1]\n for curie1 in bindings1:\n # store node id\n g1[curie1]['node'].add(id1)\n for id2 in answer2['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer2['node_bindings'][id2], str):\n bindings2 = [answer2['node_bindings'][id2]]\n elif isinstance(answer2['node_bindings'][id2], list):\n bindings2 = answer2['node_bindings'][id2]\n for curie2 in bindings2:\n # store node id\n g2[curie2]['node'].add(id2)\n if curie1 == curie2:\n # stor intersection tuple\n g1[curie1]['intersection'].add(id1+\"|\"+id2)\n # iterate through all curies\n for curie in curie_set:\n # check if curie is from answer 1\n if curie in g1.keys():\n # check if in intersection\n if len(g1[curie]['intersection'])>0:\n diff_dict[\"-1|-1\"]['intersection'] += [self.node_dict1[curie]]\n for id1 in node_names1:\n for id2 in node_names2:\n node_tuple = id1+\"|\"+id2\n if id1 in g1[curie]['node'] and id2 in g2[curie]['node']:\n diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]]\n elif id1 in g1[curie]['node']:\n diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]]\n elif id2 in g2[curie]['node']:\n diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]]\n # If not in intersection store in 
g1-g2\n else:\n diff_dict[\"-1|-1\"]['g1-g2'] += [self.node_dict1[curie]]\n for id1 in g1[curie]['node']:\n # iterate through all answer 2 ids\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2]['g1-g2'] += [self.node_dict1[curie]]\n # if not in g1 but in g2 then in g2-g1\n elif curie in g2.keys():\n diff_dict[\"-1|-1\"]['g2-g1'] += [self.node_dict2[curie]]\n for id2 in g2[curie]['node']:\n # iterate through all answer 1 ids\n for id1 in node_names1:\n diff_dict[id1+\"|\"+id2]['g2-g1'] += [self.node_dict2[curie]]\n return diff_dict",
"def ans():\n ret = {}\n for i in range(12):\n ret[ind[i]] = ans2[ind[i]]\n ret['id']=\"id\"\n return jsonify(ret)",
"def get_answers(self):\r\n answers = {}\r\n for ielt in self.ielements:\r\n ie_id = ielt.get('id')\r\n answers[ie_id] = {'rectangle': ielt.get('rectangle'), 'regions': ielt.get('regions')}\r\n\r\n return answers",
"def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict",
"def all_in_edges_of_node(self, id1: int) -> dict:\n if id1 in self.Nodes:\n ans = {}\n for i, j in self.Edges.items():\n if id1 in j:\n ans[i] = j[id1]\n return ans\n return {}",
"def create_nodes_and_edges(list_of_nodes_, adjacency_matrix_):\n\n # Random numbers for the labels\n random_numbers = np.arange(len(list_of_nodes_))\n np.random.shuffle(random_numbers)\n print random_numbers\n\n # Update the nodes: Every node gets told how many other nodes know it\n for node in sorted(list_of_nodes_, key=lambda x: x.id):\n node.knows = int(sum(np.ravel(adjacency_matrix[node.id])))\n node.known_by = int(np.ravel(sum(adjacency_matrix[:,node.id])))\n\n # Update the nodes: Every node gets its questionaire answers\n for node in sorted(list_of_nodes_, key=lambda x: x.id):\n try:\n with open('./data-answers/{}.csv'.format(node.id), 'r') as f:\n answers = f.readlines()\n node.age = answers[0].strip() if (answers[0].strip() and answers[0].strip() != '-1') else \"'?'\"\n node.academies = answers[1].strip() if (answers[1].strip() and answers[1].strip() != '-1') else \"'?'\"\n node.waylength = answers[2].strip() if (answers[2].strip() and answers[2].strip() != '-1') else \"'?'\"\n node.hiking = answers[3].strip() if (answers[3].strip() and answers[3].strip() != '-1') else \"'?'\"\n node.lake = answers[4].strip() if (answers[4].strip() and answers[4].strip() != '-1') else \"'?'\"\n node.choir = answers[5].strip() if (answers[5].strip() and answers[5].strip() != '-1') else \"'?'\"\n node.games = answers[6].strip() if (answers[6].strip() and answers[6].strip() != '-1') else \"'?'\"\n node.drinks = answers[7].strip() if (answers[7].strip() and answers[7].strip() != '-1') else \"'?'\"\n node.sleep = answers[8].strip() if (answers[8].strip() and answers[8].strip() != '-1') else \"'?'\"\n node.number = answers[9].strip() if (answers[9].strip() and answers[9].strip() != '-1') else \"'?'\"\n node.hotness = answers[10].strip() if (answers[10].strip() and answers[10].strip()!= '-1') else \"'?'\"\n node.hookups = answers[11].strip() if (answers[11].strip() and answers[11].strip()!= '-1') else \"'?'\"\n node.description = answers[12].strip() if (answers[12].strip() and answers[12].strip()!= '-1') else \"'?'\"\n \n except IOError:\n node.age = \"'?'\"\n node.academies = \"'?'\"\n node.waylength = \"'?'\"\n node.hiking = \"'?'\"\n node.lake = \"'?'\"\n node.choir = \"'?'\"\n node.games = \"'?'\"\n node.drinks = \"'?'\"\n node.sleep = \"'?'\"\n node.number = \"'?'\"\n node.hotness = \"'?'\"\n node.hookups = \"'?'\"\n node.description = \"?\"\n\n with open('nodes-and-edges.js', 'w+') as f:\n\n # Write the code for the Nodes to the file\n # This is just the preamble\n f.write('// The nodes for the graph \\n')\n f.write('var nodes = [ \\n')\n\n # And these are the actual data\n for node in sorted(list_of_nodes_, key=lambda x: x.id):\n pos = xy_from_group(node.group)\n f.write('\\t{{ id: {id}, '\n 'label: \"{random_number}\", '\n 'title: \"<small style=\\'font-family: Roboto Slab;\\'>'\n# 'Name: {label} <br>'\n# 'Fach: {major} <br>'\n 'AG: {group} <br>'\n '---<br>'\n 'Kennt {knows} Leute <br>'\n 'Wird gekannt von {known_by} Leuten <br>'\n '---<br>'\n 'Alter: {age} <br>'\n 'Anzahl Sommerakademien: {academies} <br>'\n 'Anfahrtsdauer: {waylength} <br>'\n 'Wander-Tage: {hiking} <br>'\n 'See-Tage: {lake} <br>'\n 'Chor-Tage: {choir} <br>'\n 'Spieleabende: {games} <br>'\n 'Beitrag zur Barkasse: {drinks} <br>'\n 'Schlaf pro Nacht: {sleep} <br>'\n 'Lieblingszahl: {number} <br>'\n 'Eigene Attraktivität: {hotness} <br>'\n 'Hookup-Schätzung: {hookups} <br>'\n 'Neubeuern in einem Wort: {description}'\n '</small>\", '\n 'value: {value}, '\n 'group: {group}, '\n 'knows: {knows}, '\n 'known_by: {known_by}, '\n 'x: {x}, 
'\n 'y: {y}, '\n 'color: {{ border: \"{border}\", '\n 'background: \"{background}\", '\n 'highlight: {{ border: \"{border}\", '\n 'background: \"{background}\" }} }}, '\n 'original_color: {{ border: \"{border}\", '\n 'background: \"{background}\", '\n 'highlight: {{ border: \"{border}\", '\n 'background: \"{background}\" }} }}, '\n 'age: {age}, '\n 'academies: {academies}, '\n 'waylength: {waylength}, '\n 'hiking: {hiking}, '\n 'lake: {lake}, '\n 'choir: {choir}, '\n 'games: {games}, '\n 'drinks: {drinks}, '\n 'sleep: {sleep}, '\n 'number: {number}, '\n 'hotness: {hotness}, '\n 'hookups: {hookups}, '\n 'description: \"{description}\" }},\\n'\n .format(id=node.id,\n random_number=random_numbers[node.id],\n label=node.name,\n major=node.major,\n group=node.group,\n x=pos[0],\n y=pos[1],\n knows=node.knows,\n known_by=node.known_by,\n value=node.known_by,\n border=DarkColor(int(node.group)),\n background=LightColor(int(node.group)),\n age=node.age,\n academies=node.academies,\n waylength=node.waylength,\n hiking=node.hiking,\n lake=node.lake,\n choir=node.choir,\n games=node.games,\n drinks=node.drinks,\n sleep=node.sleep,\n number=node.number,\n hotness=node.hotness,\n hookups=node.hookups,\n description=node.description))\n\n # Close the Node array properly\n f.write(']; \\n\\n\\n')\n\n # Create the edges...\n f.write('var edges = [\\n')\n\n # Now loop over the adjacency matrix to calculate the edges\n n_people = len(adjacency_matrix_)\n id = 0\n for row in range(n_people):\n for col in range(row):\n\n # CASE 1: Both people said they know each other.\n # We draw an undirected edge between them\n if adjacency_matrix_[row, col] and adjacency_matrix_[col, row]:\n startnode = get_node_by_id(list_of_nodes_, row)\n color = DarkColor(int(startnode.group))\n f.write('\\t{{ id: {}, from: {}, to: {}, '\n 'color: \"{}\", original_color: \"{}\"}},\\n'\n .format(id, row, col, color, color))\n id += 1\n\n # CASE 2: Person in row knows person in col, but not vice versa\n if adjacency_matrix_[row, col] and not adjacency_matrix_[col, row]:\n startnode = get_node_by_id(list_of_nodes_, row)\n color = DarkColor(int(startnode.group))\n f.write('\\t{{ id: {}, from: {}, to: {}, arrows: \"to\", '\n 'color: \"{}\", original_color: \"{}\"}},\\n'\n .format(id, row, col, color, color))\n id += 1\n\n # CASE 3: Person in col knows person in row, but not vice versa\n if not adjacency_matrix_[row, col] and adjacency_matrix_[col, row]:\n startnode = get_node_by_id(list_of_nodes_, col)\n color = DarkColor(int(startnode.group))\n f.write('\\t{{ id: {}, from: {}, to: {}, arrows: \"to\", '\n 'color: \"{}\", original_color: \"{}\"}},\\n'\n .format(id, col, row, color, color))\n id += 1\n\n # Close the Edges array properly\n f.write('];')\n\n print 'Created nodes-and-edges.js!'",
"def __init__(self, nodes):\n self.parents = {}\n self.ranks = {}\n\n for node in nodes:\n self.parents[node] = node\n self.ranks[node] = 0",
"def make_nodes_and_paths(friends_lst):\n\n # nodes = {}\n\n # for item in friends_lst:\n # friend1, friend2, group = item\n # for person in pair:\n # if not nodes.get(person):\n # nodes[person] = pair[1]\n\n # nodes = [{'name': person, 'friend': nodes[person]} for person in nodes.keys()]\n\n nodes = {}\n for item in friends_lst:\n friend1, friend2, group = item\n if not nodes.get(friend1):\n nodes[friend1] = group\n elif nodes.get(friend1) > group:\n nodes[friend1] = group\n\n nodes = [{'name': person, 'group': nodes[person]} for person in nodes.keys()]\n\n index_nodes = {}\n for idx, n in enumerate(nodes):\n index_nodes[n['name']] = (idx, n['group'])\n\n paths = []\n\n # paths.append({'source': item[1], 'target': item[0]})\n\n for item in friends_lst:\n # one = User.query.get(item.user_id)\n # two = User.query.get(item.friend_id)\n source, target, group = item\n paths.append({'source': index_nodes[source][0], 'target': index_nodes[target][0]})\n\n # print nodes\n # print index_nodes\n # print paths\n\n return nodes, paths",
"def dictize(self):\n dict = {}\n for node in self.sort():\n logger.debug(\"Dictize: id %s has name %s\" % (node._id, node.name))\n x = node._kwargs()\n dict[node._id]={\"klass\":node.__class__.__name__, \n \"kwargs\": x,\n \"children\":[child._id for child in node.children()]}\n return dict",
"def node_mapping(self):\n ...",
"def buildNodesDict(self):\n # Get relevant nodes from TANA ca_jc, intersect with BUS_ROUTE_TRAVERSAL_EDGES.\n # Then get the X,Y for the features.\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n arcpy.AddXY_management(PublicTransit.RELEVANT_NODES)\n nodes = arcpy.SearchCursor(PublicTransit.RELEVANT_NODES, \"\", \"\",\n \"ID_hash; POINT_X; POINT_Y\", \"\")\n self.nodesDict = dict()\n numNodes = int(arcpy.GetCount_management(PublicTransit.RELEVANT_NODES).getOutput(0))\n print \"Found %d nodes\" % numNodes\n for node in nodes:\n self.nodesDict[node.ID_hash] = Node(node.ID_hash, node.POINT_X, node.POINT_Y)\n del node\n del nodes",
"def get_question_answers(self):\r\n # dict of (id, correct_answer)\r\n answer_map = dict()\r\n for response in self.responders.keys():\r\n results = self.responder_answers[response]\r\n answer_map.update(results)\r\n\r\n # include solutions from <solution>...</solution> stanzas\r\n for entry in self.tree.xpath(\"//\" + \"|//\".join(solution_tags)):\r\n answer = etree.tostring(entry)\r\n if answer:\r\n answer_map[entry.get('id')] = contextualize_text(answer, self.context)\r\n\r\n log.debug('answer_map = %s', answer_map)\r\n return answer_map",
"def assign_no_to_node(self,list):\n list = sorted(list)\n d = {}\n for i,node in enumerate(list):\n #print i,node\n d[node] = i \n return d,len(d)",
"def process_answer(ans):\n\n #TODO: check whether need type coversion?\n ans['parentid'] = int(ans['parentid'])\n ## I remain comments here, maybe can do some sentiment analysis to evaluate score of answer\n return ans",
"def _init_catalog_node(catalog, pid, lid=None, rid=None):\n if pid not in catalog: catalog[pid] = {'_langs': {}}\n if lid is not None:\n if lid not in catalog[pid]['_langs']: catalog[pid]['_langs'][lid] = {'_res': {}, 'language': {}}\n if lid is not None and rid is not None:\n if rid not in catalog[pid]['_langs'][lid]['_res']: catalog[pid]['_langs'][lid]['_res'][rid] = {}",
"def create_nodes(self):\n # Create a special dictionary that will raise an error if a key is\n # updated. This avoids the\n nodes = NodeDict()\n\n return create_solph_nodes_from_data(self.input_data, nodes)",
"def get_mapped_answers(self):\r\n answers = (\r\n dict([(ie.get('id'), ie.get(\r\n 'rectangle')) for ie in self.ielements]),\r\n dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements]))\r\n return answers",
"def make_dict(\n nn,\n q_id,\n polarity,\n context_cond,\n cat,\n subcat,\n answer_info,\n bias_targets,\n version,\n notes,\n context,\n question,\n ans_list,\n ans_place,\n):\n this_dict = {\n \"example_id\": nn,\n \"question_index\": q_id,\n \"question_polarity\": polarity,\n \"context_condition\": context_cond,\n \"category\": cat,\n \"answer_info\": answer_info,\n \"additional_metadata\": {\n \"subcategory\": subcat,\n \"stereotyped_groups\": bias_targets,\n \"version\": version,\n \"source\": notes,\n },\n \"context\": context.strip(),\n \"question\": question.strip(),\n \"ans0\": ans_list[0],\n \"ans1\": ans_list[1],\n \"ans2\": ans_list[2],\n \"label\": ans_place,\n }\n return this_dict",
"def __init__(self):\n self.kids = [{}]\n self.root = 0\n self.vocabular = set([])",
"def __init__(self):\n self.head = Node(float('-inf'))\n self.tail = Node(float('inf'))\n self.head.next = self.tail\n self.tail.prev = self.head\n # value 1, 2, 3, key: hello or abc\n self.cntKey = {}\n # key : hello or abc, cnt value 1, 2, 3\n self.keyCnt = {}",
"def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)",
"def get_my_questions(user_id):\n questions = select_query(\n \"SELECT q_id,question FROM question WHERE question.user_id = (%s) ORDER BY create_time DESC \", user_id)\n\n answers = select_query(\n \"SELECT answer.q_id, answer.answer, answer.a_id,answer.is_answer FROM answer Left JOIN question on answer.q_id=question.q_id WHERE question.user_id =(%s)\", user_id)\n my_questions = {q[0]: copy.deepcopy(\n Question(q[1], q_id=q[0], user_id=user_id)) for q in questions}\n\n for a in answers:\n my_questions[a[0]]['answers'].append((a[1], a[2], a[3]))\n return my_questions.values()",
"def parser(self, answer):\n result = {}\n for rrsets in answer.response.answer:\n for item in rrsets.items:\n rdtype = self.get_type_name(item.rdtype)\n\n if item.rdtype == self.get_type_id('A'):\n if result.has_key(rdtype):\n result[rdtype].append(item.address)\n else:\n result[rdtype] = [item.address]\n return result",
"def display_possible_answers(question):\n answers = question['incorrect'] + [question['correct']]\n random.shuffle(answers)\n answer_dict = {}\n for i, answer in enumerate(answers):\n answer_dict[str(i + 1)] = answer\n print(f\"{i + 1}: {answer}\\n\")\n return answer_dict",
"def node_info(self) -> dict:\r\n location_str = f\"{self.location[0]},{str(self.location[1])},{str(self.location[2])}\"\r\n return {\"id\": self.key, \"pos\": location_str}",
"def _process_nodes(self):\n # Sort the nodes by metanode type, then by id\n self.node_df = self.node_df.sort_values(['label', 'id']).reset_index(drop=True)\n # Get all the ids\n self.nodes = self.node_df['id']\n # Get mapping from the index to the node ID (one to many so need different one for each node type)\n self.index_to_nid = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.index_to_nid[group_name] = group['id'].reset_index(drop=True).to_dict()\n # Get the reverse mapping (many to one so don't need to separate based on type).\n self.nid_to_index = dict()\n for mapper in self.index_to_nid.values():\n for index, nid in mapper.items():\n self.nid_to_index[nid] = index\n # Finally, we need a mapper from id to node type\n self.id_to_metanode = self.node_df.set_index('id')['label'].to_dict()\n # And from node type to a list of ids\n self.metanode_to_ids = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.metanode_to_ids[group_name] = group['id'].tolist()\n # One more mapper of id to name\n self.nid_to_name = self.node_df.set_index('id')['name'].to_dict()",
"def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict",
"def all_in_edges_of_node(self, id1: int) -> dict:\n return self.edges_in[id1]",
"def _docMapping(self):\n doc2quests = defaultdict(list)\n for q, d in self.quest2doc.items():\n doc2quests[d].append(q)\n return doc2quests"
] | [
"0.5874153",
"0.5803899",
"0.5788026",
"0.5769955",
"0.56848216",
"0.56081706",
"0.5563815",
"0.55347794",
"0.5533629",
"0.55147934",
"0.5420318",
"0.53809404",
"0.53226113",
"0.53205335",
"0.52443105",
"0.52339405",
"0.5148043",
"0.5130552",
"0.5121005",
"0.5114256",
"0.5098566",
"0.50605094",
"0.5052158",
"0.50452065",
"0.5038137",
"0.50350416",
"0.50346714",
"0.5018171",
"0.501794",
"0.5008823"
] | 0.60742897 | 0 |
Runs through all of the nodes in the JSON responses, storing the intersection and set differences into a dictionary organized by tuples of node ids, or the tuple (-1, -1) for all nodes. | def node_diff(self):
if self.input1 is None or self.input2 is None:
raise Exception("Missing input: please run the populate() method first")
if self.node_dict1 is None or self.node_dict2 is None:
self.make_node_dict()
        # Initialize dictionaries to keep track of the nodes in response 1 and response 2
g1={}
g2={}
# Set to keep track of the union of all curie ids
curie_set = set()
for curie in self.node_dict1.keys():
g1[curie] = {}
# intersection is only in the g1 dictionary
g1[curie]['intersection'] = set()
            # the 'node' entry keeps track of the question-graph node ids (e.g. "n0") associated with each curie
g1[curie]['node'] = set()
curie_set.add(curie)
for curie in self.node_dict2.keys():
g2[curie] = {}
            # the 'node' entry keeps track of the question-graph node ids (e.g. "n0") associated with each curie
g2[curie]['node'] = set()
curie_set.add(curie)
node_names1 = []
node_names2 = []
        # extract all node ids (i.e. "n0", "n1", etc.)
if len(self.input1['question_graph']['nodes'])>0:
if 'id' in self.input1['question_graph']['nodes'][0]:
node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']]
elif 'node_id' in self.input1['question_graph']['nodes'][0]:
node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']]
if len(self.input2['question_graph']['nodes'])>0:
if 'id' in self.input2['question_graph']['nodes'][0]:
node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']]
elif 'node_id' in self.input2['question_graph']['nodes'][0]:
node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']]
        # initialize the result dictionary
diff_dict = {}
diff_dict["-1|-1"] = {'intersection':[],'g1-g2':[],'g2-g1':[]}
# initialize node id tuple keys
for id1 in node_names1:
for id2 in node_names2:
diff_dict[id1+"|"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]}
# iterate through answers
for answer1 in self.input1['answers']:
for answer2 in self.input2['answers']:
for id1 in answer1['node_bindings'].keys():
# This is to handle cases where answer node id has a list or a string
if isinstance(answer1['node_bindings'][id1], str):
bindings1 = [answer1['node_bindings'][id1]]
elif isinstance(answer1['node_bindings'][id1], list):
bindings1 = answer1['node_bindings'][id1]
for curie1 in bindings1:
# store node id
g1[curie1]['node'].add(id1)
for id2 in answer2['node_bindings'].keys():
# This is to handle cases where answer node id has a list or a string
if isinstance(answer2['node_bindings'][id2], str):
bindings2 = [answer2['node_bindings'][id2]]
elif isinstance(answer2['node_bindings'][id2], list):
bindings2 = answer2['node_bindings'][id2]
for curie2 in bindings2:
# store node id
g2[curie2]['node'].add(id2)
if curie1 == curie2:
                                    # store intersection tuple
g1[curie1]['intersection'].add(id1+"|"+id2)
# iterate through all curies
for curie in curie_set:
# check if curie is from answer 1
if curie in g1.keys():
# check if in intersection
if len(g1[curie]['intersection'])>0:
diff_dict["-1|-1"]['intersection'] += [self.node_dict1[curie]]
for id1 in node_names1:
for id2 in node_names2:
node_tuple = id1+"|"+id2
if id1 in g1[curie]['node'] and id2 in g2[curie]['node']:
diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]]
elif id1 in g1[curie]['node']:
diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]]
elif id2 in g2[curie]['node']:
diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]]
# If not in intersection store in g1-g2
else:
diff_dict["-1|-1"]['g1-g2'] += [self.node_dict1[curie]]
for id1 in g1[curie]['node']:
# iterate through all answer 2 ids
for id2 in node_names2:
diff_dict[id1+"|"+id2]['g1-g2'] += [self.node_dict1[curie]]
# if not in g1 but in g2 then in g2-g1
elif curie in g2.keys():
diff_dict["-1|-1"]['g2-g1'] += [self.node_dict2[curie]]
for id2 in g2[curie]['node']:
# iterate through all answer 1 ids
for id1 in node_names1:
diff_dict[id1+"|"+id2]['g2-g1'] += [self.node_dict2[curie]]
return diff_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dict_sets_intersection_test(self, data):\n\n data_info = self.get_data_info(data)\n finished = []\n\n for part in data:\n for union_part in data:\n if part != union_part and union_part not in finished:\n data[part].intersection(data[union_part])\n finished.append(part)\n\n return data_info",
"def dict_un_lists_intersection_test(self, data):\n\n data_info = self.get_data_info(data)\n finished = []\n\n for part in data:\n for union_part in data:\n union = []\n if part != union_part and union_part not in finished:\n for node in data[part]:\n if node in data[union_part]:\n union.append(node)\n finished.append(part)\n\n return data_info",
"def make_node_dict(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n self.node_dict1 = {}\n for node in self.input1['knowledge_graph']['nodes']:\n self.node_dict1[node['id']] = node\n self.node_dict2 = {}\n for node in self.input2['knowledge_graph']['nodes']:\n self.node_dict2[node['id']] = node",
"def intersect(self, rays): \n result = {}\n \n if bool(self._merged):\n result[\"x\"], result[\"y\"], result[\"z\"], result[\"valid\"], result[\"ray_u\"], \\\n result[\"trig_u\"], result[\"trig_v\"], result[\"gather_ray\"], \\\n result[\"gather_trig\"] = self._intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"z_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n rays[\"z_end\"],\n self._merged[\"xp\"],\n self._merged[\"yp\"],\n self._merged[\"zp\"],\n self._merged[\"x1\"],\n self._merged[\"y1\"],\n self._merged[\"z1\"],\n self._merged[\"x2\"],\n self._merged[\"y2\"],\n self._merged[\"z2\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n \n result[\"norm\"] = tf.gather(\n self._merged[\"norm\"],\n result[\"gather_trig\"]\n )\n \n return result",
"def test_intersection_edges(self):\n path = os.path.join(get_file_dir(), 'data', 'GO_edges_intersection_of.json')\n with open(path, 'rt') as json_file:\n json_files = []\n for data in json_file:\n json_files.append(json.loads(data))\n for entry in json_files:\n if entry[\"id\"] == \"GO:0000082__GO:0044843__\":\n self.assertEqual(entry[\"from\"], \"GO_term/GO:0000082\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0044843\")\n self.assertEqual(entry[\"intersection_type\"], \"\")\n if entry[\"id\"] == \"GO:0000082__GO:0000278__part_of\":\n self.assertEqual(entry[\"from\"], \"GO_term/GO:0000082\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0000278\")\n self.assertEqual(entry[\"intersection_type\"], \"part_of\")",
"def test_intersection(self, client):\n\n expected = {\n 'a': [0,2,4,6,8],\n 'b': [4,6,8,10,12,14,16],\n 'result': [4,6,8]\n }\n\n res = client.post('/api/v1/intersection', json={'a': expected['a'], 'b': expected['b'] })\n assert res.status_code == 200\n assert res.json['data'] == expected['result']\n assert res.json['status'] == 2000",
"def merge_duplicate_nodes(self):\n merges={}\n xys={}\n for n in self.valid_node_iter():\n k=tuple(self.nodes['x'][n])\n if k in xys:\n merges[n]=xys[k]\n self.merge_nodes(xys[k],n)\n else:\n xys[k]=n\n return merges",
"def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1",
"def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict",
"def get_nodes(self):\n return_set = set()\n for key in self._main_dictionary:\n return_set.add(key)\n return return_set",
"def alldiff():\n res = {'Computation-alldiff-0': {'Experiment': 'alldiff',\n 'Parameters': {'w': 5, 'x': 1, 'z': 4},\n 'Results': {'f1': 15, 'f2': 51}},\n 'Computation-alldiff-1': {'Experiment': 'alldiff',\n 'Parameters': {'w': 6, 'x': 1, 'z': 4},\n 'Results': {'f1': 16, 'f2': 61}},\n 'Computation-alldiff-2': {'Experiment': 'alldiff',\n 'Parameters': {'w': 5, 'x': 2, 'z': 4},\n 'Results': {'f1': 25, 'f2': 52}},\n 'Computation-alldiff-3': {'Experiment': 'alldiff',\n 'Parameters': {'w': 6, 'x': 2, 'z': 4},\n 'Results': {'f1': 26, 'f2': 62}},\n 'Computation-alldiff-4': {'Experiment': 'alldiff',\n 'Parameters': {'w': 5, 'x': 3, 'z': 4},\n 'Results': {'f1': 35, 'f2': 53}},\n 'Computation-alldiff-5': {'Experiment': 'alldiff',\n 'Parameters': {'w': 6, 'x': 3, 'z': 4},\n 'Results': {'f1': 36, 'f2': 63}}}\n\n # Notice the ordering\n domain = {'x':[\"1\", \"2\", \"3\"], 'w':[\"5\", \"6\"]}\n metadata = {'z':\"4\"}\n parameters = [\"x\", \"w\"]\n parameters.sort()\n metrics = [\"f1\", \"f2\"]\n metrics.sort()\n exp_name = \"alldiff\"\n return exp_name, metadata, parameters, domain, metrics, res",
"def all_in_edges_of_node(self, id1: int) -> dict:\n if id1 in self.Nodes:\n ans = {}\n for i, j in self.Edges.items():\n if id1 in j:\n ans[i] = j[id1]\n return ans\n return {}",
"def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict",
"def _get_intersections():\n with _get_mongo_client() as client:\n coll = client[mongo_database]['locations']\n return coll.find({'intersection_number': {'$exists': True}}, {'_id': False})",
"def differentNodesForNode(ntupleSet,nodeList,verbose=False):\n nodesPerNode = dict(zip(nodeList,[[] for n in range(len(nodeList))]))\n for ntuple in ntupleSet:\n for nodeInTuple in ntuple:\n nodesPerNode[nodeInTuple].extend(ntuple)\n \n for a,v in nodesPerNode.iteritems():\n nodesPerNode[a] = set(v)\n \n return nodesPerNode",
"def results(self) -> Dict[str, Any]:\n return self.nodes",
"def buildNodesDict(self):\n # Get relevant nodes from TANA ca_jc, intersect with BUS_ROUTE_TRAVERSAL_EDGES.\n # Then get the X,Y for the features.\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n arcpy.AddXY_management(PublicTransit.RELEVANT_NODES)\n nodes = arcpy.SearchCursor(PublicTransit.RELEVANT_NODES, \"\", \"\",\n \"ID_hash; POINT_X; POINT_Y\", \"\")\n self.nodesDict = dict()\n numNodes = int(arcpy.GetCount_management(PublicTransit.RELEVANT_NODES).getOutput(0))\n print \"Found %d nodes\" % numNodes\n for node in nodes:\n self.nodesDict[node.ID_hash] = Node(node.ID_hash, node.POINT_X, node.POINT_Y)\n del node\n del nodes",
"def process_data(self, json_dict: dict):\n all_token_ids = []\n all_level_ids = []\n all_synset_ids = []\n all_lemma_ids = []\n all_is_highway = []\n all_targets = []\n\n def tokenize(lemma_):\n return self.tokenizer(\n lemma_,\n add_special_tokens=False,\n truncation=True,\n is_split_into_words=True,\n return_token_type_ids=False,\n ).input_ids\n\n def add_lemma(lemma_, abs_level_, synset_id_, is_highway_):\n lemma_token_ids = tokenize([lemma_])\n n_tokens_ = len(lemma_token_ids)\n token_ids.extend(lemma_token_ids)\n level_ids.extend([self.level_to_id[abs_level_]] * n_tokens_)\n synset_ids.extend([synset_id_] * n_tokens_)\n lemma_ids.extend([lemma_ids[-1] + 1] * n_tokens_)\n is_highway.extend([is_highway_] * n_tokens_)\n\n # Go through all JSON entries\n for synset in tqdm(json_dict.values()):\n token_ids = []\n level_ids = []\n synset_ids = [0]\n lemma_ids = [0]\n is_highway = []\n\n lemmas = [l.replace(\"_\", \" \") for l in synset[\"lemmas\"]]\n abs_level = (\"current\", \"current\")\n\n # Save all lemmas of the current node\n synset_token_ids = self.tokenizer.batch_encode_plus(lemmas,\n add_special_tokens=False,\n return_token_type_ids=False).input_ids\n all_targets.append(synset_token_ids)\n\n for level in (\"hypernyms\", \"hyponyms\"):\n for sub_synset in synset[level].values():\n if \"lemmas\" in sub_synset:\n lemmas = [l.replace(\"_\", \" \") for l in sub_synset[\"lemmas\"]]\n abs_level = (level, \"current\")\n synset_id = synset_ids[-1] + 1\n\n # Add the synset's lemma that is on highway\n highway_lemma = lemmas.pop(0)\n add_lemma(highway_lemma, abs_level, synset_id, True)\n\n # Add the synset's other lemmas\n for lemma in lemmas:\n add_lemma(lemma, abs_level, synset_id, False)\n\n for sub_level in (\"hypernyms\", \"hyponyms\"):\n for sub_sub_lemmas in sub_synset[sub_level].values():\n lemmas = [l.replace(\"_\", \" \") for l in sub_sub_lemmas]\n abs_level = (level, sub_level)\n synset_id = synset_ids[-1] + 1\n\n # Add the synset's lemma that is on highway\n highway_lemma = lemmas.pop(0)\n add_lemma(highway_lemma, abs_level, synset_id, True)\n\n # Add the synset's other lemmas\n for lemma in lemmas:\n add_lemma(lemma, abs_level, synset_id, False)\n\n # Append the global lists\n all_token_ids.append(token_ids)\n all_level_ids.append(level_ids)\n all_synset_ids.append(synset_ids[1:])\n all_lemma_ids.append(lemma_ids[1:])\n all_is_highway.append(is_highway)\n\n data = (\n all_token_ids,\n all_level_ids,\n all_synset_ids,\n all_lemma_ids,\n all_is_highway,\n all_targets\n )\n\n return data",
"def transform_response_for_loading(response, schema, test_execute_start_time=None):\n if not test_execute_start_time:\n test_execute_start_time = datetime.utcnow()\n\n # flatten the dictionaries and add to flats list\n flats: list = list()\n nodes: list = nl('nodes', response)\n node_list: list = nodes[0]\n for unique_node in node_list:\n\n flat: dict = dict()\n v2_schema = [i[0] for i in schema]\n for key in v2_schema:\n flat.setdefault(key, '')\n flat['node_updateTimestamp'] = unique_node['updateTimestamp']\n flat['node_id'] = unique_node['id']\n\n dimensions: dict = dict()\n try:\n dimensions = unique_node['staticData']['dimensions']\n except KeyError:\n pass # handle with check for value below\n\n if dimensions:\n for key, value in dimensions.items():\n if not value:\n value = ''\n flat[key] = value\n\n # vessel in node\n vessel: dict = unique_node['staticData']\n for k, v in vessel.items():\n if k == \"updateTimestamp\":\n flat[\"staticData_updateTimestamp\"] = v\n elif k == 'timestamp':\n flat['staticData_timestamp'] = v\n else:\n if not k == 'dimensions':\n if not v:\n v = ''\n flat[k] = v\n\n # lastPositionUpdate in node\n lastPositionUpdate: dict = dict()\n try:\n lastPositionUpdate: dict = unique_node['lastPositionUpdate']\n except BaseException as e:\n logger.error(e)\n logger.error(\"Could be there is no lastPositionUpdate\")\n\n if lastPositionUpdate:\n for k, v in lastPositionUpdate.items():\n if k == \"updateTimestamp\":\n flat[\"lastPositionUpdate_updateTimestamp\"] = v\n elif k == 'timestamp':\n flat['lastPositionUpdate_timestamp'] = v\n else:\n if not v:\n v = ''\n flat[k] = v\n\n # currentVoyage in node\n currentVoyage: dict = dict()\n try:\n currentVoyage = unique_node['currentVoyage']\n except BaseException as e:\n logger.error(e)\n logger.error(\"Could be there is no currentVoyage\")\n if currentVoyage:\n for k, v in currentVoyage.items():\n if k == \"updateTimestamp\":\n flat['currentVoyage_updateTimestamp'] = v\n elif k == 'timestamp':\n flat['currentVoyage_timestamp'] = v\n elif k == 'matchedPort':\n try:\n flat['matchedPort_matchScore'] = currentVoyage['matchedPort']['matchScore']\n except (KeyError, TypeError):\n continue\n\n port: dict = dict()\n try:\n port = currentVoyage['port']\n except (KeyError, TypeError):\n continue\n centerPoint: dict = dict()\n try:\n centerPoint = port['centerPoint']\n\n except (KeyError, TypeError):\n continue\n if centerPoint:\n flat['port_name'] = centerPoint['matchedPort']['name']\n flat['port_unlocode'] = centerPoint['matchedPort']['unlocode']\n latitude = centerPoint['latitude']\n longitude = centerPoint['longitude']\n flat['matchedPort_latitude'] = latitude\n flat['matchedPort_long'] = longitude\n else:\n if not v:\n v = ''\n flat[k] = v\n try:\n # in case somehow these got into the dictionary\n del flat['dimensions']\n del flat['currentVoyage']\n del flat['matchedPort']\n except KeyError:\n pass\n flats.append(flat)\n return flats",
"def get_nodes(wf_results):\n return {node.fullname: node for node in wf_results.nodes}",
"def get_incident_nodes(self):\n # return the set of incident edges\n return \\\n {\n self.first_incident_node,\n self.second_incident_node\n }",
"def get_structs(self, ignore=\"^(rel_|frame_)\"):\n triples = list(self.get_triples())\n # get nodes per predicate and roles per nodes\n predicates = {} # node : predicate (set of nodes)\n roles = defaultdict(list) # subject_node : [rel]\n nodes = {} # id : node\n for s,p,o in triples:\n sid, oid = int(s.id), int(o.id)\n nodes[sid] = s\n nodes[oid] = o\n if p == \"pred\":\n pred =(predicates.get(sid, {sid}) |\n predicates.get(oid, {oid}))\n for node in pred:\n predicates[node] = pred\n elif not re.match(ignore, p):\n if sid not in predicates:\n predicates[sid] = {sid}\n roles[sid].append((p, oid))\n # output a dict per predicate with nodes per role\n for pnodes in set(map(tuple, predicates.values())):\n pid = sorted(pnodes)[0]\n result = defaultdict(list) # list of nodes per role\n for node in pnodes:\n result['predicate'].append(nodes[node])\n for p, oid in roles[node]:\n node_ids = self.get_descendants(oid, triples, ignore=ignore)\n result[p] += [nodes[n] for n in node_ids]\n yield dict(result.iteritems()) # convert to regular dict",
"def get_common():\n body: t.Any = request.json\n check_error({'input': {'first': {}, 'second': {}}}, body)\n response_first = rpc_search({'input': body['input']['first']})\n response_second = rpc_search({'input': body['input']['second']})\n\n modules_first = response_first['yang-catalog:modules']['module']\n modules_second = response_second['yang-catalog:modules']['module']\n\n if len(modules_first) == 0 or len(modules_second) == 0:\n abort(404, description='No hits found either in first or second input')\n\n output_modules_list = []\n names = []\n for mod_first in modules_first:\n for mod_second in modules_second:\n if mod_first['name'] == mod_second['name']:\n if mod_first['name'] not in names:\n names.append(mod_first['name'])\n output_modules_list.append(mod_first)\n if len(output_modules_list) == 0:\n abort(404, description='No common modules found within provided input')\n return {'output': output_modules_list}",
"def all_ids(self) -> Set[int]:\n return {node_id for _, (node_id, _) in self.nodes.items()}",
"def get_hits(nodes: Dict[int, PhyloNode], rank: str, taxids: List[int]) -> Dict[int, int]:\n\n hits = {}\n for taxid in taxids:\n if taxid not in nodes:\n continue\n hit = get_ancestor_of_rank(nodes[taxid], rank)\n # pigeonhole ancestors of taxons\n if not hit:\n continue\n if hit in hits:\n hits[hit] += 1\n else:\n hits[hit] = 1\n return hits",
"def common_peers(self, i, j):\n ir = self.get(i, self.router.network)\n jr = self.get(j, self.router.network)\n \n if not ir or not jr:\n return []\n\n ir = [tuple(p['node']) for p in ir if p['transactions']]\n jr = [tuple(p['node']) for p in jr if p['transactions']]\n\n result = list(set(ir).intersection(jr))\n log(\"cmn: %s %s %i: %s\" % (i, j, len(result), result))\n return result",
"def dict_get_nodekeys_recursive(d):\n nodekeys = set(d.keys())\n for nk in nodekeys:\n # print \"nodekey\", nk\n # print \"graphkeys\", d[nk]['params'].keys()\n if 'graph' in d[nk]['params']:\n # print \"graphkeys\", d[nk]['params']['graph'].keys()\n nodekeys = nodekeys.union(dict_get_nodekeys_recursive(d[nk]['params']['graph']))\n return nodekeys",
"def intersection(llist_1, llist_2):\n hashmap = {}\n return_linked_list = LinkedList()\n node = llist_1.get_head()\n while node:\n hashmap[node.get_value()] = 0\n node = node.get_next()\n node = llist_2.get_head()\n while node:\n if node.get_value() in hashmap:\n if hashmap[node.get_value()] == 1:\n node= node.get_next()\n continue\n\n return_linked_list.append(node.get_value())\n hashmap[node.get_value()] = 1\n node = node.get_next()\n if return_linked_list.size() == 0:\n return 'No intersections found'\n return return_linked_list",
"def getNodesAndDistances():\n\n\tglobal width, height\n\n\t# First we generate the list\n\n\tprint \"\\tGetting node list...\"\n\t\n\tnodeDict = {}\n\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\ttheType = getSquare(x, y)\n\n\t\t\tprint \"\\t\\tGetting list for node (%d, %d) of type %d...\" % (x, y, theType)\n\n\t\t\ttempList = getNodeList(x, y, theType)\n\n\t\t\tif tempList == []:\n\t\t\t\tprint \"\\t\\t\\tNo nodes here.\"\n\t\t\telse:\n\t\t\t\tfor i in range(len(tempList)):\n\t\t\t\t\tnode = tempList[i]\n\t\t\t\t\tnodeName = node[0]\n\t\t\t\t\tnodeDict[nodeName] = node[1:]\t# Everything but the first element\n\t\t\t\t\tprint \"\\t\\t\\tAdded node '%s'...\" % nodeName\n\n\tprint \"\\tDone getting node list (%d nodes)...\" % (len(nodeDict.keys()))\n\tprint \"\"\n\n\t# Now that we've got that, we get a list of pairs\n\n\tpairList = getPairList(nodeDict)\n\n\t# Now we calculate the distance between every pair of nodes that connect\n\n\tprint \"\"\n\tprint \"\\tCreateing dictionary of distances between connected nodes...\"\n\n\tdistanceDict = {}\n\n\tfor tuple in pairList:\n\t\t(nodeA, nodeB) = tuple\n\t\tprint \"\\t\\tCalculating distance between '%s' and '%s'...\" % (nodeA, nodeB)\n\t\tdistance = distanceBetween(nodeA, nodeB, nodeDict)\n\t\tpairName = \"%s%s\" % (nodeA, nodeB)\n\t\tdistanceDict[pairName] = distance\n\t\tprint \"\\t\\t\\tDistace was %f.\" % (distance)\n\n\tprint \"\\tDone creating dictionary of node differences (%d pairs).\" % (len(distanceDict.keys()))\n\n\treturn nodeDict, distanceDict",
"def intersection(arrays):\n # Your code here\n hash = {}\n hash2 = {}\n for i in range(len(arrays[0])):\n hash[arrays[0][i]] = i\n\n for key in hash:\n if key in arrays[1]:\n hash2[key] = hash[key]\n print(hash2)\n \n for i in range(2, len(arrays)):\n for key in hash2:\n if key not in arrays[i]:\n hash2[key] = None\n\n list1 = [key for key in hash2 if hash2[key] != None] \n result = list1\n\n return result"
] | [
"0.57611847",
"0.57074016",
"0.5648599",
"0.5608913",
"0.55996966",
"0.54121363",
"0.539312",
"0.5247284",
"0.5185026",
"0.51842594",
"0.51647484",
"0.51572686",
"0.51484066",
"0.5144395",
"0.5093303",
"0.5067902",
"0.5056289",
"0.5055382",
"0.5027136",
"0.5016611",
"0.50049776",
"0.49992225",
"0.4978968",
"0.49493864",
"0.49396846",
"0.49309868",
"0.49214655",
"0.4914161",
"0.48999932",
"0.48992977"
] | 0.6736763 | 0 |
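A minimal sketch of how the diff dictionary returned by node_diff above can be read; the instance name comparer is hypothetical, and populate() is assumed to have been called beforehand, as the method itself requires.

# Sketch only: 'comparer' stands for a hypothetical instance of the class that owns node_diff().
diff = comparer.node_diff()
# Keys pair a node id from response 1 with one from response 2 (e.g. "n0|n1");
# the special key "-1|-1" aggregates over all question-graph nodes.
for key, buckets in diff.items():
    print(key,
          "common:", len(buckets["intersection"]),
          "only in response 1:", len(buckets["g1-g2"]),
          "only in response 2:", len(buckets["g2-g1"]))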
Reproducing kernel. Calculate the inverse Funk-Radon transform of the reproducing kernel for the space of spherical harmonics of maximum degree N. | def inv_funk_radon_kernel(mu, N):
# Check that -1 <= mu <= 1
mu = np.clip(mu, -1, 1)
# Need Legendre polynomials
legPolys = legp(mu, N)
p_at_zero = legp(0, N)
coefs = 2*np.arange(0, N+1, 2) + 1
ker = coefs*legPolys[::2]/p_at_zero[::2]
return ker.sum() / (8*np.pi) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)\n\n return ker.sum() / (8.0*np.pi*np.pi)",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)",
"def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)",
"def kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs*legPolys \n\n return ker.sum() / (4.0*np.pi)",
"def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)",
"def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)",
"def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)",
"def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)",
"def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()",
"def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, 
smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn",
"def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = 
np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi",
"def moffat_kernel(n_fwhm,beta,r_s):\n\n x_length = int(n_rs * r_s + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n\n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n\t\n m = 1. /((1+(x**2+y**2)/r_s**2)**beta)\n\t\t\n\n return m / m.sum()",
"def cs4243_filter(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n\n # multiply recep_area with kernel\n conv_sum = 0.0\n for y in range(Hk):\n for x in range(Wk): \n conv_sum += kernel[y][x] * recep_area[y][x]\n filtered_image[i, j] = conv_sum\n ###\n\n return filtered_image",
"def dilate_kernel(self, kernel, dilation):\n if dilation == 0:\n return kernel \n # inside padding based on the scaling law\n dilation = torch.tensor(dilation).float()\n delta = dilation%1\n\n d_in = torch.ceil(dilation**2).int()\n new_in = kernel.shape[2] + (kernel.shape[2]-1)*d_in\n\n d_h = torch.ceil(dilation).int()\n new_h = kernel.shape[3] + (kernel.shape[3]-1)*d_h\n\n d_w = torch.ceil(dilation).int()\n new_w = kernel.shape[4] + (kernel.shape[4]-1)*d_h\n\n new_kernel = torch.zeros(kernel.shape[0], kernel.shape[1], new_in, new_h, new_w)\n new_kernel[:,:,::(d_in+1),::(d_h+1), ::(d_w+1)] = kernel\n dilate_factor = 1\n \n new_kernel = F.pad(new_kernel, ((kernel.shape[4]-1)//2, (kernel.shape[4]-1)//2)*3)\n\n dilate_factor = (new_kernel.shape[-1] - 1 - (kernel.shape[4]-1)*(delta))/(new_kernel.shape[-1] - 1) \n\n grid = torch.meshgrid(torch.linspace(-1, 1, new_in)*(dilate_factor**2), \n torch.linspace(-1, 1, new_h)*dilate_factor, \n torch.linspace(-1, 1, new_w)*dilate_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(new_kernel, grid) \n \n return new_kernel[:,:,-kernel.shape[2]:]",
"def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A",
"def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org",
"def cs4243_filter_fast(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n filtered_image[i, j] = np.multiply(kernel, recep_area).sum()\n ###\n\n return filtered_image",
"def cs4243_filter_faster(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n # extract receptive area into matrix of shape (Hi*Wi, Hk*Wk)\n recep_areas = []\n for i in recep_fields_h:\n for j in recep_fields_w:\n recep_areas.append(image_pad[i: i+Hk, j: j+Wk].reshape(-1))\n out = np.stack(recep_areas)\n \n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel).reshape(Hk*Wk, 1)\n \n # dot product kernel and receptive areas\n filtered_image = np.dot(out, kernel).reshape(Hi, Wi)\n \n ###\n\n return filtered_image",
"def test_uv_degrid_gaussian_kernel():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(\n max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=\"gaussian\"\n )\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 1.295932713086053e-05",
"def kernel(n):\r\n return [(k, n - abs(k)) for k in range(-n, n + 1)]",
"def n2f(n):\n k = 4.0 * np.pi**2 * codata.value('electron mass') * codata.value('electric constant') / codata.value('elementary charge')**2\n return np.sqrt(n/k)",
"def f2n(f):\n k = 4.0 * np.pi**2 * codata.value('electron mass') * codata.value('electric constant') / codata.value('elementary charge')**2\n return k * f**2",
"def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test",
"def DisLayerSN(ndf, k):\n d_in = 2**k \n d_out = 2**(k+1)\n\n out = nn.Sequential(nn.utils.spectral_norm(\n nn.Conv2d(ndf*d_in, ndf*d_out, kernel_size, stride=stride, padding=padding, bias=False)), \n nn.BatchNorm2d(ndf * d_out), \n nn.LeakyReLU(0.2, inplace=True) )\n return out",
"def kernel(self):\n\n # Create a blank kernel the appropriate size\n kernel = np.zeros((self.n_rows, self.n_cols), dtype=np.int)\n\n # Iterate through the offsets, turning on the correct pixels\n for offset in self.offsets:\n row, col = offset\n if np.all(offset == self.index):\n kernel[row, col] = 2\n else:\n kernel[row, col] = 1\n\n # Ensure that the index pixel is not zero for footprints where the\n # index pixel is not part of the footprint\n if kernel[self.index[0], self.index[1]] == 0:\n kernel[self.index[0], self.index[1]] = 3\n return kernel",
"def bilinear_interpolation_kernel(in_channels, out_channels, ksize):\n\n factor = (ksize + 1) / 2\n if ksize % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:ksize, :ksize]\n k = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n \n W = np.zeros((in_channels, out_channels, ksize, ksize)).astype(np.float32)\n W[range(in_channels), range(out_channels), :, :] = k\n return W",
"def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij",
"def GenLayerSN(ngf, k):\n d_in = 2**k \n d_out = 2**(k-1)\n out = nn.Sequential( nn.utils.spectral_norm(\n nn.ConvTranspose2d(ngf * d_in, ngf * d_out, kernel_size, stride, padding, bias=False)),\n nn.BatchNorm2d(ngf * d_out),\n nn.ReLU(True) )\n return out",
"def rk4_sde(self, x, rv_n):\n a21 = 2.71644396264860\n a31 = - 6.95653259006152\n a32 = 0.78313689457981\n a41 = 0.0\n a42 = 0.48257353309214\n a43 = 0.26171080165848\n a51 = 0.47012396888046\n a52 = 0.36597075368373\n a53 = 0.08906615686702\n a54 = 0.07483912056879\n\n q1 = 2.12709852335625\n q2 = 2.73245878238737\n q3 = 11.22760917474960\n q4 = 13.36199560336697\n\n n = self.mp.params[0]; k = self.mp.params[1];\n gamma = self.mp.params[2]; dt = self.mp.params[3];\n\n if x.get_shape()[1] > 1:\n evolve_fun = self.evolve_system\n else:\n evolve_fun = self.evolve\n\n x1 = x\n k1 = dt * evolve_fun(x1, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x2 = x1 + a21 * k1\n k2 = dt * evolve_fun(x2, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x3 = x1 + a31 * k1 + a32 * k2\n k3 = dt * evolve_fun(x3, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x4 = x1 + a41 * k1 + a42 * k2\n k4 = dt * evolve_fun(x4, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x_new = x1 + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4\n\n return tf.cast(x_new, tf.float32)"
] | [
"0.7213641",
"0.63983333",
"0.61867654",
"0.61385816",
"0.61052585",
"0.60678124",
"0.6047397",
"0.59877527",
"0.598379",
"0.59239495",
"0.584012",
"0.5723425",
"0.57071584",
"0.5696312",
"0.55959344",
"0.556116",
"0.5512384",
"0.5488459",
"0.5458828",
"0.5453336",
"0.543654",
"0.5372929",
"0.53425217",
"0.5331985",
"0.53317183",
"0.5323391",
"0.5307182",
"0.5300855",
"0.52993053",
"0.5298185"
] | 0.7255298 | 0 |
Reproducing kernel. Calculate the inverse Funk-Radon transform and inverse spherical Laplacian of the reproducing kernel for the even-degree subspace of spherical harmonics of maximum degree N, i.e., calculates H(\mu) = \Delta^{-1} G^{-1} K_e(\mu), where \Delta is the spherical Laplacian and G is the Funk-Radon transform. The calculation is done in spectral space. | def inv_funk_radon_even_kernel(mu, N):
# Check that -1 <= mu <= 1
mu = np.clip(mu, -1, 1)
# Need Legendre polynomials
legPolys = legp(mu, N)
p_at_zero = legp(0, N)
coefs_num = 2*np.arange(0, N+1) + 1
coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)
ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)
return ker.sum() / (8.0*np.pi*np.pi) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)",
"def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)",
"def kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs*legPolys \n\n return ker.sum() / (4.0*np.pi)",
"def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)",
"def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)",
"def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)",
"def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)",
"def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A",
"def fdm_2d(N,L,x,y,h,k):\n\n # Create the Laplacian as a 1d sparse matrix using central difference\n ones = np.ones(N)\n diagvalues = np.array([ones,-2*ones,ones])\n offsets = np.array([-1,0,1])\n lap1d = sps.dia_matrix((diagvalues,offsets), shape=(N,N))/h**2\n \n # Represent 2d coordinates as kronecker sum\n lap = sps.kron(lap1d,sps.diags(np.ones(N))) + \\\n sps.kron(sps.diags(np.ones(N)),lap1d)\n \n # potential terms\n pot_x = np.repeat(x**2,N)\n pot_y = np.tile(y**2,N)\n\n # The whole Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_x) + sps.diags(pot_y))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvectors\n E, psi = eigsh(A,k=k,which='SM')\n\n\n # Perturbated potential\n a = 25\n pot_new = pot_x + pot_y + gauss_pert(N,a).flatten()\n\n # Plot the new potential\n X,Y = np.meshgrid(x,y)\n fig = plt.figure()\n ax = fig.add_subplot(1,2,1,projection='3d')\n ax.plot_surface(X, Y, pot_new.reshape((N,N)), cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax = fig.add_subplot(1,2,2)\n fig.suptitle(r'Potential with a Gaussian perturbation')\n ax.imshow(pot_new.reshape(N,N),extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'perturbated_potential.png'))\n\n # The perturbated Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_new))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvector\n # Of the perturbated system\n E_p, psi_p = eigsh(A,k=k,which='SM')\n\n return E,psi,E_p,psi_p",
"def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org",
"def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI",
"def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = 
np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi",
"def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij",
"def moffat_kernel(n_fwhm,beta,r_s):\n\n x_length = int(n_rs * r_s + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n\n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n\t\n m = 1. /((1+(x**2+y**2)/r_s**2)**beta)\n\t\t\n\n return m / m.sum()",
"def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):\n\n # Make vectors of the wave numbers\n kc_z = np.linspace(1e-6, kc_z_max, 35)\n kc_x = np.linspace(1e-6, kc_x_max, 35)\n\n # Turn those vectors into matrices\n kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)\n\n # Find some of the numbers that appear later in the calculations\n kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k\n theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B\n wc_i = 1 / m_i # The ion gyro frequency\n wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency\n wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency\n\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # For every k_perp and k_par, turn the dispersion relation into a\n # polynomial equation and solve it.\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # The polynomial coefficients are calculated\n pol_koeff_8 = -2 * kc_ ** 2\n pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)\n pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)\n pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)\n pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(\n theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))\n pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2\n\n w_final = np.zeros((10, len(kc_z), len(kc_x)))\n\n # For each k, solve the equation\n for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):\n disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,\n pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],\n 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]\n # theoretically should be real (A. Tjulin)\n w_temp = np.real(np.roots(disp_polynomial))\n # We need to sort the answers to get nice surfaces.\n w_final[:, k_z, k_x] = np.sort(w_temp)\n\n n2_ = kc_ ** 2 / w_final ** 2\n v_ph_c = np.sqrt(1. 
/ n2_)\n va_c = 1 / (wp_e * np.sqrt(m_i))\n v_ph_va = v_ph_c / va_c\n\n diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)\n\n e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)\n e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_\n\n b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,\n w_final, e_x, e_y, e_z)\n\n dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]\n dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]\n dw_x[:, :, 1:] = np.diff(w_final, axis=2)\n dw_z[:, 1:, :] = np.diff(w_final, axis=1)\n v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]\n\n s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)\n\n # Compute ion and electron velocities\n v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,\n e_x, e_y, e_z)\n\n # Ratio of parallel and perpendicular to B speed\n vepar_perp = v_ez * np.conj(v_ez)\n vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))\n vipar_perp = v_iz * np.conj(v_iz)\n vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))\n\n # Total particle speeds\n v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)\n v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)\n\n # Ion and electron energies\n m_e = -1\n en_e = 0.5 * m_e * v_e2\n en_i = 0.5 * m_i * v_i2\n\n # Ratio of particle and field energy densities\n ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)\n\n # Continuity equation\n dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,\n v_ex, v_ez, v_ix, v_iz)\n\n dn_e_n_db_b = dn_e_n / b_tot\n dn_i_n_db_b = dn_i_n / b_tot\n\n dn_e_n_dbpar_b = dn_e_n / b_par\n dn_i_n_dbpar_b = dn_i_n / b_par\n\n dn_e = dn_e_n * wp_e ** 2\n k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat\n k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))\n\n # Build output dict\n extra_param = {\"Degree of electromagnetism\": np.log10(b_tot / e_tot),\n \"Degree of longitudinality\": np.abs(e_par) / e_tot,\n \"Degree of parallelity E\": e_z / e_tot,\n \"Degree of parallelity B\": np.sqrt(\n b_z * np.conj(b_z)) / b_tot,\n \"Ellipticity E\": e_pol, \"Ellipticity B\": b_pol,\n \"E_part/E_field\": np.log10(ratio_part_field),\n \"v_g\": np.sqrt(v_x ** 2 + v_z ** 2),\n \"v_ph/v_a\": np.log10(v_ph_va),\n \"E_e/E_i\": np.log10(en_e / en_i),\n \"v_e/v_i\": np.log10(np.sqrt(v_e2 / v_i2)),\n \"v_epara/v_eperp\": np.log10(vepar_perp),\n \"v_ipara/v_iperp\": np.log10(vipar_perp),\n \"dn_e/dn_i\": np.log10(dne_dni),\n \"(dn_e/n)/ (dB/B)\": np.log10(dn_e_n_db_b),\n \"(dn_i/n)/(dB/B)\": np.log10(dn_i_n_db_b),\n \"(dn_i/n)/(dBpar/B)\": np.log10(dn_i_n_dbpar_b),\n \"(dn_e/n)/(dB/B)\": np.log10(dn_e / k_dot_e),\n \"(dn_e/n)/(dBpar /B)\": np.log10(dn_e_n_dbpar_b),\n \" Spar/Stot\": s_par / s_tot}\n\n for k, v in zip(extra_param.keys(), extra_param.values()):\n extra_param[k] = np.transpose(np.real(v), [0, 2, 1])\n\n kx_ = np.transpose(kc_x_mat)\n kz_ = np.transpose(kc_z_mat)\n wf_ = np.transpose(w_final, [0, 2, 1])\n\n return kx_, kz_, wf_, extra_param",
"def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, 
smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn",
"def ghosal_edge(img,Ks,thr=1,thrmax=0.995,lmin = 0.5,phimin=1.4,thresholding=True, debug=False):\n\ttotaltime = time.time()\n\tkerneltime = time.time()\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex)\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\tkerneltime = time.time() - kerneltime\n\t\n\t# Kernel Plots\n\t#\tVCplot = Vc00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\tconvolvetime = time.time()\n\t#A00 = scig.convolve2d(img,Vc00,mode='same')\n\t#\tA11 = Anorm(1)*scig.convolve2d(img,Vc11,mode='same')\n\t#\tA20 = Anorm(2)*scig.convolve2d(img,Vc20,mode='same')\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode='same')\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode='same')\n\tconvolvetime = time.time() - convolvetime\n\t# Plot Zernike moments\n\t#\tVCplot = A00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\tparamstime = time.time()\n\t# calculate the edge paramters\n\t#\ttanphi = np.imag(A11)/np.real(A11)\n\t#\tphi = np.arctan(tanphi)\n\t#\tcosphi = np.cos(phi)\n\t#\tsinphi = cosphi*tanphi\n\t#\tAl11 = np.real(A11)*cosphi+np.imag(A11)*sinphi\n\t\n\tphi = np.arctan(np.imag(A11)/np.real(A11))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\t\n\t#\tAl11 = A11*np.exp(-phi*1j)\n\tl = A20/Al11 # A20 has no imaginary component so A20 = A'20\n\n\tk = 3*Al11/(2*(1-l**2)**(3/2))\n\tparamstime = time.time() - paramstime\n\t\n\t# Plot edge paramters\n\t#\tVCplot = phi\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag 
phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Al11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = l\n\t#\tplt.pcolormesh(np.real(VCplot))#,vmin=-5,vmax=5\n\t#\tplt.title(\"real l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot)) # ,vmin=-5,vmax=5\n\t#\tplt.title(\"imag l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = k\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t\n\ttreattime = time.time()\n\tif thresholding==True:\n\t\t# do the thresholding\n\t\tif (thrmax<0)&(thr>0):\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])\n\t\telif thrmax>0:\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])&(abs(k)<knorm[1])\n\t\telif thr<0:\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)\n\t\t\tknorm = np.sort(k[idx].flatten())[int(thr)]\n\t\t\tidx = idx&(abs(k)>abs(knorm))\n\t\tne = np.sum(idx)\n\telif thresholding==False:\n\t\traise ValueError(\"this option is not still uncer development\")\n\t\t# no thresholding\n\t\tidx = np.ones(np.shape(l),dtype=bool)\n\t\tne =np.sum(idx)\n\telse:\n\t\traise ValueError(\"thresholding should be boolean\")\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.zeros((ne,2))\n\torg = np.zeros((ne,2))\n\tnx,ny = np.shape(img)\n\te = 0\n\tfor i in range(nx):\n\t\tfor j in range(ny):\n\t\t\tif idx[i,j]:\n\t\t\t\tedg[e]=np.array([i,j]) + l[i,j]*Ks/2*np.array(\n\t\t\t\t\t[np.sin(phi[i,j]),-np.cos(phi[i,j])])\n\t\t\t\torg[e]=np.array([i,j])\n\t\t\t\te +=1\n\ttreattime = time.time() - treattime\n\ttotaltime = time.time() - totaltime\n\tprint(\"total %0.5f\tconvolution %0.5f\tthresholding %0.5f\tparamters %0.5f\tkernel %0.5f\"%(totaltime,convolvetime,treattime,paramstime,kerneltime))\n\t\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org",
"def test():\n\n S = \"cells interlinked within cells interlinked\"\n T = \"within one stem and dreadfully distinct\"\n\n n = 2\n\n res = kernel(S, T, n)\n\n print(res)\n print('k(car, car, 1) = ', kernel('car', 'car', 1),\n 'should be 3*lambda^2 = .75')\n print('k(car, car, 2) = ', kernel('car', 'car', 2),\n ' should be lambda^6 + 2*lambda^4 = 0.140625')\n print('k(car, car, 3) = ', kernel('car', 'car', 3),\n 'should be lambda^6 = 0.0156')\n\n print('normkernel(cat, car, 1) = ', normkernel('cat', 'car', 1),\n 'should be 2/3')\n print('kernel(cat, car, 2) = ', kernel('cat', 'car', 2),\n 'should be lambda^4 = 0.0625')\n print('normkernel(cat, car, 2) = ', normkernel('cat', 'car', 2),\n 'should be 1/(2+lambda^2) = 0.44444')\n\n print(\n kernel(\"AxxxxxxxxxB\", \"AyB\", 2),\n 'should be =0.5^14 = 0.00006103515625')\n print(\n kernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2),\n 'should be 12.761724710464478')\n\n print(kernel(\"ab\", \"axb\", 2), 'should be =0.5^5 = 0.03125')\n print(kernel(\"ab\", \"abb\", 2), 'should be 0.5^5 + 0.5^4 = 0.09375')\n print(normkernel(\"ab\", \"ab\", 2), 'should be 1')\n print(normkernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2), 'should be 1')\n\n kss = [0.580, 0.580, 0.478, 0.439, 0.406, 0.370]\n for x in range(1, 7):\n print(x,\n normkernel(\"science is organized knowledge\",\n \"wisdom is organized life\", x), 'should be',\n kss[x - 1])",
"def kramers_kronig_hs(deltaE, I_EELS,\n N_ZLP=None,\n iterations=1,\n n=None,\n t=None,\n delta=0.5,\n full_output=True, prints = np.array([]), correct_S_s = False):\n output = {}\n # Constants and units\n me = 511.06\n\n e0 = 200 # keV\n beta =30 #mrad\n\n eaxis = deltaE[deltaE>0] #axis.axis.copy()\n ddeltaE = (np.max(deltaE) - np.min(deltaE))/(len(deltaE - 1))\n S_E = I_EELS[deltaE>0]\n y = I_EELS[deltaE>0]\n l = len(eaxis)\n i0 = N_ZLP\n \n # Kinetic definitions\n ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me) ** 2\n tgt = e0 * (2 * me + e0) / (me + e0)\n rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)\n\n for io in range(iterations):\n # Calculation of the ELF by normalization of the SSD\n # We start by the \"angular corrections\"\n Im = y / (np.log(1 + (beta * tgt / eaxis) ** 2)) / ddeltaE#axis.scale\n if n is None and t is None:\n raise ValueError(\"The thickness and the refractive index are \"\n \"not defined. Please provide one of them.\")\n elif n is not None and t is not None:\n raise ValueError(\"Please provide the refractive index OR the \"\n \"thickness information, not both\")\n elif n is not None:\n # normalize using the refractive index.\n K = np.sum(Im/eaxis)*ddeltaE \n K = (K / (np.pi / 2) / (1 - 1. / n ** 2))\n te = (332.5 * K * ke / i0)\n if full_output is True:\n output['thickness'] = te\n elif t is not None:\n if N_ZLP is None:\n raise ValueError(\"The ZLP must be provided when the \"\n \"thickness is used for normalization.\")\n # normalize using the thickness\n K = t * i0 / (332.5 * ke)\n te = t\n Im = Im / K\n\n # Kramers Kronig Transform:\n # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT\n # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490\n # Use an optimal FFT size to speed up the calculation, and\n # make it double the closest upper value to workaround the\n # wrap-around problem.\n esize = next_fast_len(2*l) #2**math.floor(math.log2(l)+1)*4\n q = -2 * np.fft.fft(Im, esize).imag / esize\n\n q[:l] *= -1\n q = np.fft.fft(q)\n # Final touch, we have Re(1/eps)\n Re = q[:l].real + 1\n # Egerton does this to correct the wrap-around problem, but in our\n # case this is not necessary because we compute the fft on an\n # extended and padded spectrum to avoid this problem.\n # Re=real(q)\n # Tail correction\n # vm=Re[axis.size-1]\n # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /\n # (axis.size*2-arange(0,axis.size-1)))**2)\n # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /\n # (axis.size+arange(0,axis.size)))**2)\n\n # Epsilon appears:\n # We calculate the real and imaginary parts of the CDF\n e1 = Re / (Re ** 2 + Im ** 2)\n e2 = Im / (Re ** 2 + Im ** 2)\n\n if iterations > 0 and N_ZLP is not None:\n # Surface losses correction:\n # Calculates the surface ELF from a vaccumm border effect\n # A simulated surface plasmon is subtracted from the ELF\n Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2 ** 2) - Im\n adep = (tgt / (eaxis + delta) *\n np.arctan(beta * tgt / eaxis) -\n beta / 1000. /\n (beta ** 2 + eaxis ** 2. 
/ tgt ** 2))\n Srfint = 2000 * K * adep * Srfelf / rk0 / te * ddeltaE #axis.scale\n if correct_S_s == True:\n print(\"correcting S_s\")\n Srfint[Srfint<0] = 0\n Srfint[Srfint>S_E] = S_E[Srfint>S_E]\n y = S_E - Srfint\n _logger.debug('Iteration number: %d / %d', io + 1, iterations)\n if iterations == io + 1 and full_output is True:\n output['S_s'] = Srfint\n del Srfint\n\n eps = (e1 + e2 * 1j)\n del y\n del I_EELS\n if 'thickness' in output:\n # As above,prevent errors if the signal is a single spectrum\n output['thickness'] = te\n if full_output is False:\n return eps\n else:\n return eps, output",
"def KFilt(sample,fs=25):\n\t#kalman filter inputs\n \n # Dimensions of parameters:\n # 'transition_matrices': 2,\n # 'transition_offsets': 1,\n # 'observation_matrices': 2,\n # 'observation_offsets': 1,\n # 'transition_covariance': 2,\n # 'observation_covariance': 2,\n # 'initial_state_mean': 1,\n # 'initial_state_covariance': 2,\n \n n_timesteps = len(sample)\n trans_mat = []\n\n\t#mask missing values\n observations = np.ma.array(sample,mask=np.zeros(sample.shape))\n missing_loc = np.where(np.isnan(sample))\n observations[missing_loc[0][:],missing_loc[1][:]] = np.ma.masked\n\t\n\t#Import Kalman filter, inerpolate missing points and get 2nd, 3rd orde kinematics\n dt = 1./25\t#Length of each frame (should be iether 1/25 or 1/30)\t\n n_timesteps = len(sample)\n \n observation_matrix = np.array([[1,0,0,0],\n [0,1,0,0]])#np.eye(4) \n t = np.linspace(0,len(observations)*dt,len(observations))\n q = np.cov(observations.T[:2,:400])\n qdot = np.cov(np.diff(observations.T[:2,:400]))#np.cov(observations[:1,:400])\n\n h=(t[-1]-t[0])/t.shape[0]\n A=np.array([[1,0,h,.5*h**2], \n [0,1,0,h], \n [0,0,1,0],\n [0,0,0,1]]) \n\n init_mean = [sample[0],0,0] #initial mean should be close to the first point, esp if first point is human-picked and tracking starts at the beginning of a video\n observation_covariance = q*500 #ADJUST THIS TO CHANGE SMOOTHNESS OF FILTER\n init_cov = np.eye(4)*.001#*0.0026\n transition_matrix = A\n transition_covariance = np.array([[q[0,0],q[0,1],0,0],\n [q[1,0],q[1,1],0,0],\n [0,0,qdot[0,0],qdot[0,1]],\n [0,0,qdot[1,0],qdot[1,1]]])\n\n kf = KalmanFilter(transition_matrix, observation_matrix,transition_covariance,observation_covariance,n_dim_obs=2)\n\n kf = kf.em(observations,n_iter=1,em_vars=['transition_covariance','transition_matrix','observation_covariance'])\n\n #pdb.set_trace()\n \n global trans_mat, trans_cov, init_cond\n x_filt = kf.filter(observations[0])[0]#observations.T[0])[0]\n kf_means = kf.smooth(observations[0])[0]\n\t\n return kf_means,x_filt #np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1])),frames",
"def get_func(k_center,enk,I,gamma,gamma_k):\n\n def lorentzian_k(k):\n return 1./np.pi * gamma_k / ( (k-k_center)**2 + gamma_k**2)\n\n def lorentzian(k,omega):\n return I * gamma / ( (omega-enk)**2 + gamma**2) * lorentzian_k(k)\n\n return lorentzian",
"def kramers_kronig_hs(self, I_EELS,\n N_ZLP=None,\n iterations=1,\n n=None,\n t=None,\n delta=0.5, correct_S_s=False):\n output = {}\n # Constants and units\n me = 511.06\n\n\n e0 = self.e0\n beta = self.beta\n\n eaxis = self.deltaE[self.deltaE > 0] # axis.axis.copy()\n S_E = I_EELS[self.deltaE > 0]\n y = I_EELS[self.deltaE > 0]\n l = len(eaxis)\n i0 = N_ZLP\n\n # Kinetic definitions\n ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me) ** 2 #m0 v**2\n tgt = e0 * (2 * me + e0) / (me + e0)\n rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me) #me c**2 / (hbar c) gamma sqrt(2Ekin /(me c**2))\n \n for io in range(iterations):\n # Calculation of the ELF by normalization of the SSD\n # We start by the \"angular corrections\"\n Im = y / (np.log(1 + (beta * tgt / eaxis) ** 2)) / self.ddeltaE # axis.scale\n if n is None and t is None:\n raise ValueError(\"The thickness and the refractive index are \"\n \"not defined. Please provide one of them.\")\n elif n is not None and t is not None:\n raise ValueError(\"Please provide the refractive index OR the \"\n \"thickness information, not both\")\n elif n is not None:\n # normalize using the refractive index.\n K = np.sum(Im / eaxis) * self.ddeltaE\n K = K / (np.pi / 2) / (1 - 1. / n ** 2)\n te = (332.5 * K * ke / i0)\n \n Im = Im / K\n\n # Kramers Kronig Transform:\n # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT\n # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490\n # Use an optimal FFT size to speed up the calculation, and\n # make it double the closest upper value to workaround the\n # wrap-around problem.\n esize = next_fast_len(2 * l) # 2**math.floor(math.log2(l)+1)*4\n q = -2 * np.fft.fft(Im, esize).imag / esize #TODO : min twee?????\n\n q[:l] *= -1\n q = np.fft.fft(q)\n # Final touch, we have Re(1/eps)\n Re = q[:l].real + 1 #TODO: plus 1???\n # Egerton does this to correct the wrap-around problem, but in our\n # case this is not necessary because we compute the fft on an\n # extended and padded spectrum to avoid this problem.\n # Re=real(q)\n # Tail correction\n # vm=Re[axis.size-1]\n # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /\n # (axis.size*2-arange(0,axis.size-1)))**2)\n # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /\n # (axis.size+arange(0,axis.size)))**2)\n\n # Epsilon appears:\n # We calculate the real and imaginary parts of the CDF\n e1 = Re / (Re ** 2 + Im ** 2)\n e2 = Im / (Re ** 2 + Im ** 2)\n\n if iterations > 0 and N_ZLP is not None: #TODO: loop weghalen.\n # Surface losses correction:\n # Calculates the surface ELF from a vaccumm border effect\n # A simulated surface plasmon is subtracted from the ELF\n Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2 ** 2) - Im\n adep = (tgt / (eaxis + delta) *\n np.arctan(beta * tgt / eaxis) -\n beta / 1000. /\n (beta ** 2 + eaxis ** 2. / tgt ** 2))\n Srfint = 2000 * K * adep * Srfelf / rk0 / te * self.ddeltaE # axis.scale\n if correct_S_s == True:\n print(\"correcting S_s\")\n Srfint[Srfint < 0] = 0\n Srfint[Srfint > S_E] = S_E[Srfint > S_E]\n y = S_E - Srfint\n _logger.debug('Iteration number: %d / %d', io + 1, iterations)\n\n eps = (e1 + e2 * 1j)\n del y\n del I_EELS\n if 'thickness' in output:\n # As above,prevent errors if the signal is a single spectrum\n output['thickness'] = te\n\n return eps, te, Srfint",
"def weiner_tf(H, K):\r\n\r\n W = (1 / H) * ((np.conjugate(H) * H) / ((np.conjugate(H) * H) + K))\r\n return W",
"def folded_voigt_kernel_logst(k,log_nstbeta,log_ngammaL,dLarray):\n\n beta=jnp.exp(log_nstbeta)\n gammaL=jnp.exp(log_ngammaL)\n def ffold(val,dL):\n val=val+jnp.exp(-2.0*((jnp.pi*beta*(k[:,None]+dL))**2 \\\n + jnp.pi*gammaL[None,:]*(k[:,None]+dL)))\n val=val+jnp.exp(-2.0*((jnp.pi*beta*(k[:,None]-dL))**2 \\\n + jnp.pi*gammaL[None,:]*(dL-k[:,None])))\n null=0.0\n return val, null\n val=jnp.exp(-2.0*((jnp.pi*beta*k[:,None])**2 + jnp.pi*gammaL[None,:]*k[:,None]))\n \n val,nullstack=scan(ffold, val, dLarray)\n \n return val",
"def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test",
"def gaussian_filter(img,f=5,K=1,var=1):\n i_x, i_y = np.shape(img) # image size\n radi = f//2 # window radius\n\n # create gaussian kernel\n def gaussian_kernel(f,K,var):\n \n # create coordinate information \n if f//2 == 0:\n x = np.linspace(-radi,radi,f+1)\n y = np.linspace(-radi,radi,f+1)\n x = np.delete(x, radi)\n y = np.delete(y, radi)\n else:\n x = np.linspace(-radi,radi,f)\n y = np.linspace(-radi,radi,f)\n\n m_x, m_y = np.meshgrid(x,y) # create coordinate\n r_gauss = m_x**2 + m_y**2 # distance to origin\n gauss = K*(np.exp(-r_gauss/(2*(var**2)))) # create kernel\n return gauss/gauss.sum()\n \n #mirror padding\n def mir_padding(img,f):\n img_p = np.zeros((i_x+2*radi,i_y+2*radi)) #create padding image\n img_p[radi:i_x+radi,radi:i_y+radi] = img #throw original image to padding image\n img_p[0:radi,radi:i_y+radi] = img[radi-1::-1,:] # padding top rows\n img_p[-radi::1,radi:i_y+radi] = img[-1:-radi-1:-1,:] # padding bottom rows\n img_p[radi:i_x+radi,0:radi] = img[:,radi-1::-1] # padding left column\n img_p[radi:i_x+radi,-radi::1] = img[:,-1:-radi-1:-1] # padding right column\n for i in range(f):\n img_p[0:radi,i] = img[radi-1-i,radi-1::-1] # padding upper-left corner\n img_p[0:radi,-i] = img[radi-1-i,-radi::1] # padding upper-righ corner\n img_p[-1:-radi-1:-1,i] = img[-radi+i,radi-1::-1] # padding lower-left corner\n img_p[-1:-radi-1:-1,-i] = img[-radi+i,-radi::1] # padding lower-right corner\n return img_p\n\n img_p = mir_padding(img,f) # create padding image\n g_kernel = gaussian_kernel(f,K,var) # create gaussian kernel\n\n #seperate kernel\n E = g_kernel[0,0]\n c = g_kernel[:,0]\n wT = np.reshape(g_kernel[0,:]/E,(f,1))\n\n gauss_image = np.zeros([i_x,i_y]) # create gauss image\n temp_image = np.zeros([i_x,i_y]) # create temp image for two 1D convolution\n old_c_sum = c.sum() # calculate sum of c before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for j in range(i_y):\n y_bound = i_y - j\n mod_c = c.copy()\n if j < radi:\n mod_c[0:radi-j] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n if j > i_y - radi - 1:\n mod_c[-1:-radi+y_bound-1:-1] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n for i in range(i_x):\n temp_image[i,j] = np.sum(img_p[i+radi,j:j+f]*mod_c)\n\n temp_image = mir_padding(temp_image,f) # create padding temp image for next 1D convolution\n old_wT_sum = wT.sum() # calculate sum of wT before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for i in range(i_x):\n x_bound = i_x - i\n mod_wT = wT.copy()\n if i < radi:\n mod_wT[0:radi-i] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n if i > i_x - radi - 1:\n mod_wT[-1:-radi+x_bound-1:-1] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n for j in range(i_y):\n gauss_image[i,j] = np.sum(temp_image[i:i+f,j+radi]*mod_wT.T)\n\n return gauss_image",
"def rasm_mode(self, K, MAX_ITER=40):\r\n #old_Ki_f = np.zeros((self.N, 1))\r\n\r\n #Start f's at zero originally of if we have gone off track, try restarting\r\n if self.old_Ki_f is None or self.bad_fhat:\r\n old_Ki_f = np.random.rand(self.N, 1)/50.0\r\n #old_Ki_f = self.Y\r\n f = np.dot(K, old_Ki_f)\r\n else:\r\n #Start at the old best point\r\n old_Ki_f = self.old_Ki_f.copy()\r\n f = self.f_hat.copy()\r\n\r\n new_obj = -np.inf\r\n old_obj = np.inf\r\n\r\n def obj(Ki_f, f):\r\n return -0.5*np.dot(Ki_f.T, f) + self.noise_model.logpdf(f, self.data, extra_data=self.extra_data)\r\n\r\n difference = np.inf\r\n epsilon = 1e-7\r\n #step_size = 1\r\n #rs = 0\r\n i = 0\r\n\r\n while difference > epsilon and i < MAX_ITER:\r\n W = -self.noise_model.d2logpdf_df2(f, self.data, extra_data=self.extra_data)\r\n\r\n W_f = W*f\r\n grad = self.noise_model.dlogpdf_df(f, self.data, extra_data=self.extra_data)\r\n\r\n b = W_f + grad\r\n W12BiW12Kb, _ = self._compute_B_statistics(K, W.copy(), np.dot(K, b))\r\n\r\n #Work out the DIRECTION that we want to move in, but don't choose the stepsize yet\r\n full_step_Ki_f = b - W12BiW12Kb\r\n dKi_f = full_step_Ki_f - old_Ki_f\r\n\r\n f_old = f.copy()\r\n def inner_obj(step_size, old_Ki_f, dKi_f, K):\r\n Ki_f = old_Ki_f + step_size*dKi_f\r\n f = np.dot(K, Ki_f)\r\n # This is nasty, need to set something within an optimization though\r\n self.tmp_Ki_f = Ki_f.copy()\r\n self.tmp_f = f.copy()\r\n return -obj(Ki_f, f)\r\n\r\n i_o = partial_func(inner_obj, old_Ki_f=old_Ki_f, dKi_f=dKi_f, K=K)\r\n #Find the stepsize that minimizes the objective function using a brent line search\r\n #The tolerance and maxiter matter for speed! Seems to be best to keep them low and make more full\r\n #steps than get this exact then make a step, if B was bigger it might be the other way around though\r\n #new_obj = sp.optimize.minimize_scalar(i_o, method='brent', tol=1e-4, options={'maxiter':5}).fun\r\n new_obj = sp.optimize.brent(i_o, tol=1e-4, maxiter=10)\r\n f = self.tmp_f.copy()\r\n Ki_f = self.tmp_Ki_f.copy()\r\n\r\n #Optimize without linesearch\r\n #f_old = f.copy()\r\n #update_passed = False\r\n #while not update_passed:\r\n #Ki_f = old_Ki_f + step_size*dKi_f\r\n #f = np.dot(K, Ki_f)\r\n\r\n #old_obj = new_obj\r\n #new_obj = obj(Ki_f, f)\r\n #difference = new_obj - old_obj\r\n ##print \"difference: \",difference\r\n #if difference < 0:\r\n ##print \"Objective function rose\", np.float(difference)\r\n ##If the objective function isn't rising, restart optimization\r\n #step_size *= 0.8\r\n ##print \"Reducing step-size to {ss:.3} and restarting optimization\".format(ss=step_size)\r\n ##objective function isn't increasing, try reducing step size\r\n #f = f_old.copy() #it's actually faster not to go back to old location and just zigzag across the mode\r\n #old_obj = new_obj\r\n #rs += 1\r\n #else:\r\n #update_passed = True\r\n\r\n #old_Ki_f = self.Ki_f.copy()\r\n\r\n #difference = abs(new_obj - old_obj)\r\n #old_obj = new_obj.copy()\r\n difference = np.abs(np.sum(f - f_old)) + np.abs(np.sum(Ki_f - old_Ki_f))\r\n #difference = np.abs(np.sum(Ki_f - old_Ki_f))/np.float(self.N)\r\n old_Ki_f = Ki_f.copy()\r\n i += 1\r\n\r\n self.old_Ki_f = old_Ki_f.copy()\r\n\r\n #Warn of bad fits\r\n if difference > epsilon:\r\n self.bad_fhat = True\r\n warnings.warn(\"Not perfect f_hat fit difference: {}\".format(difference))\r\n elif self.bad_fhat:\r\n self.bad_fhat = False\r\n warnings.warn(\"f_hat now perfect again\")\r\n\r\n self.Ki_f = Ki_f\r\n return f",
"def SE(H, W):\n\n no_real, N, N, K, M = H.shape\n all_powers = np.swapaxes(np.swapaxes(H, 0, 1) @ hermitian(W), 0, 1)\n all_powers = np.abs(all_powers) ** 2\n\n\n\n # (no_real, N, N, K, K)\n # (no_real, n_t, n, k, k_neighbor)\n # the power coming from BS n_t to User k in BS n, using the\n # precoding of BS n_t to user k_neighbor in BS n1\n\n\n p_sig = np.zeros((no_real, N, K))\n p_int = np.zeros((no_real, N, K, N))\n sinr = np.zeros_like(p_sig)\n\n\n for r in range(no_real):\n for n in range(N):\n for k in range(K):\n p_sig[r, n, k] = all_powers[r, n, n, k, k]\n for n_t in range(N):\n p_int[r, n, k, n_t] = all_powers[r, n_t, n, k].sum()\n if n_t == n:\n p_int[r, n, k, n_t] -= p_sig[r,n,k]\n sinr = p_sig / ((p_int).sum(axis=-1) + 1)\n return np.log2(1 + sinr), p_sig, p_int"
] | [
"0.739296",
"0.6562701",
"0.65557134",
"0.65486634",
"0.63957083",
"0.6381057",
"0.6323597",
"0.62995636",
"0.6100595",
"0.59519017",
"0.59446114",
"0.59294933",
"0.59197277",
"0.5910358",
"0.5873097",
"0.58403426",
"0.5745301",
"0.5744658",
"0.5742014",
"0.57419306",
"0.5586465",
"0.5575102",
"0.55693614",
"0.5565277",
"0.554511",
"0.55450875",
"0.5477988",
"0.54771334",
"0.54395854",
"0.54345185"
] | 0.7455563 | 0 |
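
The `gaussian_filter` negative listed above builds a 2-D Gaussian kernel and then applies it as two 1-D convolutions (a column pass followed by a row pass). Below is a minimal, self-contained sketch of that separability idea only; it is not part of the dataset row, and the image size, filter width `f`, and variance `var` are arbitrary illustrative values.

# Sketch: a 2-D Gaussian kernel is the outer product of two 1-D Gaussians,
# so a column pass followed by a row pass reproduces the full 2-D convolution.
import numpy as np
from scipy.signal import convolve2d

f, var = 5, 1.0                       # filter width and Gaussian variance (illustrative)
radius = f // 2
x = np.arange(-radius, radius + 1)
g1d = np.exp(-x**2 / (2 * var**2))
g1d /= g1d.sum()                      # normalized 1-D Gaussian
g2d = np.outer(g1d, g1d)              # equivalent (rank-1) 2-D kernel

img = np.random.rand(32, 32)
full = convolve2d(img, g2d, mode="same")
sep = convolve2d(convolve2d(img, g1d[:, None], mode="same"),
                 g1d[None, :], mode="same")
print(np.allclose(full, sep))         # True: two 1-D passes match one 2-D pass
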
Reproducing kernel: calculate the reproducing kernel for the even subspace of spherical harmonics of maximum degree N. | def even_kernel(mu, N):
    # Check that -1 <= mu <= 1
    mu = np.clip(mu, -1, 1)
    # Need Legendre polynomials
    legPolys = legp(mu, N)
    coefs = 2*np.arange(0, N+1) + 1
    ker = coefs[0::2]*legPolys[0::2]
    return ker.sum() / (4.0*np.pi) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)",
"def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)\n\n return ker.sum() / (8.0*np.pi*np.pi)",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A",
"def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()",
"def kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs*legPolys \n\n return ker.sum() / (4.0*np.pi)",
"def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)",
"def moffat_kernel(n_fwhm,beta,r_s):\n\n x_length = int(n_rs * r_s + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n\n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n\t\n m = 1. /((1+(x**2+y**2)/r_s**2)**beta)\n\t\t\n\n return m / m.sum()",
"def _kernel(r: float, h: float) -> float:\n sigma_2 = 10 / (7 * np.pi * h * h)\n q = abs(r / h)\n\n if q <= 1.0:\n q2 = q * q\n W = 1.0 - 1.5 * q2 * (1.0 - 0.5 * q)\n W *= sigma_2\n elif q <= 2.0:\n two_minus_q = 2 - q\n two_minus_q_c = np.power(two_minus_q, 3)\n W = 0.25 * two_minus_q_c\n W *= sigma_2\n else:\n W = 0\n\n return W",
"def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)",
"def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, 
smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn",
"def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)",
"def test():\n\n S = \"cells interlinked within cells interlinked\"\n T = \"within one stem and dreadfully distinct\"\n\n n = 2\n\n res = kernel(S, T, n)\n\n print(res)\n print('k(car, car, 1) = ', kernel('car', 'car', 1),\n 'should be 3*lambda^2 = .75')\n print('k(car, car, 2) = ', kernel('car', 'car', 2),\n ' should be lambda^6 + 2*lambda^4 = 0.140625')\n print('k(car, car, 3) = ', kernel('car', 'car', 3),\n 'should be lambda^6 = 0.0156')\n\n print('normkernel(cat, car, 1) = ', normkernel('cat', 'car', 1),\n 'should be 2/3')\n print('kernel(cat, car, 2) = ', kernel('cat', 'car', 2),\n 'should be lambda^4 = 0.0625')\n print('normkernel(cat, car, 2) = ', normkernel('cat', 'car', 2),\n 'should be 1/(2+lambda^2) = 0.44444')\n\n print(\n kernel(\"AxxxxxxxxxB\", \"AyB\", 2),\n 'should be =0.5^14 = 0.00006103515625')\n print(\n kernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2),\n 'should be 12.761724710464478')\n\n print(kernel(\"ab\", \"axb\", 2), 'should be =0.5^5 = 0.03125')\n print(kernel(\"ab\", \"abb\", 2), 'should be 0.5^5 + 0.5^4 = 0.09375')\n print(normkernel(\"ab\", \"ab\", 2), 'should be 1')\n print(normkernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2), 'should be 1')\n\n kss = [0.580, 0.580, 0.478, 0.439, 0.406, 0.370]\n for x in range(1, 7):\n print(x,\n normkernel(\"science is organized knowledge\",\n \"wisdom is organized life\", x), 'should be',\n kss[x - 1])",
"def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)",
"def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)",
"def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = 
np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi",
"def kernel(n):\r\n return [(k, n - abs(k)) for k in range(-n, n + 1)]",
"def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test",
"def ghosal_edge(img,Ks,thr=1,thrmax=0.995,lmin = 0.5,phimin=1.4,thresholding=True, debug=False):\n\ttotaltime = time.time()\n\tkerneltime = time.time()\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex)\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\tkerneltime = time.time() - kerneltime\n\t\n\t# Kernel Plots\n\t#\tVCplot = Vc00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\tconvolvetime = time.time()\n\t#A00 = scig.convolve2d(img,Vc00,mode='same')\n\t#\tA11 = Anorm(1)*scig.convolve2d(img,Vc11,mode='same')\n\t#\tA20 = Anorm(2)*scig.convolve2d(img,Vc20,mode='same')\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode='same')\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode='same')\n\tconvolvetime = time.time() - convolvetime\n\t# Plot Zernike moments\n\t#\tVCplot = A00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\tparamstime = time.time()\n\t# calculate the edge paramters\n\t#\ttanphi = np.imag(A11)/np.real(A11)\n\t#\tphi = np.arctan(tanphi)\n\t#\tcosphi = np.cos(phi)\n\t#\tsinphi = cosphi*tanphi\n\t#\tAl11 = np.real(A11)*cosphi+np.imag(A11)*sinphi\n\t\n\tphi = np.arctan(np.imag(A11)/np.real(A11))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\t\n\t#\tAl11 = A11*np.exp(-phi*1j)\n\tl = A20/Al11 # A20 has no imaginary component so A20 = A'20\n\n\tk = 3*Al11/(2*(1-l**2)**(3/2))\n\tparamstime = time.time() - paramstime\n\t\n\t# Plot edge paramters\n\t#\tVCplot = phi\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag 
phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Al11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = l\n\t#\tplt.pcolormesh(np.real(VCplot))#,vmin=-5,vmax=5\n\t#\tplt.title(\"real l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot)) # ,vmin=-5,vmax=5\n\t#\tplt.title(\"imag l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = k\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t\n\ttreattime = time.time()\n\tif thresholding==True:\n\t\t# do the thresholding\n\t\tif (thrmax<0)&(thr>0):\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])\n\t\telif thrmax>0:\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])&(abs(k)<knorm[1])\n\t\telif thr<0:\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)\n\t\t\tknorm = np.sort(k[idx].flatten())[int(thr)]\n\t\t\tidx = idx&(abs(k)>abs(knorm))\n\t\tne = np.sum(idx)\n\telif thresholding==False:\n\t\traise ValueError(\"this option is not still uncer development\")\n\t\t# no thresholding\n\t\tidx = np.ones(np.shape(l),dtype=bool)\n\t\tne =np.sum(idx)\n\telse:\n\t\traise ValueError(\"thresholding should be boolean\")\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.zeros((ne,2))\n\torg = np.zeros((ne,2))\n\tnx,ny = np.shape(img)\n\te = 0\n\tfor i in range(nx):\n\t\tfor j in range(ny):\n\t\t\tif idx[i,j]:\n\t\t\t\tedg[e]=np.array([i,j]) + l[i,j]*Ks/2*np.array(\n\t\t\t\t\t[np.sin(phi[i,j]),-np.cos(phi[i,j])])\n\t\t\t\torg[e]=np.array([i,j])\n\t\t\t\te +=1\n\ttreattime = time.time() - treattime\n\ttotaltime = time.time() - totaltime\n\tprint(\"total %0.5f\tconvolution %0.5f\tthresholding %0.5f\tparamters %0.5f\tkernel %0.5f\"%(totaltime,convolvetime,treattime,paramstime,kerneltime))\n\t\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org",
"def compute_gradient_kernel_respect_to_noise(n):\n\n return np.identity(n)",
"def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org",
"def SE(H, W):\n\n no_real, N, N, K, M = H.shape\n all_powers = np.swapaxes(np.swapaxes(H, 0, 1) @ hermitian(W), 0, 1)\n all_powers = np.abs(all_powers) ** 2\n\n\n\n # (no_real, N, N, K, K)\n # (no_real, n_t, n, k, k_neighbor)\n # the power coming from BS n_t to User k in BS n, using the\n # precoding of BS n_t to user k_neighbor in BS n1\n\n\n p_sig = np.zeros((no_real, N, K))\n p_int = np.zeros((no_real, N, K, N))\n sinr = np.zeros_like(p_sig)\n\n\n for r in range(no_real):\n for n in range(N):\n for k in range(K):\n p_sig[r, n, k] = all_powers[r, n, n, k, k]\n for n_t in range(N):\n p_int[r, n, k, n_t] = all_powers[r, n_t, n, k].sum()\n if n_t == n:\n p_int[r, n, k, n_t] -= p_sig[r,n,k]\n sinr = p_sig / ((p_int).sum(axis=-1) + 1)\n return np.log2(1 + sinr), p_sig, p_int",
"def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()",
"def weak_lensing_kernel(cosmo, pzs, z, ell):\n z = np.atleast_1d(z)\n zmax = max([pz.zmax for pz in pzs])\n # Retrieve comoving distance corresponding to z\n chi = bkgrd.radial_comoving_distance(cosmo, z2a(z))\n\n # Extract the indices of pzs that can be treated as extended distributions,\n # and the ones that need to be treated as delta functions.\n pzs_extended_idx = [\n i for i, pz in enumerate(pzs) if not isinstance(pz, rds.delta_nz)\n ]\n pzs_delta_idx = [i for i, pz in enumerate(pzs) if isinstance(pz, rds.delta_nz)]\n # Here we define a permutation that would put all extended pzs at the begining of the list\n perm = pzs_extended_idx + pzs_delta_idx\n # Compute inverse permutation\n inv = np.argsort(np.array(perm, dtype=np.int32))\n\n # Process extended distributions, if any\n radial_kernels = []\n if len(pzs_extended_idx) > 0:\n\n @vmap\n def integrand(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n # Stack the dndz of all redshift bins\n dndz = np.stack([pzs[i](z_prime) for i in pzs_extended_idx], axis=0)\n return dndz * np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(simps(integrand, z, zmax, 256) * (1.0 + z) * chi)\n # Process single plane redshifts if any\n if len(pzs_delta_idx) > 0:\n\n @vmap\n def integrand_single(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n return np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(\n integrand_single(np.array([pzs[i].params[0] for i in pzs_delta_idx]))\n * (1.0 + z)\n * chi\n )\n # Fusing the results together\n radial_kernel = np.concatenate(radial_kernels, axis=0)\n # And perfoming inverse permutation to put all the indices where they should be\n radial_kernel = radial_kernel[inv]\n\n # Constant term\n constant_factor = 3.0 * const.H0 ** 2 * cosmo.Omega_m / 2.0 / const.c\n # Ell dependent factor\n ell_factor = np.sqrt((ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5) ** 2\n return constant_factor * ell_factor * radial_kernel",
"def _calc_kernel(self,\n freq_1: float,\n time_1: float,\n freq_2: float,\n time_2: float,\n dagg: tuple\n ) -> Tuple[ndarray, ndarray]:\n dt = self._process_tensor.dt\n #pieces of kernel consist of some combination of phases and\n #Bose-Einstein factors\n n_1, n_2 = 0, 0\n if self._temp > 0:\n n_1 += np.exp(-freq_1/self._temp) / (1 - np.exp(-freq_1/self._temp))\n n_2 += np.exp(-freq_2/self._temp) / (1 - np.exp(-freq_2/self._temp))\n\n ker_dim = int(np.round(time_2 / dt))\n # calculate index corresponding to t_1\n switch = int(np.round(time_1 / dt))\n re_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n im_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n\n tpp_index, tp_index = np.meshgrid(\n np.arange(ker_dim), np.arange(ker_dim),\n indexing='ij') #array of indices for each array element\n regions = {\n 'a': (slice(switch), slice(switch)), #(0->t_1, 0->t_1)\n 'b': (slice(switch), slice(switch, None)), #(0->t_1, t_1->t)\n 'c': (slice(switch, None), slice(switch, None))} #(t_1->t, t_1->t)\n\n def phase(region, swap_ts = False):\n tk = tp_index[regions[region]]\n tkp = tpp_index[regions[region]]\n if tk.size == 0 or tkp.size == 0:\n return 0\n a = -1j * ((2*dagg[0] - 1)) * freq_2\n b = -1j * ((2*dagg[1] - 1)) * freq_1\n if swap_ts:\n a, b = b, a\n if region in ('a','c'):\n ph = np.triu(\n np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph += np.triu(\n np.exp(a * tk*dt + b * tkp*dt) / (a * b), k = 1)\n sel = np.diag(tk)\n di = -np.exp((a * (sel + 1) + b * sel) * dt) / (a * b)\n if a + b != 0:\n di += np.exp((a + b) * (sel + 1) * dt) / (b * (a+b))\n di += np.exp((a + b) * sel * dt) / (a * (a+b))\n else:\n di += (1 + a * sel * dt + b * (sel + 1) * dt) / (a * b)\n ph += np.diag(di)\n else:\n ph = np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b)\n ph -= np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b)\n ph -= np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b)\n ph += np.exp(a * tk*dt + b * tkp*dt) / (a * b)\n return ph\n\n\n if dagg == (0, 1):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * (n_1 + 1) * phase('c')\n\n elif dagg == (1, 0):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * n_1 * phase('c')\n\n elif dagg == (1, 1):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') +\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * (n_1 + 1) * phase('c')\n\n elif dagg == (0, 0):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = -((2*n_2 + 1) * phase('a', 1) +\n (2*n_1 + 1) * phase('a'))\n\n im_kernel[regions['b']] = -(2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * n_1 * phase('c')\n\n re_kernel = np.triu(re_kernel) #only keep triangular region\n im_kernel = np.triu(im_kernel)\n return re_kernel, im_kernel",
"def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI",
"def normkernel(S, T, n):\n\n k1 = kernel(S, S, n)\n k2 = kernel(T, T, n)\n res = kernel(S, T, n) / sqrt(k1 * k2)\n\n return res",
"def delta(N):\n assert assert_odd(N) # Make sure kernel is odd\n X = np.zeros((N,N)) # Square matrix with all 0s\n middle = int(N/2) # Get the middle cell\n X[middle, middle] = 1\n return X",
"def Pkernel(x):\n\n m = (x < 0.) & (x >= 1.)\n x[x < 0.] = np.zeros(np.sum(x < 0.))\n x[x >= 1.] = np.zeros(np.sum(x >= 1.))\n x = np.sqrt(x)\n\n result = np.log(2.) * np.log(2.) - np.pi *np.pi / 6. \\\n + 2. * spence(0.5 + 0.5 * x) - (x + x*x*x) / (1. - x*x) \\\n + (np.log(1. + x) - 2. * np.log(2.)) * np.log(1. - x) \\\n + 0.5 * (np.log(1. - x) * np.log(1. - x) - np.log(1. + x) * np.log(1. + x)) \\\n + 0.5 * (1. + x*x*x*x) / (1. - x*x) * (np.log(1. + x) - np.log(1. - x))\n result[x <= 0.] = np.zeros(np.sum(x <= 0.))\n result[x >= 1.] = np.zeros(np.sum(x >= 1.))\n return result",
"def kernel(self, modulus=None):\n M = self.matrix(modulus=modulus)\n if modulus is None:\n M = M.convert_to(QQ)\n # Note: Even when working over a finite field, what we want here is\n # the pullback into the integers, so in this case the conversion to ZZ\n # below is appropriate. When working over ZZ, the kernel should be a\n # ZZ-submodule, so, while the conversion to QQ above was required in\n # order for the nullspace calculation to work, conversion back to ZZ\n # afterward should always work.\n # TODO:\n # Watch <https://github.com/sympy/sympy/issues/21834>, which calls\n # for fraction-free algorithms. If this is implemented, we can skip\n # the conversion to `QQ` above.\n K = M.nullspace().convert_to(ZZ).transpose()\n return self.domain.submodule_from_matrix(K)"
] | [
"0.6827804",
"0.67321193",
"0.67197824",
"0.65318835",
"0.6531037",
"0.6433048",
"0.62198204",
"0.6084965",
"0.60187733",
"0.60123444",
"0.59886587",
"0.59792995",
"0.5959411",
"0.59217125",
"0.5896507",
"0.58899844",
"0.5817358",
"0.5794526",
"0.5773375",
"0.5705847",
"0.5680283",
"0.5675113",
"0.5667541",
"0.56564933",
"0.56430817",
"0.5638678",
"0.56132454",
"0.5592674",
"0.55905527",
"0.5588033"
] | 0.7083843 | 0 |
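
The `even_kernel` document above computes, via the spherical-harmonic addition theorem, the reproducing kernel of the even-degree subspace of spherical harmonics up to degree N: K(mu) = (1/(4*pi)) * sum over even l <= N of (2*l + 1) * P_l(mu), where P_l is the Legendre polynomial and mu is the cosine of the angle between the two unit vectors. The sketch below is not part of the dataset; it evaluates the same quantity with SciPy's Legendre evaluator instead of the row's `legp(mu, N)` helper, which is assumed to return [P_0(mu), ..., P_N(mu)].

# Direct evaluation of the even-degree reproducing kernel (illustrative sketch).
import numpy as np
from scipy.special import eval_legendre

def even_kernel_direct(mu, N):
    mu = np.clip(mu, -1.0, 1.0)
    degrees = np.arange(0, N + 1, 2)          # even degrees 0, 2, ..., <= N
    return np.sum((2 * degrees + 1) * eval_legendre(degrees, mu)) / (4.0 * np.pi)

# Example: kernel value for two directions 30 degrees apart, truncated at N = 8
print(even_kernel_direct(np.cos(np.pi / 6), N=8))
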